use of org.janusgraph.core.schema.JanusGraphManagement in project janusgraph by JanusGraph.
the class FulgoraGraphComputer method submit.
@Override
public Future<ComputerResult> submit() {
    if (executed)
        throw Exceptions.computerHasAlreadyBeenSubmittedAVertexProgram();
    else
        executed = true;
    // it is not possible to execute a computer that has neither a vertex program nor map-reducers
    if (null == vertexProgram && mapReduces.isEmpty())
        throw GraphComputer.Exceptions.computerHasNoVertexProgramNorMapReducers();
    // it is possible to run map-reducers without a vertex program
    if (null != vertexProgram) {
        GraphComputerHelper.validateProgramOnComputer(this, vertexProgram);
        this.mapReduces.addAll(this.vertexProgram.getMapReducers());
    }
    // if the user didn't set the desired persistence/result-graph modes, take them from the vertex program; otherwise default to no persistence
    this.persistMode = GraphComputerHelper.getPersistState(Optional.ofNullable(this.vertexProgram), Optional.ofNullable(this.persistMode));
    this.resultGraphMode = GraphComputerHelper.getResultGraphState(Optional.ofNullable(this.vertexProgram), Optional.ofNullable(this.resultGraphMode));
    // determine the legality of the persistence and result graph options
    if (!this.features().supportsResultGraphPersistCombination(this.resultGraphMode, this.persistMode))
        throw GraphComputer.Exceptions.resultGraphPersistCombinationNotSupported(this.resultGraphMode, this.persistMode);
    // ensure the requested number of workers does not exceed the supported maximum
    if (this.numThreads > this.features().getMaxWorkers())
        throw GraphComputer.Exceptions.computerRequiresMoreWorkersThanSupported(this.numThreads, this.features().getMaxWorkers());
    memory = new FulgoraMemory(vertexProgram, mapReduces);
    return CompletableFuture.supplyAsync(() -> {
        final long time = System.currentTimeMillis();
        if (null != vertexProgram) {
            // ##### Execute vertex program
            vertexMemory = new FulgoraVertexMemory(expectedNumVertices, graph.getIDManager(), vertexProgram);
            // execute the vertex program
            vertexProgram.setup(memory);
            try (VertexProgramScanJob.Executor job = VertexProgramScanJob.getVertexProgramScanJob(graph, memory, vertexMemory, vertexProgram)) {
                for (int iteration = 1; ; iteration++) {
                    memory.completeSubRound();
                    vertexMemory.nextIteration(vertexProgram.getMessageScopes(memory));
                    jobId = name + "#" + iteration;
                    StandardScanner.Builder scanBuilder = graph.getBackend().buildEdgeScanJob();
                    scanBuilder.setJobId(jobId);
                    scanBuilder.setNumProcessingThreads(numThreads);
                    scanBuilder.setWorkBlockSize(readBatchSize);
                    scanBuilder.setJob(job);
                    PartitionedVertexProgramExecutor programExecutor = new PartitionedVertexProgramExecutor(graph, memory, vertexMemory, vertexProgram);
                    try {
                        // Iterates over all vertices and computes the vertex program on all non-partitioned vertices. For partitioned ones, the data is aggregated
                        ScanMetrics jobResult = scanBuilder.execute().get();
                        long failures = jobResult.get(ScanMetrics.Metric.FAILURE);
                        if (failures > 0) {
                            throw new JanusGraphException("Failed to process [" + failures + "] vertices in vertex program iteration [" + iteration + "]. Computer is aborting.");
                        }
                        // Runs the vertex program on all aggregated, partitioned vertices.
                        programExecutor.run(numThreads, jobResult);
                        failures = jobResult.getCustom(PartitionedVertexProgramExecutor.PARTITION_VERTEX_POSTFAIL);
                        if (failures > 0) {
                            throw new JanusGraphException("Failed to process [" + failures + "] partitioned vertices in vertex program iteration [" + iteration + "]. Computer is aborting.");
                        }
                    } catch (Exception e) {
                        throw new JanusGraphException(e);
                    }
                    vertexMemory.completeIteration();
                    memory.completeSubRound();
                    try {
                        if (this.vertexProgram.terminate(this.memory)) {
                            break;
                        }
                    } finally {
                        memory.incrIteration();
                    }
                }
            }
        }
        // ##### Execute map-reduce jobs
        // Collect map jobs
        Map<MapReduce, FulgoraMapEmitter> mapJobs = new HashMap<>(mapReduces.size());
        for (MapReduce mapReduce : mapReduces) {
            if (mapReduce.doStage(MapReduce.Stage.MAP)) {
                FulgoraMapEmitter mapEmitter = new FulgoraMapEmitter<>(mapReduce.doStage(MapReduce.Stage.REDUCE));
                mapJobs.put(mapReduce, mapEmitter);
            }
        }
        // Execute map jobs
        jobId = name + "#map";
        try (VertexMapJob.Executor job = VertexMapJob.getVertexMapJob(graph, vertexMemory, mapJobs)) {
            StandardScanner.Builder scanBuilder = graph.getBackend().buildEdgeScanJob();
            scanBuilder.setJobId(jobId);
            scanBuilder.setNumProcessingThreads(numThreads);
            scanBuilder.setWorkBlockSize(readBatchSize);
            scanBuilder.setJob(job);
            try {
                ScanMetrics jobResult = scanBuilder.execute().get();
                long failures = jobResult.get(ScanMetrics.Metric.FAILURE);
                if (failures > 0) {
                    throw new JanusGraphException("Failed to process [" + failures + "] vertices in map phase. Computer is aborting.");
                }
                failures = jobResult.getCustom(VertexMapJob.MAP_JOB_FAILURE);
                if (failures > 0) {
                    throw new JanusGraphException("Failed to process [" + failures + "] individual map jobs. Computer is aborting.");
                }
            } catch (Exception e) {
                throw new JanusGraphException(e);
            }
            // Execute reduce phase and add to memory
            for (Map.Entry<MapReduce, FulgoraMapEmitter> mapJob : mapJobs.entrySet()) {
                FulgoraMapEmitter<?, ?> mapEmitter = mapJob.getValue();
                MapReduce mapReduce = mapJob.getKey();
                // sort results if a map output sort is defined
                mapEmitter.complete(mapReduce);
                if (mapReduce.doStage(MapReduce.Stage.REDUCE)) {
                    final FulgoraReduceEmitter<?, ?> reduceEmitter = new FulgoraReduceEmitter<>();
                    try (WorkerPool workers = new WorkerPool(numThreads)) {
                        workers.submit(() -> mapReduce.workerStart(MapReduce.Stage.REDUCE));
                        for (final Map.Entry queueEntry : mapEmitter.reduceMap.entrySet()) {
                            if (null == queueEntry)
                                break;
                            workers.submit(() -> mapReduce.reduce(queueEntry.getKey(), ((Iterable) queueEntry.getValue()).iterator(), reduceEmitter));
                        }
                        workers.submit(() -> mapReduce.workerEnd(MapReduce.Stage.REDUCE));
                    } catch (Exception e) {
                        throw new JanusGraphException("Exception while executing reduce phase", e);
                    }
                    // mapEmitter.reduceMap.entrySet().parallelStream().forEach(entry -> mapReduce.reduce(entry.getKey(), entry.getValue().iterator(), reduceEmitter));
                    // sort results if a reduce output sort is defined
                    reduceEmitter.complete(mapReduce);
                    mapReduce.addResultToMemory(this.memory, reduceEmitter.reduceQueue.iterator());
                } else {
                    mapReduce.addResultToMemory(this.memory, mapEmitter.mapQueue.iterator());
                }
            }
        }
        memory.attachReferenceElements(graph);
        // #### Write mutated properties back into graph
        Graph resultgraph = graph;
        if (persistMode == Persist.NOTHING && resultGraphMode == ResultGraph.NEW) {
            resultgraph = EmptyGraph.instance();
        } else if (persistMode != Persist.NOTHING && vertexProgram != null && !vertexProgram.getVertexComputeKeys().isEmpty()) {
            // First, create property keys in graph if they don't already exist
            JanusGraphManagement management = graph.openManagement();
            try {
                for (VertexComputeKey key : vertexProgram.getVertexComputeKeys()) {
                    if (!management.containsPropertyKey(key.getKey()))
                        log.warn("Property key [{}] is not part of the schema and will be created. It is advised to initialize all keys.", key.getKey());
                    management.getOrCreatePropertyKey(key.getKey());
                }
                management.commit();
            } finally {
                if (management != null && management.isOpen())
                    management.rollback();
            }
            // TODO: Filter based on VertexProgram
            Map<Long, Map<String, Object>> mutatedProperties = Maps.transformValues(vertexMemory.getMutableVertexProperties(),
                new Function<Map<String, Object>, Map<String, Object>>() {
                    @Nullable
                    @Override
                    public Map<String, Object> apply(final Map<String, Object> o) {
                        return Maps.filterKeys(o, s -> !VertexProgramHelper.isTransientVertexComputeKey(s, vertexProgram.getVertexComputeKeys()));
                    }
                });
            if (resultGraphMode == ResultGraph.ORIGINAL) {
                AtomicInteger failures = new AtomicInteger(0);
                try (WorkerPool workers = new WorkerPool(numThreads)) {
                    List<Map.Entry<Long, Map<String, Object>>> subset = new ArrayList<>(writeBatchSize / vertexProgram.getVertexComputeKeys().size());
                    int currentSize = 0;
                    for (Map.Entry<Long, Map<String, Object>> entry : mutatedProperties.entrySet()) {
                        subset.add(entry);
                        currentSize += entry.getValue().size();
                        if (currentSize >= writeBatchSize) {
                            workers.submit(new VertexPropertyWriter(subset, failures));
                            subset = new ArrayList<>(subset.size());
                            currentSize = 0;
                        }
                    }
                    if (!subset.isEmpty())
                        workers.submit(new VertexPropertyWriter(subset, failures));
                } catch (Exception e) {
                    throw new JanusGraphException("Exception while attempting to persist result into graph", e);
                }
                if (failures.get() > 0)
                    throw new JanusGraphException("Could not persist program results to graph. Check log for details.");
            } else if (resultGraphMode == ResultGraph.NEW) {
                resultgraph = graph.newTransaction();
                for (Map.Entry<Long, Map<String, Object>> vertexProperty : mutatedProperties.entrySet()) {
                    Vertex v = resultgraph.vertices(vertexProperty.getKey()).next();
                    for (Map.Entry<String, Object> prop : vertexProperty.getValue().entrySet()) {
                        if (prop.getValue() instanceof List) {
                            ((List) prop.getValue()).forEach(value -> v.property(VertexProperty.Cardinality.list, prop.getKey(), value));
                        } else {
                            v.property(VertexProperty.Cardinality.single, prop.getKey(), prop.getValue());
                        }
                    }
                }
            }
        }
        // update runtime and return the newly computed graph
        this.memory.setRuntime(System.currentTimeMillis() - time);
        this.memory.complete();
        return new DefaultComputerResult(resultgraph, this.memory);
    });
}
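For orientation, the method above is what ultimately runs when a vertex program is handed to JanusGraph's OLAP engine. Below is a minimal, hypothetical sketch of the caller's side, assuming an in-memory graph and TinkerPop's standard PageRankVertexProgram; it is not part of the FulgoraGraphComputer source.

import org.apache.tinkerpop.gremlin.process.computer.ComputerResult;
import org.apache.tinkerpop.gremlin.process.computer.ranking.pagerank.PageRankVertexProgram;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;

public class SubmitExample {
    public static void main(String[] args) throws Exception {
        JanusGraph graph = JanusGraphFactory.open("inmemory");
        // submit() returns a Future; get() blocks until all program iterations and map-reduce jobs finish
        ComputerResult result = graph.compute()
                .program(PageRankVertexProgram.build().create(graph))
                .submit().get();
        // the memory carries runtime/iteration info; the result graph exposes the computed properties
        System.out.println("runtime (ms): " + result.memory().getRuntime());
        result.graph().vertices().forEachRemaining(v ->
                System.out.println(v.id() + " -> " + v.value(PageRankVertexProgram.PAGE_RANK)));
        result.close();
        graph.close();
    }
}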
use of org.janusgraph.core.schema.JanusGraphManagement in project janusgraph by JanusGraph.
the class JanusGraphStepStrategyTest method generateTestParameters.
@Parameterized.Parameters(name = "{0}")
public static Iterable<Object[]> generateTestParameters() {
    final StandardJanusGraph graph = (StandardJanusGraph) StorageSetup.getInMemoryGraph();
    final GraphTraversalSource g = graph.traversal();
    // create a basic schema so that order pushdown can be tested, as this optimization requires a JanusGraph
    // transaction registered against a non-EmptyGraph
    final JanusGraphManagement mgmt = graph.openManagement();
    mgmt.makePropertyKey("name").dataType(String.class).make();
    mgmt.makePropertyKey("lang").dataType(String.class).make();
    mgmt.commit();
    return Arrays.asList(new Object[][] {
        { g.V().out(), g_V().out(), Collections.emptyList() },
        { g.V().has("name", "marko").out(), g_V("name", eq("marko")).out(), Collections.emptyList() },
        { g.V().has("name", "marko").has("age", gt(31).and(lt(10))).out(), g_V("name", eq("marko"), "age", gt(31), "age", lt(10)).out(), Collections.emptyList() },
        { g.V().has("name", "marko").or(has("age"), has("age", gt(32))).has("lang", "java"), g_V("name", eq("marko"), "lang", eq("java")).or(has("age"), has("age", gt(32))), Collections.singletonList(FilterRankingStrategy.instance()) },
        { g.V().has("name", "marko").as("a").or(has("age"), has("age", gt(32))).has("lang", "java"), g_V("name", eq("marko")).as("a").or(has("age"), has("age", gt(32))).has("lang", "java"), Collections.emptyList() },
        { g.V().has("name", "marko").as("a").or(has("age"), has("age", gt(32))).has("lang", "java"), g_V("name", eq("marko"), "lang", eq("java")).or(has("age"), has("age", gt(32))).as("a"), Collections.singletonList(FilterRankingStrategy.instance()) },
        { g.V().dedup().has("name", "marko").or(has("age"), has("age", gt(32))).has("lang", "java"), g_V("name", eq("marko"), "lang", eq("java")).or(has("age"), has("age", gt(32))).dedup(), Collections.singletonList(FilterRankingStrategy.instance()) },
        { g.V().as("a").dedup().has("name", "marko").or(has("age"), has("age", gt(32))).has("lang", "java"), g_V("name", eq("marko"), "lang", eq("java")).or(has("age"), has("age", gt(32))).dedup().as("a"), Collections.singletonList(FilterRankingStrategy.instance()) },
        { g.V().as("a").has("name", "marko").as("b").or(has("age"), has("age", gt(32))).has("lang", "java"), g_V("name", eq("marko"), "lang", eq("java")).or(has("age"), has("age", gt(32))).as("b", "a"), Collections.singletonList(FilterRankingStrategy.instance()) },
        { g.V().as("a").dedup().has("name", "marko").or(has("age"), has("age", gt(32))).filter(has("name", "bob")).has("lang", "java"), g_V("name", eq("marko"), "lang", eq("java"), "name", eq("bob")).or(has("age"), has("age", gt(32))).dedup().as("a"), Arrays.asList(InlineFilterStrategy.instance(), FilterRankingStrategy.instance()) },
        { g.V().has("name", "marko").or(not(has("age")), has("age", gt(32))).has("name", "bob").has("lang", "java"), g_V("name", eq("marko"), "name", eq("bob"), "lang", eq("java")).or(not(filter(properties("age"))), has("age", gt(32))), TraversalStrategies.GlobalCache.getStrategies(JanusGraph.class).toList() },
        { g.V().has("name", eq("marko").and(eq("bob").and(eq("stephen")))).out("knows"), g_V("name", eq("marko"), "name", eq("bob"), "name", eq("stephen")).out("knows"), Collections.emptyList() },
        { g.V().hasId(1), g_V(T.id, 1), Collections.emptyList() },
        { g.V().hasId(within(1, 2)), g_V(T.id, 1, T.id, 2), Collections.emptyList() },
        { g.V().hasId(1).has("name", "marko"), g_V(T.id, 1, "name", eq("marko")), Collections.emptyList() },
        { g.V().hasId(1).hasLabel("Person"), g_V(T.id, 1, "~label", eq("Person")), Collections.emptyList() },
        { g.V().hasLabel("Person").has("lang", "java").order().by("name"), g_V("~label", eq("Person"), "lang", eq("java"), new HasStepFolder.OrderEntry("name", Order.incr)), Collections.emptyList() },
        // same as above, different order
        { g.V().hasLabel("Person").has("lang", "java").order().by("name", Order.decr), g_V("~label", eq("Person"), "lang", eq("java"), new HasStepFolder.OrderEntry("name", Order.decr)), Collections.emptyList() },
        // if multiple order steps are specified in a row, only the last will be folded in because it overrides previous ordering
        { g.V().hasLabel("Person").has("lang", "java").order().by("lang", Order.incr).order().by("name", Order.decr), g_V("~label", eq("Person"), "lang", eq("java"), new HasStepFolder.OrderEntry("name", Order.decr)), Collections.emptyList() },
        // do not fold in orders that include a nested traversal
        { g.V().hasLabel("Person").order().by(values("age")), g_V("~label", eq("Person")).order().by(values("age")), Collections.emptyList() },
        // age property is not registered in the schema so the order should not be folded in
        { g.V().hasLabel("Person").has("lang", "java").order().by("age"), g_V("~label", eq("Person"), "lang", eq("java")).order().by("age"), Collections.emptyList() },
        // subsequent hasId steps are not folded into a single within(ids) lookup
        { g.V().hasId(1).hasId(2), g_V(T.id, 1).hasId(2), Collections.emptyList() }
    });
}
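The g_V(...) calls above are a helper defined in the test class that constructs the expected, pre-folded JanusGraphStep. Outside the test harness, the strategy's folding can be observed by applying strategies to a traversal and inspecting its start step; a rough sketch under the same in-memory setup (the printed output in the comment is illustrative, not verbatim):

import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;

public class StrategyFoldingExample {
    public static void main(String[] args) {
        JanusGraph graph = JanusGraphFactory.open("inmemory");
        GraphTraversalSource g = graph.traversal();
        // build, but do not execute, the traversal; then run the registered strategies
        Traversal.Admin<?, ?> t = g.V().has("name", "marko").out().asAdmin();
        t.applyStrategies();
        // after strategy application, the has("name","marko") filter is folded into the
        // start step (a JanusGraphStep) instead of remaining a separate HasStep
        System.out.println(t.getStartStep()); // e.g. JanusGraphStep([],[name.eq(marko)])
        graph.close();
    }
}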
use of org.janusgraph.core.schema.JanusGraphManagement in project atlas by apache.
the class AtlasJanusGraph method getIndexKeys.
private Set<String> getIndexKeys(Class<? extends Element> janusGraphElementClass) {
    JanusGraphManagement mgmt = getGraph().openManagement();
    Iterable<JanusGraphIndex> indices = mgmt.getGraphIndexes(janusGraphElementClass);
    Set<String> result = new HashSet<>();
    for (JanusGraphIndex index : indices) {
        result.add(index.name());
    }
    mgmt.commit();
    return result;
}
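getIndexKeys only reflects graph indexes that already exist, so it returns an empty set on a fresh graph. A hedged sketch of the management calls that would give it something to report, using illustrative key and index names rather than Atlas's actual schema:

import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;
import org.janusgraph.core.PropertyKey;
import org.janusgraph.core.schema.JanusGraphManagement;

public class IndexSetupExample {
    public static void main(String[] args) {
        JanusGraph graph = JanusGraphFactory.open("inmemory");
        JanusGraphManagement mgmt = graph.openManagement();
        // illustrative names; after commit, getGraphIndexes(Vertex.class) will include "vertex_typeName"
        PropertyKey typeName = mgmt.makePropertyKey("__typeName").dataType(String.class).make();
        mgmt.buildIndex("vertex_typeName", Vertex.class).addKey(typeName).buildCompositeIndex();
        mgmt.commit();
        graph.close();
    }
}

Note that getIndexKeys performs no schema mutations, so closing its management transaction with rollback() instead of commit() would be equivalent in effect.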
use of org.janusgraph.core.schema.JanusGraphManagement in project janusgraph by JanusGraph.
the class JanusGraphSpeedBenchmark method setup.
@Setup
public void setup() {
    ModifiableConfiguration config = GraphDatabaseConfiguration.buildGraphConfiguration();
    config.set(GraphDatabaseConfiguration.STORAGE_BACKEND, "inmemory");
    config.set(GraphDatabaseConfiguration.AUTO_TYPE, "none");
    config.set(GraphDatabaseConfiguration.SCHEMA_CONSTRAINTS, true);
    graph = JanusGraphFactory.open(config);
    JanusGraphManagement jgm = graph.openManagement();
    VertexLabel startVertex = jgm.makeVertexLabel(START_LABEL).make();
    PropertyKey uid = jgm.makePropertyKey(UID_PROP).dataType(Integer.class).make();
    jgm.buildIndex("byUid", Vertex.class).addKey(uid).indexOnly(startVertex).buildCompositeIndex();
    jgm.addProperties(startVertex, uid);
    VertexLabel endVertex = jgm.makeVertexLabel(END_LABEL).make();
    jgm.addProperties(endVertex, uid);
    EdgeLabel between = jgm.makeEdgeLabel("between").make();
    jgm.addConnection(between, startVertex, endVertex);
    jgm.commit();
    Vertex next = graph.traversal().addV(START_LABEL).property(UID_PROP, 1).next();
    for (int i = 0; i < numberOfVertices; i++) {
        graph.traversal().addV(END_LABEL).property(UID_PROP, i).as("end").addE("between").to("end").from(next).iterate();
    }
}
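Because this configuration disables automatic schema creation (AUTO_TYPE "none") and enables SCHEMA_CONSTRAINTS, every label, property key, and connection used above must first be declared through JanusGraphManagement, or the mutation is rejected. A benchmark method that could plausibly sit alongside this setup, exercising the byUid index; the method itself is an assumption for illustration, not part of the original class:

@Benchmark
public long traverseFromStart() {
    // the lookup is served by the "byUid" composite index, which indexOnly(startVertex)
    // restricts to START_LABEL vertices; out("between") follows the declared connection
    return graph.traversal()
            .V().hasLabel(START_LABEL).has(UID_PROP, 1)
            .out("between")
            .count().next();
}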
use of org.janusgraph.core.schema.JanusGraphManagement in project janusgraph by JanusGraph.
the class GraphCentricQueryBenchmark method setUp.
@Setup
public void setUp() throws Exception {
    graph = JanusGraphFactory.open(getConfiguration());
    JanusGraphManagement mgmt = graph.openManagement();
    PropertyKey name = mgmt.makePropertyKey("name").dataType(String.class).cardinality(Cardinality.SINGLE).make();
    mgmt.buildIndex("nameIndex", Vertex.class).addKey(name).buildCompositeIndex();
    mgmt.commit();
    final int batchSize = Math.min(10000, size);
    for (int i = 0; i < size / batchSize; i++) {
        for (int j = 0; j < batchSize; j++) {
            graph.addVertex("name", "value");
        }
        graph.tx().commit();
    }
}
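Every vertex is written with the identical name value, so a composite-index lookup on it returns the entire data set of `size` vertices. A plausible companion benchmark method (hypothetical, for illustration) that exercises the nameIndex through JanusGraph's graph-centric query API:

@Benchmark
public int countByName() {
    // answered via the "nameIndex" composite index built in setUp()
    int count = 0;
    for (JanusGraphVertex ignored : graph.query().has("name", "value").vertices()) {
        count++;
    }
    return count;
}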