Example 6 with ScanMetrics

use of org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics in project janusgraph by JanusGraph.

the class FulgoraGraphComputer method submit.

@Override
public Future<ComputerResult> submit() {
    if (executed)
        throw Exceptions.computerHasAlreadyBeenSubmittedAVertexProgram();
    else
        executed = true;
    // it is not possible to execute a computer that has neither a vertex program nor map-reducers
    if (null == vertexProgram && mapReduces.isEmpty())
        throw GraphComputer.Exceptions.computerHasNoVertexProgramNorMapReducers();
    // it is possible to run map-reducers without a vertex program
    if (null != vertexProgram) {
        GraphComputerHelper.validateProgramOnComputer(this, vertexProgram);
        this.mapReduces.addAll(this.vertexProgram.getMapReducers());
    }
    // if the user didn't set the desired persistence/result graph, take it from the vertex program; otherwise default to no persistence
    this.persistMode = GraphComputerHelper.getPersistState(Optional.ofNullable(this.vertexProgram), Optional.ofNullable(this.persistMode));
    this.resultGraphMode = GraphComputerHelper.getResultGraphState(Optional.ofNullable(this.vertexProgram), Optional.ofNullable(this.resultGraphMode));
    // determine whether the combination of persistence and result graph options is legal
    if (!this.features().supportsResultGraphPersistCombination(this.resultGraphMode, this.persistMode))
        throw GraphComputer.Exceptions.resultGraphPersistCombinationNotSupported(this.resultGraphMode, this.persistMode);
    // ensure the requested number of workers does not exceed the supported maximum
    if (this.numThreads > this.features().getMaxWorkers())
        throw GraphComputer.Exceptions.computerRequiresMoreWorkersThanSupported(this.numThreads, this.features().getMaxWorkers());
    memory = new FulgoraMemory(vertexProgram, mapReduces);
    return CompletableFuture.supplyAsync(() -> {
        final long time = System.currentTimeMillis();
        if (null != vertexProgram) {
            // ##### Execute vertex program
            vertexMemory = new FulgoraVertexMemory(expectedNumVertices, graph.getIDManager(), vertexProgram);
            // execute the vertex program
            vertexProgram.setup(memory);
            try (VertexProgramScanJob.Executor job = VertexProgramScanJob.getVertexProgramScanJob(graph, memory, vertexMemory, vertexProgram)) {
                for (int iteration = 1; ; iteration++) {
                    memory.completeSubRound();
                    vertexMemory.nextIteration(vertexProgram.getMessageScopes(memory));
                    jobId = name + "#" + iteration;
                    StandardScanner.Builder scanBuilder = graph.getBackend().buildEdgeScanJob();
                    scanBuilder.setJobId(jobId);
                    scanBuilder.setNumProcessingThreads(numThreads);
                    scanBuilder.setWorkBlockSize(readBatchSize);
                    scanBuilder.setJob(job);
                    PartitionedVertexProgramExecutor programExecutor = new PartitionedVertexProgramExecutor(graph, memory, vertexMemory, vertexProgram);
                    try {
                        // Iterates over all vertices and computes the vertex program on all non-partitioned vertices. For partitioned ones, the data is aggregated for the run below.
                        ScanMetrics jobResult = scanBuilder.execute().get();
                        long failures = jobResult.get(ScanMetrics.Metric.FAILURE);
                        if (failures > 0) {
                            throw new JanusGraphException("Failed to process [" + failures + "] vertices in vertex program iteration [" + iteration + "]. Computer is aborting.");
                        }
                        // Runs the vertex program on all aggregated, partitioned vertices.
                        programExecutor.run(numThreads, jobResult);
                        failures = jobResult.getCustom(PartitionedVertexProgramExecutor.PARTITION_VERTEX_POSTFAIL);
                        if (failures > 0) {
                            throw new JanusGraphException("Failed to process [" + failures + "] partitioned vertices in vertex program iteration [" + iteration + "]. Computer is aborting.");
                        }
                    } catch (Exception e) {
                        throw new JanusGraphException(e);
                    }
                    vertexMemory.completeIteration();
                    memory.completeSubRound();
                    try {
                        if (this.vertexProgram.terminate(this.memory)) {
                            break;
                        }
                    } finally {
                        memory.incrIteration();
                    }
                }
            }
        }
        // ##### Execute map-reduce jobs
        // Collect map jobs
        Map<MapReduce, FulgoraMapEmitter> mapJobs = new HashMap<>(mapReduces.size());
        for (MapReduce mapReduce : mapReduces) {
            if (mapReduce.doStage(MapReduce.Stage.MAP)) {
                FulgoraMapEmitter mapEmitter = new FulgoraMapEmitter<>(mapReduce.doStage(MapReduce.Stage.REDUCE));
                mapJobs.put(mapReduce, mapEmitter);
            }
        }
        // Execute map jobs
        jobId = name + "#map";
        try (VertexMapJob.Executor job = VertexMapJob.getVertexMapJob(graph, vertexMemory, mapJobs)) {
            StandardScanner.Builder scanBuilder = graph.getBackend().buildEdgeScanJob();
            scanBuilder.setJobId(jobId);
            scanBuilder.setNumProcessingThreads(numThreads);
            scanBuilder.setWorkBlockSize(readBatchSize);
            scanBuilder.setJob(job);
            try {
                ScanMetrics jobResult = scanBuilder.execute().get();
                long failures = jobResult.get(ScanMetrics.Metric.FAILURE);
                if (failures > 0) {
                    throw new JanusGraphException("Failed to process [" + failures + "] vertices in map phase. Computer is aborting.");
                }
                failures = jobResult.getCustom(VertexMapJob.MAP_JOB_FAILURE);
                if (failures > 0) {
                    throw new JanusGraphException("Failed to process [" + failures + "] individual map jobs. Computer is aborting.");
                }
            } catch (Exception e) {
                throw new JanusGraphException(e);
            }
            // Execute reduce phase and add to memory
            for (Map.Entry<MapReduce, FulgoraMapEmitter> mapJob : mapJobs.entrySet()) {
                FulgoraMapEmitter<?, ?> mapEmitter = mapJob.getValue();
                MapReduce mapReduce = mapJob.getKey();
                // sort results if a map output sort is defined
                mapEmitter.complete(mapReduce);
                if (mapReduce.doStage(MapReduce.Stage.REDUCE)) {
                    final FulgoraReduceEmitter<?, ?> reduceEmitter = new FulgoraReduceEmitter<>();
                    try (WorkerPool workers = new WorkerPool(numThreads)) {
                        workers.submit(() -> mapReduce.workerStart(MapReduce.Stage.REDUCE));
                        for (final Map.Entry queueEntry : mapEmitter.reduceMap.entrySet()) {
                            if (null == queueEntry)
                                break;
                            workers.submit(() -> mapReduce.reduce(queueEntry.getKey(), ((Iterable) queueEntry.getValue()).iterator(), reduceEmitter));
                        }
                        workers.submit(() -> mapReduce.workerEnd(MapReduce.Stage.REDUCE));
                    } catch (Exception e) {
                        throw new JanusGraphException("Exception while executing reduce phase", e);
                    }
                    // mapEmitter.reduceMap.entrySet().parallelStream().forEach(entry -> mapReduce.reduce(entry.getKey(), entry.getValue().iterator(), reduceEmitter));
                    // sort results if a reduce output sort is defined
                    reduceEmitter.complete(mapReduce);
                    mapReduce.addResultToMemory(this.memory, reduceEmitter.reduceQueue.iterator());
                } else {
                    mapReduce.addResultToMemory(this.memory, mapEmitter.mapQueue.iterator());
                }
            }
        }
        memory.attachReferenceElements(graph);
        // #### Write mutated properties back into graph
        Graph resultgraph = graph;
        if (persistMode == Persist.NOTHING && resultGraphMode == ResultGraph.NEW) {
            resultgraph = EmptyGraph.instance();
        } else if (persistMode != Persist.NOTHING && vertexProgram != null && !vertexProgram.getVertexComputeKeys().isEmpty()) {
            // First, create property keys in graph if they don't already exist
            JanusGraphManagement management = graph.openManagement();
            try {
                for (VertexComputeKey key : vertexProgram.getVertexComputeKeys()) {
                    if (!management.containsPropertyKey(key.getKey()))
                        log.warn("Property key [{}] is not part of the schema and will be created. It is advised to initialize all keys.", key.getKey());
                    management.getOrCreatePropertyKey(key.getKey());
                }
                management.commit();
            } finally {
                if (management != null && management.isOpen())
                    management.rollback();
            }
            // TODO: Filter based on VertexProgram
            Map<Long, Map<String, Object>> mutatedProperties = Maps.transformValues(vertexMemory.getMutableVertexProperties(), new Function<Map<String, Object>, Map<String, Object>>() {

                @Nullable
                @Override
                public Map<String, Object> apply(final Map<String, Object> o) {
                    return Maps.filterKeys(o, s -> !VertexProgramHelper.isTransientVertexComputeKey(s, vertexProgram.getVertexComputeKeys()));
                }
            });
            if (resultGraphMode == ResultGraph.ORIGINAL) {
                AtomicInteger failures = new AtomicInteger(0);
                try (WorkerPool workers = new WorkerPool(numThreads)) {
                    List<Map.Entry<Long, Map<String, Object>>> subset = new ArrayList<>(writeBatchSize / vertexProgram.getVertexComputeKeys().size());
                    int currentSize = 0;
                    for (Map.Entry<Long, Map<String, Object>> entry : mutatedProperties.entrySet()) {
                        subset.add(entry);
                        currentSize += entry.getValue().size();
                        if (currentSize >= writeBatchSize) {
                            workers.submit(new VertexPropertyWriter(subset, failures));
                            subset = new ArrayList<>(subset.size());
                            currentSize = 0;
                        }
                    }
                    if (!subset.isEmpty())
                        workers.submit(new VertexPropertyWriter(subset, failures));
                } catch (Exception e) {
                    throw new JanusGraphException("Exception while attempting to persist result into graph", e);
                }
                if (failures.get() > 0)
                    throw new JanusGraphException("Could not persist program results to graph. Check log for details.");
            } else if (resultGraphMode == ResultGraph.NEW) {
                resultgraph = graph.newTransaction();
                for (Map.Entry<Long, Map<String, Object>> vertexProperty : mutatedProperties.entrySet()) {
                    Vertex v = resultgraph.vertices(vertexProperty.getKey()).next();
                    for (Map.Entry<String, Object> prop : vertexProperty.getValue().entrySet()) {
                        if (prop.getValue() instanceof List) {
                            ((List) prop.getValue()).forEach(value -> v.property(VertexProperty.Cardinality.list, prop.getKey(), value));
                        } else {
                            v.property(VertexProperty.Cardinality.single, prop.getKey(), prop.getValue());
                        }
                    }
                }
            }
        }
        // update runtime and return the newly computed graph
        this.memory.setRuntime(System.currentTimeMillis() - time);
        this.memory.complete();
        return new DefaultComputerResult(resultgraph, this.memory);
    });
}
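
For context, a minimal sketch (not part of the indexed source) of how submit() is normally reached through TinkerPop's GraphComputer API. The in-memory backend configuration and the choice of PageRankVertexProgram are assumptions for illustration only; any vertex program would follow the same path through the method above.

import java.util.concurrent.ExecutionException;

import org.apache.tinkerpop.gremlin.process.computer.ComputerResult;
import org.apache.tinkerpop.gremlin.process.computer.ranking.pagerank.PageRankVertexProgram;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;

public class SubmitSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // assumption: an in-memory backend, purely for illustration
        JanusGraph graph = JanusGraphFactory.build().set("storage.backend", "inmemory").open();
        // graph.compute() hands back JanusGraph's FulgoraGraphComputer;
        // submit() kicks off the CompletableFuture shown in the method above
        ComputerResult result = graph.compute()
                .program(PageRankVertexProgram.build().create(graph))
                .submit()
                .get();
        result.memory().keys().forEach(System.out::println);
        graph.close();
    }
}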
Also used:
JanusGraphManagement (org.janusgraph.core.schema.JanusGraphManagement)
Vertex (org.apache.tinkerpop.gremlin.structure.Vertex)
HashMap (java.util.HashMap)
JanusGraphException (org.janusgraph.core.JanusGraphException)
ArrayList (java.util.ArrayList)
ScanMetrics (org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics)
MapReduce (org.apache.tinkerpop.gremlin.process.computer.MapReduce)
Function (com.google.common.base.Function)
List (java.util.List)
WorkerPool (org.janusgraph.graphdb.util.WorkerPool)
Graph (org.apache.tinkerpop.gremlin.structure.Graph)
StandardJanusGraph (org.janusgraph.graphdb.database.StandardJanusGraph)
EmptyGraph (org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph)
StandardScanner (org.janusgraph.diskstorage.keycolumnvalue.scan.StandardScanner)
AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
DefaultComputerResult (org.apache.tinkerpop.gremlin.process.computer.util.DefaultComputerResult)
VertexComputeKey (org.apache.tinkerpop.gremlin.process.computer.VertexComputeKey)
Map (java.util.Map)

Example 7 with ScanMetrics

use of org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics in project janusgraph by JanusGraph.

the class AbstractIndexManagementIT method testRemoveGraphIndex.

@Test
public void testRemoveGraphIndex() throws InterruptedException, BackendException, ExecutionException {
    tx.commit();
    mgmt.commit();
    // Load the "Graph of the Gods" sample data
    GraphOfTheGodsFactory.loadWithoutMixedIndex(graph, true);
    // Disable the "name" composite index
    JanusGraphManagement m = graph.openManagement();
    JanusGraphIndex nameIndex = m.getGraphIndex("name");
    m.updateIndex(nameIndex, SchemaAction.DISABLE_INDEX);
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to DISABLED
    assertTrue(ManagementSystem.awaitGraphIndexStatus(graph, "name").status(SchemaStatus.DISABLED).call().getSucceeded());
    // Remove index
    MapReduceIndexManagement mri = new MapReduceIndexManagement(graph);
    m = graph.openManagement();
    JanusGraphIndex index = m.getGraphIndex("name");
    ScanMetrics metrics = mri.updateIndex(index, SchemaAction.REMOVE_INDEX).get();
    assertEquals(12, metrics.getCustom(IndexRemoveJob.DELETED_RECORDS_COUNT));
}
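
As a companion sketch (not in the indexed source), the same ScanMetrics handle can be read after a REINDEX issued through the management API instead of MapReduceIndexManagement. This assumes an index that is still registered or enabled, unlike the disabled index removed in the test above; the built-in SUCCESS/FAILURE counters are part of the public ScanMetrics API.

// Hedged sketch: reindex via the management API and inspect the counters.
JanusGraphManagement m2 = graph.openManagement();
ScanMetrics reindexMetrics = m2.updateIndex(m2.getGraphIndex("name"), SchemaAction.REINDEX).get();
m2.commit();
long succeeded = reindexMetrics.get(ScanMetrics.Metric.SUCCESS);
long failed = reindexMetrics.get(ScanMetrics.Metric.FAILURE);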
Also used:
JanusGraphManagement (org.janusgraph.core.schema.JanusGraphManagement)
JanusGraphIndex (org.janusgraph.core.schema.JanusGraphIndex)
ScanMetrics (org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics)
Test (org.junit.Test)
JanusGraphBaseTest (org.janusgraph.graphdb.JanusGraphBaseTest)

Example 8 with ScanMetrics

use of org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics in project janusgraph by JanusGraph.

the class OLAPTest method testVertexScan.

@Test
public void testVertexScan() throws Exception {
    int numV = 100;
    int numE = generateRandomGraph(numV);
    final String DEGREE_COUNT = "degree";
    final String VERTEX_COUNT = "numV";
    clopen();
    ScanMetrics result1 = executeScanJob(new VertexScanJob() {

        @Override
        public void process(JanusGraphVertex vertex, ScanMetrics metrics) {
            long outDegree = vertex.query().labels("knows").direction(Direction.OUT).edgeCount();
            assertEquals(0, vertex.query().labels("knows").direction(Direction.IN).edgeCount());
            assertEquals(1, vertex.query().labels("uid").propertyCount());
            assertTrue(vertex.<Integer>property("uid").orElse(0) > 0);
            metrics.incrementCustom(DEGREE_COUNT, outDegree);
            metrics.incrementCustom(VERTEX_COUNT);
        }

        @Override
        public void getQueries(QueryContainer queries) {
            queries.addQuery().labels("knows").direction(Direction.OUT).edges();
            queries.addQuery().keys("uid").properties();
        }

        @Override
        public VertexScanJob clone() {
            return this;
        }
    });
    assertEquals(numV, result1.getCustom(VERTEX_COUNT));
    assertEquals(numE, result1.getCustom(DEGREE_COUNT));
    ScanMetrics result2 = executeScanJob(new VertexScanJob() {

        @Override
        public void process(JanusGraphVertex vertex, ScanMetrics metrics) {
            metrics.incrementCustom(VERTEX_COUNT);
            assertEquals(1, vertex.query().labels("numvals").propertyCount());
            int numvals = vertex.value("numvals");
            assertEquals(numvals, vertex.query().labels("values").propertyCount());
        }

        @Override
        public void getQueries(QueryContainer queries) {
            queries.addQuery().keys("values").properties();
            queries.addQuery().keys("numvals").properties();
        }

        @Override
        public VertexScanJob clone() {
            return this;
        }
    });
    assertEquals(numV, result2.getCustom(VERTEX_COUNT));
}
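
The pattern generalizes: any stateless VertexScanJob can publish counters through ScanMetrics. Below is a minimal standalone sketch written against the same interface; executeScanJob(...) is a test helper whose implementation is not shown here, so only the public VertexScanJob contract is used, and the imports are inferred from the classes appearing above.

import org.janusgraph.core.JanusGraphVertex;
import org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics;
import org.janusgraph.graphdb.olap.QueryContainer;
import org.janusgraph.graphdb.olap.VertexScanJob;

public class VertexCountJob implements VertexScanJob {
    public static final String COUNT = "count";

    @Override
    public void process(JanusGraphVertex vertex, ScanMetrics metrics) {
        // one increment per vertex the scanner hands us
        metrics.incrementCustom(COUNT);
    }

    @Override
    public void getQueries(QueryContainer queries) {
        // request only properties so the scan loads minimal data per vertex
        queries.addQuery().properties();
    }

    @Override
    public VertexScanJob clone() {
        return this; // stateless, so sharing one instance across workers is safe
    }
}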
Also used:
ScanMetrics (org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics)
Test (org.junit.Test)
JanusGraphBaseTest (org.janusgraph.graphdb.JanusGraphBaseTest)

Example 9 with ScanMetrics

use of org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics in project janusgraph by JanusGraph.

the class SimpleScanJob method runBasicTests.

public static void runBasicTests(int keys, int columns, SimpleScanJobRunner runner) throws InterruptedException, ExecutionException, BackendException, IOException {
    Configuration conf1 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128))));
    ScanMetrics result1 = runner.run(new SimpleScanJob(), conf1, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result1.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * columns / 4 * 3, result1.getCustom(SimpleScanJob.TOTAL_COUNT));
    /* These assertions are not valid on Hadoop. The Hadoop implementation uses
     * Hadoop Counters to store ScanMetrics. These Counters are shared
     * clusterwide. Hence there will be as many setups and teardowns as there
     * are input splits -- generally more than one. So these don't apply:
     *
     *  assertEquals(1, result1.getCustom(SimpleScanJob.SETUP_COUNT));
     *  assertEquals(1, result1.getCustom(SimpleScanJob.TEARDOWN_COUNT));
     *
     * However, even on Hadoop, we can expect both of the following to hold:
     * 1. The number of setups must equal the number of teardowns
     * 2. The number of setups (teardowns) must be positive
     */
    assertEquals("Number of ScanJob setup calls must equal number of ScanJob teardown calls", result1.getCustom(SimpleScanJob.SETUP_COUNT), result1.getCustom(SimpleScanJob.TEARDOWN_COUNT));
    assertTrue("Number of ScanJob setup/teardown calls must be positive", 0 < result1.getCustom(SimpleScanJob.SETUP_COUNT));
    Configuration conf2 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(5)));
    ScanMetrics result2 = runner.run(new SimpleScanJob(), conf2, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result2.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 5, result2.getCustom(SimpleScanJob.TOTAL_COUNT));
    Configuration conf3 = getJobConf(ImmutableList.of(new SliceQuery(KeyValueStoreUtil.getBuffer(0), KeyValueStoreUtil.getBuffer(5))));
    ScanMetrics result3 = runner.run(new SimpleScanJob(), conf3, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result3.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 5, result3.getCustom(SimpleScanJob.TOTAL_COUNT));
    Configuration conf4 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(0), KeyValueStoreUtil.getBuffer(5))));
    ScanMetrics result4 = runner.run(new SimpleScanJob(), conf4, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result4.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 6, result4.getCustom(SimpleScanJob.TOTAL_COUNT));
    Configuration conf5 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(2), KeyValueStoreUtil.getBuffer(4)), new SliceQuery(KeyValueStoreUtil.getBuffer(6), KeyValueStoreUtil.getBuffer(8)), new SliceQuery(KeyValueStoreUtil.getBuffer(10), KeyValueStoreUtil.getBuffer(20)).setLimit(4)));
    ScanMetrics result5 = runner.run(new SimpleScanJob(), conf5, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result5.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 9, result5.getCustom(SimpleScanJob.TOTAL_COUNT));
    Configuration conf6 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(5)), 2L);
    ScanMetrics result6 = runner.run(new SimpleScanJob(), conf6, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys / 2, result6.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys / 2 * 5, result6.getCustom(SimpleScanJob.TOTAL_COUNT));
    Configuration conf7 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(2), KeyValueStoreUtil.getBuffer(4)), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35)), new SliceQuery(KeyValueStoreUtil.getBuffer(36), KeyValueStoreUtil.getBuffer(40)).setLimit(1)));
    ScanMetrics result7 = runner.run(new SimpleScanJob(), conf7, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result7.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 3 + keys / 2 * 5, result7.getCustom(SimpleScanJob.TOTAL_COUNT));
    Configuration conf8 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35))), 2L, 1L);
    ScanMetrics result8 = runner.run(new SimpleScanJob(), conf8, SimpleScanJob.class.getName() + "#ROOT_NS");
    // k -> KeyValueStoreUtil.getID(k) % 2 == 1));
    assertEquals(keys / 2, result8.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys / 2 * 5, result8.getCustom(SimpleScanJob.TOTAL_COUNT));
    Configuration conf9 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35))), 2L);
    // k -> KeyValueStoreUtil.getID(k) % 2 == 0));
    ScanMetrics result9 = runner.run(new SimpleScanJob(), conf9, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys / 2, result9.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys / 2, result9.getCustom(SimpleScanJob.TOTAL_COUNT));
    try {
        Configuration conf10 = getJobConf(ImmutableList.of(new SliceQuery(StaticArrayBuffer.of(new byte[] { (byte) 2 }), BufferUtil.oneBuffer(1)), new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(1))));
        runner.run(new SimpleScanJob(), conf10, SimpleScanJob.class.getName() + "#ROOT_NS");
        fail();
    } catch (Exception e) {
    // assertTrue(e instanceof ExecutionException && e.getCause() instanceof IllegalArgumentException);
    }
}
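
The arithmetic in these assertions follows from SliceQuery semantics: each query covers the half-open column range [sliceStart, sliceEnd) for every row key, and setLimit(n) caps the columns returned per key, which is why conf2 (limit 5) yields TOTAL_COUNT == keys * 5. A small hedged sketch of that invariant, reusing the buffer helpers above:

// Hedged sketch of the per-key limit the assertions above rely on.
SliceQuery full = new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128));
SliceQuery limited = full.setLimit(5); // at most 5 columns returned per row key
assert limited.hasLimit() && limited.getLimit() == 5;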
Also used:
CommonsConfiguration (org.janusgraph.diskstorage.configuration.backend.CommonsConfiguration)
BaseConfiguration (org.apache.commons.configuration.BaseConfiguration)
ScanMetrics (org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics)
SliceQuery (org.janusgraph.diskstorage.keycolumnvalue.SliceQuery)
IOException (java.io.IOException)
ExecutionException (java.util.concurrent.ExecutionException)

Aggregations

ScanMetrics (org.janusgraph.diskstorage.keycolumnvalue.scan.ScanMetrics): 9
Test (org.junit.Test): 7
JanusGraphBaseTest (org.janusgraph.graphdb.JanusGraphBaseTest): 6
JanusGraphManagement (org.janusgraph.core.schema.JanusGraphManagement): 5
PropertyKey (org.janusgraph.core.PropertyKey): 3
JanusGraphIndex (org.janusgraph.core.schema.JanusGraphIndex): 3
RelationTypeIndex (org.janusgraph.core.schema.RelationTypeIndex): 3
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
EdgeLabel (org.janusgraph.core.EdgeLabel): 2
JanusGraphVertex (org.janusgraph.core.JanusGraphVertex): 2
Function (com.google.common.base.Function): 1
IOException (java.io.IOException): 1
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
List (java.util.List): 1
Map (java.util.Map): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
BaseConfiguration (org.apache.commons.configuration.BaseConfiguration): 1
MapReduce (org.apache.tinkerpop.gremlin.process.computer.MapReduce): 1
VertexComputeKey (org.apache.tinkerpop.gremlin.process.computer.VertexComputeKey): 1