Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanMetrics in project titan by thinkaurelius.
In class AbstractIndexManagementIT, method testRepairGraphIndex:
@Test
public void testRepairGraphIndex() throws InterruptedException, BackendException, ExecutionException {
    tx.commit();
    mgmt.commit();
    // Load the "Graph of the Gods" sample data (WITHOUT mixed index coverage)
    GraphOfTheGodsFactory.loadWithoutMixedIndex(graph, true);
    // Create and enable a composite graph index on age
    TitanManagement m = graph.openManagement();
    PropertyKey age = m.getPropertyKey("age");
    m.buildIndex("verticesByAge", Vertex.class).addKey(age).buildCompositeIndex();
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to REGISTERED
    assertTrue(ManagementSystem.awaitGraphIndexStatus(graph, "verticesByAge").status(SchemaStatus.REGISTERED).call().getSucceeded());
    m = graph.openManagement();
    TitanGraphIndex index = m.getGraphIndex("verticesByAge");
    m.updateIndex(index, SchemaAction.ENABLE_INDEX);
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to ENABLED
    assertTrue(ManagementSystem.awaitGraphIndexStatus(graph, "verticesByAge").status(SchemaStatus.ENABLED).call().getSucceeded());
    // This query hits the index but returns nothing: the data predates the index and no repair has run yet
    assertFalse(graph.query().has("age", 10000).vertices().iterator().hasNext());
    // Repair (reindex) with MapReduce
    MapReduceIndexManagement mri = new MapReduceIndexManagement(graph);
    m = graph.openManagement();
    index = m.getGraphIndex("verticesByAge");
    ScanMetrics metrics = mri.updateIndex(index, SchemaAction.REINDEX).get();
    assertEquals(6, metrics.getCustom(IndexRepairJob.ADDED_RECORDS_COUNT));
    // Test the index
    Iterable<TitanVertex> hits = graph.query().has("age", 4500).vertices();
    assertNotNull(hits);
    assertEquals(1, Iterables.size(hits));
    TitanVertex v = Iterables.getOnlyElement(hits);
    assertNotNull(v);
    assertEquals("neptune", v.value("name"));
}
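The ScanMetrics returned by the reindex job carries both built-in and custom counters. Below is a minimal sketch of defensive counter checks after a REINDEX; it assumes the built-in ScanMetrics.Metric.FAILURE counter tracks failed worker transactions alongside the custom IndexRepairJob.ADDED_RECORDS_COUNT used in the test above.

    // Sketch: inspect the metrics of a finished REINDEX job before trusting the index.
    // ScanMetrics.Metric.FAILURE is assumed to be the built-in failure counter.
    ScanMetrics metrics = mri.updateIndex(index, SchemaAction.REINDEX).get();
    long added = metrics.getCustom(IndexRepairJob.ADDED_RECORDS_COUNT);
    long failures = metrics.get(ScanMetrics.Metric.FAILURE);
    if (failures > 0) {
        throw new IllegalStateException(failures + " transactions failed during reindex");
    }
    System.out.println("Reindex added " + added + " index records");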
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanMetrics in project titan by thinkaurelius.
In class AbstractIndexManagementIT, method testRepairRelationIndex:
@Test
public void testRepairRelationIndex() throws InterruptedException, BackendException, ExecutionException {
    tx.commit();
    mgmt.commit();
    // Load the "Graph of the Gods" sample data (WITHOUT mixed index coverage)
    GraphOfTheGodsFactory.loadWithoutMixedIndex(graph, true);
    // Create and enable a relation index on lives edges by reason
    TitanManagement m = graph.openManagement();
    PropertyKey reason = m.getPropertyKey("reason");
    EdgeLabel lives = m.getEdgeLabel("lives");
    m.buildEdgeIndex(lives, "livesByReason", Direction.BOTH, Order.decr, reason);
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to REGISTERED
    assertTrue(ManagementSystem.awaitRelationIndexStatus(graph, "livesByReason", "lives").status(SchemaStatus.REGISTERED).call().getSucceeded());
    m = graph.openManagement();
    RelationTypeIndex index = m.getRelationIndex(m.getRelationType("lives"), "livesByReason");
    m.updateIndex(index, SchemaAction.ENABLE_INDEX);
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to ENABLED
    assertTrue(ManagementSystem.awaitRelationIndexStatus(graph, "livesByReason", "lives").status(SchemaStatus.ENABLED).call().getSucceeded());
    // A query hitting the index would return nothing here, since the data predates the index:
    //assertFalse(graph.query().has("reason", "no fear of death").edges().iterator().hasNext());
    // Repair (reindex) with MapReduce
    MapReduceIndexManagement mri = new MapReduceIndexManagement(graph);
    m = graph.openManagement();
    index = m.getRelationIndex(m.getRelationType("lives"), "livesByReason");
    ScanMetrics metrics = mri.updateIndex(index, SchemaAction.REINDEX).get();
    assertEquals(8, metrics.getCustom(IndexRepairJob.ADDED_RECORDS_COUNT));
}
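Both repair tests block on the await* watchers with their default settings. The watchers also accept an explicit timeout; the following sketch assumes the fluent timeout(long, ChronoUnit) setter documented for Titan 1.0's ManagementSystem status watchers.

    import java.time.temporal.ChronoUnit;

    // Wait up to ten minutes for the relation index to reach ENABLED.
    boolean enabled = ManagementSystem.awaitRelationIndexStatus(graph, "livesByReason", "lives")
            .status(SchemaStatus.ENABLED)
            .timeout(10, ChronoUnit.MINUTES)
            .call()
            .getSucceeded();
    if (!enabled) {
        throw new IllegalStateException("Index livesByReason did not reach ENABLED in time");
    }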
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanMetrics in project titan by thinkaurelius.
In class AbstractIndexManagementIT, method testRemoveRelationIndex:
@Test
public void testRemoveRelationIndex() throws InterruptedException, BackendException, ExecutionException {
    tx.commit();
    mgmt.commit();
    // Load the "Graph of the Gods" sample data
    GraphOfTheGodsFactory.loadWithoutMixedIndex(graph, true);
    // Disable the "battlesByTime" index
    TitanManagement m = graph.openManagement();
    RelationType battled = m.getRelationType("battled");
    RelationTypeIndex battlesByTime = m.getRelationIndex(battled, "battlesByTime");
    m.updateIndex(battlesByTime, SchemaAction.DISABLE_INDEX);
    m.commit();
    graph.tx().commit();
    // Block until the SchemaStatus transitions to DISABLED
    assertTrue(ManagementSystem.awaitRelationIndexStatus(graph, "battlesByTime", "battled").status(SchemaStatus.DISABLED).call().getSucceeded());
    // Remove index
    MapReduceIndexManagement mri = new MapReduceIndexManagement(graph);
    m = graph.openManagement();
    battled = m.getRelationType("battled");
    battlesByTime = m.getRelationIndex(battled, "battlesByTime");
    ScanMetrics metrics = mri.updateIndex(battlesByTime, SchemaAction.REMOVE_INDEX).get();
    assertEquals(6, metrics.getCustom(IndexRemoveJob.DELETED_RECORDS_COUNT));
}
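MapReduceIndexManagement runs the removal as a Hadoop job. For small graphs, the documented single-machine alternative drives the same IndexRemoveJob through the management API. A sketch, assuming TitanManagement.updateIndex returns an IndexJobFuture whose get() yields the same ScanMetrics:

    // Sketch: single-machine REMOVE_INDEX via the management API instead of MapReduce.
    TitanManagement m2 = graph.openManagement();
    RelationTypeIndex idx = m2.getRelationIndex(m2.getRelationType("battled"), "battlesByTime");
    ScanMetrics removeMetrics = m2.updateIndex(idx, SchemaAction.REMOVE_INDEX).get();
    m2.commit();
    System.out.println("Deleted " + removeMetrics.getCustom(IndexRemoveJob.DELETED_RECORDS_COUNT) + " index records");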
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanMetrics in project titan by thinkaurelius.
In class SimpleScanJob, method runBasicTests:
public static void runBasicTests(int keys, int columns, SimpleScanJobRunner runner) throws InterruptedException, ExecutionException, BackendException, IOException {
    // conf1: one slice covering the entire column range
    Configuration conf1 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128))));
    ScanMetrics result1 = runner.run(new SimpleScanJob(), conf1, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result1.getCustom(SimpleScanJob.KEY_COUNT));
    // The store holds 3/4 of the theoretical keys * columns entries in total
    assertEquals(keys * columns / 4 * 3, result1.getCustom(SimpleScanJob.TOTAL_COUNT));
    /* These assertions are not valid on Hadoop. The Hadoop implementation uses
     * Hadoop Counters to store ScanMetrics. These Counters are shared
     * clusterwide. Hence there will be as many setups and teardowns as there
     * are input splits -- generally more than one. So these don't apply:
     *
     * assertEquals(1, result1.getCustom(SimpleScanJob.SETUP_COUNT));
     * assertEquals(1, result1.getCustom(SimpleScanJob.TEARDOWN_COUNT));
     *
     * However, even on Hadoop, we can expect both of the following to hold:
     * 1. The number of setups must equal the number of teardowns
     * 2. The number of setups (teardowns) must be positive
     */
    assertEquals("Number of ScanJob setup calls must equal number of ScanJob teardown calls", result1.getCustom(SimpleScanJob.SETUP_COUNT), result1.getCustom(SimpleScanJob.TEARDOWN_COUNT));
    assertTrue("Number of ScanJob setup/teardown calls must be positive", 0 < result1.getCustom(SimpleScanJob.SETUP_COUNT));
    // conf2: full-range slice limited to 5 entries per key
    Configuration conf2 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(5)));
    ScanMetrics result2 = runner.run(new SimpleScanJob(), conf2, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result2.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 5, result2.getCustom(SimpleScanJob.TOTAL_COUNT));
    // conf3: a five-column slice => 5 entries per key
    Configuration conf3 = getJobConf(ImmutableList.of(new SliceQuery(KeyValueStoreUtil.getBuffer(0), KeyValueStoreUtil.getBuffer(5))));
    ScanMetrics result3 = runner.run(new SimpleScanJob(), conf3, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result3.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 5, result3.getCustom(SimpleScanJob.TOTAL_COUNT));
    // conf4: a limit-1 slice plus a five-column slice => 6 entries per key
    Configuration conf4 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(0), KeyValueStoreUtil.getBuffer(5))));
    ScanMetrics result4 = runner.run(new SimpleScanJob(), conf4, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result4.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 6, result4.getCustom(SimpleScanJob.TOTAL_COUNT));
    // conf5: four slices contributing 1 + 2 + 2 + 4 = 9 entries per key
    Configuration conf5 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(2), KeyValueStoreUtil.getBuffer(4)), new SliceQuery(KeyValueStoreUtil.getBuffer(6), KeyValueStoreUtil.getBuffer(8)), new SliceQuery(KeyValueStoreUtil.getBuffer(10), KeyValueStoreUtil.getBuffer(20)).setLimit(4)));
    ScanMetrics result5 = runner.run(new SimpleScanJob(), conf5, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result5.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 9, result5.getCustom(SimpleScanJob.TOTAL_COUNT));
    // conf6: limit-5 slice; the trailing argument restricts the scan to half the keys
    Configuration conf6 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(5)), 2L);
    ScanMetrics result6 = runner.run(new SimpleScanJob(), conf6, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys / 2, result6.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys / 2 * 5, result6.getCustom(SimpleScanJob.TOTAL_COUNT));
    // conf7: every key matches the first two slices (1 + 2 entries); only half
    // the keys have columns in the two high slices (4 + 1 entries)
    Configuration conf7 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(2), KeyValueStoreUtil.getBuffer(4)), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35)), new SliceQuery(KeyValueStoreUtil.getBuffer(36), KeyValueStoreUtil.getBuffer(40)).setLimit(1)));
    ScanMetrics result7 = runner.run(new SimpleScanJob(), conf7, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys, result7.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys * 3 + keys / 2 * 5, result7.getCustom(SimpleScanJob.TOTAL_COUNT));
    // conf8: restricted to the odd half of the key space => 1 + 4 = 5 entries per remaining key
    Configuration conf8 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35))), 2L, 1L);
    ScanMetrics result8 = runner.run(new SimpleScanJob(), conf8, SimpleScanJob.class.getName() + "#ROOT_NS");
    // k -> KeyValueStoreUtil.getID(k) % 2 == 1));
    assertEquals(keys / 2, result8.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys / 2 * 5, result8.getCustom(SimpleScanJob.TOTAL_COUNT));
    // conf9: restricted to the even half of the key space, which has no columns
    // in the [31, 35) slice => only the limit-1 slice contributes
    Configuration conf9 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35))), 2L);
    // k -> KeyValueStoreUtil.getID(k) % 2 == 0));
    ScanMetrics result9 = runner.run(new SimpleScanJob(), conf9, SimpleScanJob.class.getName() + "#ROOT_NS");
    assertEquals(keys / 2, result9.getCustom(SimpleScanJob.KEY_COUNT));
    assertEquals(keys / 2, result9.getCustom(SimpleScanJob.TOTAL_COUNT));
    try {
        // conf10: an invalid query combination; the run must throw
        Configuration conf10 = getJobConf(ImmutableList.of(new SliceQuery(StaticArrayBuffer.of(new byte[] { (byte) 2 }), BufferUtil.oneBuffer(1)), new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(1))));
        runner.run(new SimpleScanJob(), conf10, SimpleScanJob.class.getName() + "#ROOT_NS");
        fail();
    } catch (Exception e) {
        //assertTrue(e instanceof ExecutionException && e.getCause() instanceof IllegalArgumentException);
    }
}
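The expected counts above come from SimpleScanJob incrementing custom ScanMetrics counters inside process. The following is a minimal counting job sketched against the ScanJob contract implied by this test; the single-argument incrementCustom overload, the default worker hooks, and the clone requirement are assumptions, and SimpleScanJob's real implementation additionally supports key filters and configurable slices.

    import java.util.List;
    import java.util.Map;

    import com.google.common.collect.ImmutableList;
    import com.thinkaurelius.titan.diskstorage.EntryList;
    import com.thinkaurelius.titan.diskstorage.StaticBuffer;
    import com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery;
    import com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanJob;
    import com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanMetrics;
    import com.thinkaurelius.titan.diskstorage.util.BufferUtil;

    // A counting ScanJob in the spirit of SimpleScanJob (a sketch).
    public class CountingScanJob implements ScanJob {

        public static final String KEY_COUNT = "key-count";
        public static final String TOTAL_COUNT = "total-count";

        @Override
        public List<SliceQuery> getQueries() {
            // Scan the full column range of every key, mirroring conf1 above.
            return ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)));
        }

        @Override
        public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
            metrics.incrementCustom(KEY_COUNT);
            for (EntryList columns : entries.values()) {
                metrics.incrementCustom(TOTAL_COUNT, columns.size());
            }
        }

        @Override
        public CountingScanJob clone() {
            // Jobs are cloned per worker; this one is stateless, so a fresh instance suffices.
            return new CountingScanJob();
        }
    }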
Use of com.thinkaurelius.titan.diskstorage.keycolumnvalue.scan.ScanMetrics in project titan by thinkaurelius.
In class IndexRemoveJob, method process:
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    // The queries are already tailored enough => everything they return should be removed
    try {
        BackendTransaction mutator = writeTx.getTxHandle();
        final List<Entry> deletions;
        if (entries.size() == 1) {
            deletions = Iterables.getOnlyElement(entries.values());
        } else {
            int size = IteratorUtils.stream(entries.values().iterator()).map(e -> e.size()).reduce(0, (x, y) -> x + y);
            deletions = new ArrayList<>(size);
            entries.values().forEach(e -> deletions.addAll(e));
        }
        metrics.incrementCustom(DELETED_RECORDS_COUNT, deletions.size());
        if (isRelationTypeIndex()) {
            mutator.mutateEdges(key, KCVSCache.NO_ADDITIONS, deletions);
        } else {
            mutator.mutateIndex(key, KCVSCache.NO_ADDITIONS, deletions);
        }
    } catch (final Exception e) {
        mgmt.rollback();
        writeTx.rollback();
        metrics.incrementCustom(FAILED_TX);
        throw new TitanException(e.getMessage(), e);
    }
}
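A small design note: the multi-query branch pre-computes the combined entry count so the ArrayList is allocated once instead of growing repeatedly. With plain Java 8 streams the same sizing and copy can be written more compactly; a behaviorally equivalent sketch:

    // Equivalent sizing and copy using streams and method references:
    int size = entries.values().stream().mapToInt(EntryList::size).sum();
    List<Entry> deletions = new ArrayList<>(size);
    entries.values().forEach(deletions::addAll);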