Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
In class ExpirationCacheTest, method testGracePeriod:
private void testGracePeriod(Duration graceWait) throws Exception {
final int minCleanupTriggerCalls = 5;
final int numKeys = 100, numCols = 10;
loadStore(numKeys, numCols);
// Replace the cache with one configured with the desired expiration time and grace period
cache = getCache(store, Duration.ofDays(200), graceWait);
final StaticBuffer key = BufferUtil.getIntBuffer(81);
final List<StaticBuffer> keys = new ArrayList<>();
keys.add(key);
keys.add(BufferUtil.getIntBuffer(37));
keys.add(BufferUtil.getIntBuffer(2));
SliceQuery query = getQuery(2, 8);
verifyResults(key, keys, query, 6);
// If we modify through the cache store...
CacheTransaction tx = getCacheTx();
cache.mutateEntries(key, KeyColumnValueStore.NO_ADDITIONS, Lists.newArrayList(getEntry(4, 4)), tx);
tx.commit();
Instant utime = times.getTime();
store.resetCounter();
// ...invalidation should happen and the result set is updated immediately
verifyResults(key, keys, query, 5);
assertEquals(2, store.getSliceCalls());
// however, the entry for this key is now expired, so repeated calls must go through to the store
verifyResults(key, keys, query, 5);
assertEquals(4, store.getSliceCalls());
// however, once we sleep past the grace period and trigger a cleanup...
times.sleepPast(utime.plus(graceWait));
for (int t = 0; t < minCleanupTriggerCalls; t++) {
assertEquals(5, cache.getSlice(new KeySliceQuery(key, query), tx).size());
times.sleepFor(Duration.ofMillis(5));
}
// ...the cache should cache results again
store.resetCounter();
verifyResults(key, keys, query, 5);
assertEquals(0, store.getSliceCalls());
verifyResults(key, keys, query, 5);
assertEquals(0, store.getSliceCalls());
}
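The assertEquals(2, ...) and assertEquals(4, ...) checks above imply that each verifyResults call issues exactly two backend slice calls on a cache miss: one single-key read and one multi-key read. A plausible sketch of that helper, consistent with those counts (the body is an assumption, not the actual test code):

private void verifyResults(StaticBuffer key, List<StaticBuffer> keys, SliceQuery query, int expectedResults) throws Exception {
    CacheTransaction tx = getCacheTx();
    // Single-key read through the cache (one backend slice call on a miss)
    assertEquals(expectedResults, cache.getSlice(new KeySliceQuery(key, query), tx).size());
    // Multi-key read through the cache (a second backend slice call on a miss)
    Map<StaticBuffer, EntryList> results = cache.getSlice(keys, query, tx);
    assertEquals(keys.size(), results.size());
    assertEquals(expectedResults, results.get(key).size());
    tx.commit();
}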
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
In class SimpleScanJob, method workerIterationStart:
@Override
public void workerIterationStart(Configuration config, Configuration graphConfig, ScanMetrics metrics) {
assertNotNull(config);
metrics.incrementCustom(SETUP_COUNT);
if (config.has(HEX_QUERIES)) {
String[] queryStrings = config.get(HEX_QUERIES).split(":");
List<SliceQuery> queries = new LinkedList<>();
for (String qString : queryStrings) {
String[] queryTokens = qString.split("/");
StaticBuffer start = StaticArrayBuffer.of(Hex.hexToBytes(queryTokens[0]));
StaticBuffer end = StaticArrayBuffer.of(Hex.hexToBytes(queryTokens[1]));
SliceQuery query = new SliceQuery(start, end);
int limit = Integer.parseInt(queryTokens[2]);
if (limit >= 0) {
query.setLimit(limit);
}
queries.add(query);
}
qs = queries;
}
if (config.has(KEY_FILTER_ID_MODULUS)) {
final long mod = config.get(KEY_FILTER_ID_MODULUS);
final long modVal;
if (config.has(KEY_FILTER_ID_MODULAR_VALUE)) {
modVal = config.get(KEY_FILTER_ID_MODULAR_VALUE);
} else {
modVal = 0;
}
keyFilter = k -> KeyValueStoreUtil.getID(k) % mod == modVal;
}
}
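The HEX_QUERIES value parsed above is a ':'-separated list of start/end/limit triples, with the start and end bounds hex-encoded and a negative limit meaning "unlimited". An illustrative encoder mirroring that parser (this helper is an assumption; only the format itself comes from the code above):

// Illustrative: encodes queries in the format workerIterationStart expects.
static String encodeHexQueries(List<SliceQuery> queries) {
    StringBuilder sb = new StringBuilder();
    for (SliceQuery q : queries) {
        if (sb.length() > 0) sb.append(':');
        sb.append(Hex.bytesToHex(q.getSliceStart().as(StaticBuffer.ARRAY_FACTORY))).append('/');
        sb.append(Hex.bytesToHex(q.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY))).append('/');
        sb.append(q.hasLimit() ? q.getLimit() : -1); // negative limit: no limit is set on parse
    }
    return sb.toString();
}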
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
In class SimpleScanJob, method runBasicTests:
public static void runBasicTests(int keys, int columns, SimpleScanJobRunner runner) throws InterruptedException, ExecutionException, BackendException, IOException {
Configuration conf1 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128))));
ScanMetrics result1 = runner.run(new SimpleScanJob(), conf1, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys, result1.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys * columns / 4 * 3, result1.getCustom(SimpleScanJob.TOTAL_COUNT));
/* These assertions are not valid on Hadoop. The Hadoop implementation uses
* Hadoop Counters to store ScanMetrics. These Counters are shared
* clusterwide. Hence there will be as many setups and teardowns as there
* are input splits -- generally more than one. So these don't apply:
*
* assertEquals(1, result1.getCustom(SimpleScanJob.SETUP_COUNT));
* assertEquals(1, result1.getCustom(SimpleScanJob.TEARDOWN_COUNT));
*
* However, even on Hadoop, we can expect both of the following to hold:
* 1. The number of setups must equal the number of teardowns
* 2. The number of setups (teardowns) must be positive
*/
assertEquals("Number of ScanJob setup calls must equal number of ScanJob teardown calls", result1.getCustom(SimpleScanJob.SETUP_COUNT), result1.getCustom(SimpleScanJob.TEARDOWN_COUNT));
assertTrue("Number of ScanJob setup/teardown calls must be positive", 0 < result1.getCustom(SimpleScanJob.SETUP_COUNT));
Configuration conf2 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(5)));
ScanMetrics result2 = runner.run(new SimpleScanJob(), conf2, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys, result2.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys * 5, result2.getCustom(SimpleScanJob.TOTAL_COUNT));
Configuration conf3 = getJobConf(ImmutableList.of(new SliceQuery(KeyValueStoreUtil.getBuffer(0), KeyValueStoreUtil.getBuffer(5))));
ScanMetrics result3 = runner.run(new SimpleScanJob(), conf3, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys, result3.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys * 5, result3.getCustom(SimpleScanJob.TOTAL_COUNT));
Configuration conf4 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(0), KeyValueStoreUtil.getBuffer(5))));
ScanMetrics result4 = runner.run(new SimpleScanJob(), conf4, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys, result4.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys * 6, result4.getCustom(SimpleScanJob.TOTAL_COUNT));
Configuration conf5 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(2), KeyValueStoreUtil.getBuffer(4)), new SliceQuery(KeyValueStoreUtil.getBuffer(6), KeyValueStoreUtil.getBuffer(8)), new SliceQuery(KeyValueStoreUtil.getBuffer(10), KeyValueStoreUtil.getBuffer(20)).setLimit(4)));
ScanMetrics result5 = runner.run(new SimpleScanJob(), conf5, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys, result5.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys * 9, result5.getCustom(SimpleScanJob.TOTAL_COUNT));
Configuration conf6 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(5)), 2L);
ScanMetrics result6 = runner.run(new SimpleScanJob(), conf6, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys / 2, result6.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys / 2 * 5, result6.getCustom(SimpleScanJob.TOTAL_COUNT));
Configuration conf7 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(2), KeyValueStoreUtil.getBuffer(4)), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35)), new SliceQuery(KeyValueStoreUtil.getBuffer(36), KeyValueStoreUtil.getBuffer(40)).setLimit(1)));
ScanMetrics result7 = runner.run(new SimpleScanJob(), conf7, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys, result7.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys * 3 + keys / 2 * 5, result7.getCustom(SimpleScanJob.TOTAL_COUNT));
Configuration conf8 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35))), 2L, 1L);
ScanMetrics result8 = runner.run(new SimpleScanJob(), conf8, SimpleScanJob.class.getName() + "#ROOT_NS");
// conf8: modulus 2 with modular value 1, i.e. key filter k -> KeyValueStoreUtil.getID(k) % 2 == 1
assertEquals(keys / 2, result8.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys / 2 * 5, result8.getCustom(SimpleScanJob.TOTAL_COUNT));
Configuration conf9 = getJobConf(ImmutableList.of(new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(128)).setLimit(1), new SliceQuery(KeyValueStoreUtil.getBuffer(31), KeyValueStoreUtil.getBuffer(35))), 2L);
// conf9: modulus 2 with the default modular value 0, i.e. key filter k -> KeyValueStoreUtil.getID(k) % 2 == 0
ScanMetrics result9 = runner.run(new SimpleScanJob(), conf9, SimpleScanJob.class.getName() + "#ROOT_NS");
assertEquals(keys / 2, result9.getCustom(SimpleScanJob.KEY_COUNT));
assertEquals(keys / 2, result9.getCustom(SimpleScanJob.TOTAL_COUNT));
try {
Configuration conf10 = getJobConf(ImmutableList.of(new SliceQuery(StaticArrayBuffer.of(new byte[] { (byte) 2 }), BufferUtil.oneBuffer(1)), new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(1))));
runner.run(new SimpleScanJob(), conf10, SimpleScanJob.class.getName() + "#ROOT_NS");
fail("Expected this invalid query combination to be rejected");
} catch (Exception e) {
// Expected. Depending on the runner this surfaces as an ExecutionException
// wrapping an IllegalArgumentException:
// assertTrue(e instanceof ExecutionException && e.getCause() instanceof IllegalArgumentException);
}
}
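getJobConf is invoked above with one to three arguments: the query list, optionally a key-filter modulus, and optionally a modular value. A sketch of how such a helper could populate the configuration read back in workerIterationStart (the ModifiableConfiguration wiring is an assumption; encodeHexQueries is the illustrative encoder sketched earlier):

static Configuration getJobConf(List<SliceQuery> queries) { return getJobConf(queries, null, null); }

static Configuration getJobConf(List<SliceQuery> queries, Long modulus) { return getJobConf(queries, modulus, null); }

static Configuration getJobConf(List<SliceQuery> queries, Long modulus, Long modVal) {
    // ROOT_NS is the job's own ConfigNamespace, referenced above via "#ROOT_NS"
    ModifiableConfiguration conf = new ModifiableConfiguration(ROOT_NS, new CommonsConfiguration(), BasicConfiguration.Restriction.NONE);
    conf.set(HEX_QUERIES, encodeHexQueries(queries));
    if (modulus != null) conf.set(KEY_FILTER_ID_MODULUS, modulus);
    if (modVal != null) conf.set(KEY_FILTER_ID_MODULAR_VALUE, modVal);
    return conf;
}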
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
In class SimpleScanJob, method process:
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
assertNotNull(key);
assertTrue(keyFilter.test(key));
metrics.incrementCustom(KEY_COUNT);
assertNotNull(entries);
assertTrue(qs.size() >= entries.size());
for (SliceQuery q : qs) {
if (!entries.containsKey(q)) {
continue;
}
EntryList result = entries.get(q);
metrics.incrementCustom(TOTAL_COUNT, result.size());
}
}
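For context, the fields and custom-metric keys the job relies on look roughly like the following; the names come from the snippets above, while the metric string values and the default key filter are assumptions:

// Custom metric names used with ScanMetrics.incrementCustom/getCustom
// (names from the snippets above; string values are illustrative):
public static final String KEY_COUNT = "key-count";
public static final String TOTAL_COUNT = "total-count";
public static final String SETUP_COUNT = "setup-count";
public static final String TEARDOWN_COUNT = "teardown-count";

private List<SliceQuery> qs;                              // queries evaluated per key
private Predicate<StaticBuffer> keyFilter = key -> true;  // assumed default: accept all keys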
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
In class MultiVertexCentricQueryBuilder, method execute:
/* ---------------------------------------------------------------
* Query Execution
* ---------------------------------------------------------------
*/
/**
* Constructs the BaseVertexCentricQuery through {@link BasicVertexCentricQueryBuilder#constructQuery(org.janusgraph.graphdb.internal.RelationCategory)}.
* If the query asks for an implicit key, the resulting map is computed and returned directly.
* If the query is empty, a map that maps each vertex to an empty list is returned.
* Otherwise, the query is executed for all vertices through the transaction, which effectively
* pre-loads the result sets into the associated {@link org.janusgraph.graphdb.vertices.CacheVertex},
* or does nothing at all if the vertex is new (and hence has no edges in the storage backend).
* After that, a map is constructed that maps each vertex to the corresponding VertexCentricQuery, wrapped
* into a QueryProcessor. Hence, upon iteration the query is executed like any other VertexCentricQuery,
* with the performance difference that the SliceQueries have already been preloaded and no further
* calls to the storage backend are needed.
*
* @param returnType the category of relations (edges, properties, or both) to query for
* @return a map associating each queried vertex with its query result
*/
protected <Q> Map<JanusGraphVertex, Q> execute(RelationCategory returnType, ResultConstructor<Q> resultConstructor) {
Preconditions.checkArgument(!vertices.isEmpty(), "Need to add at least one vertex to query");
final Map<JanusGraphVertex, Q> result = new HashMap<>(vertices.size());
BaseVertexCentricQuery bq = super.constructQuery(returnType);
profiler.setAnnotation(QueryProfiler.MULTIQUERY_ANNOTATION, true);
profiler.setAnnotation(QueryProfiler.NUMVERTICES_ANNOTATION, vertices.size());
if (!bq.isEmpty()) {
for (BackendQueryHolder<SliceQuery> sq : bq.getQueries()) {
Set<InternalVertex> adjVertices = Sets.newHashSet(vertices);
for (InternalVertex v : vertices) {
if (isPartitionedVertex(v)) {
profiler.setAnnotation(QueryProfiler.PARTITIONED_VERTEX_ANNOTATION, true);
adjVertices.remove(v);
adjVertices.addAll(allRequiredRepresentatives(v));
}
}
// Overwrite with more accurate size accounting for partitioned vertices
profiler.setAnnotation(QueryProfiler.NUMVERTICES_ANNOTATION, adjVertices.size());
tx.executeMultiQuery(adjVertices, sq.getBackendQuery(), sq.getProfiler());
}
for (InternalVertex v : vertices) {
result.put(v, resultConstructor.getResult(v, bq));
}
} else {
for (JanusGraphVertex v : vertices) result.put(v, resultConstructor.emptyResult());
}
return result;
}
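For reference, this execute path is what backs JanusGraph's public multi-vertex query API: one backend SliceQuery is issued for all vertices at once instead of one call per vertex. A brief usage example (the graph and vertex variables v1..v3 are assumed to exist):

JanusGraphTransaction tx = graph.newTransaction();
// Issues a single preloading multi-query for all three vertices:
Map<JanusGraphVertex, Iterable<JanusGraphEdge>> neighbors = tx.multiQuery(v1, v2, v3)
        .labels("knows")
        .direction(Direction.OUT)
        .edges();
neighbors.forEach((vertex, edges) -> edges.forEach(e -> System.out.println(vertex + " -> " + e)));
tx.commit();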