Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
From class ExpirationCacheTest, method testExpiration:
private void testExpiration(Duration expirationTime) throws Exception {
    final int numKeys = 100, numCols = 10;
    loadStore(numKeys, numCols);
    // Replace the cache with one that uses the given expiration time
    cache = getCache(store, expirationTime, Duration.ZERO);
    final StaticBuffer key = BufferUtil.getIntBuffer(81);
    final List<StaticBuffer> keys = new ArrayList<>();
    keys.add(key);
    keys.add(BufferUtil.getIntBuffer(37));
    keys.add(BufferUtil.getIntBuffer(2));
    SliceQuery query = getQuery(2, 8);
    verifyResults(key, keys, query, 6);
    // Modify the store directly, bypassing the cache
    StoreTransaction txs = getStoreTx();
    store.mutate(key, KeyColumnValueStore.NO_ADDITIONS, Lists.newArrayList(BufferUtil.getIntBuffer(5)), txs);
    txs.commit();
    Instant utime = times.getTime();
    // Should still see the cached (now stale) results
    verifyResults(key, keys, query, 6);
    // Sleep until halfway through the expiration time
    times.sleepPast(utime.plus(expirationTime.dividedBy(2)));
    verifyResults(key, keys, query, 6);
    // Sleep past the expiration time...
    times.sleepPast(utime.plus(expirationTime));
    // ...and just a little bit longer
    times.sleepFor(Duration.ofMillis(5));
    // Now the results should reflect the deletion
    verifyResults(key, keys, query, 5);
    // If we modify through the cache...
    CacheTransaction tx = getCacheTx();
    cache.mutateEntries(key, KeyColumnValueStore.NO_ADDITIONS, Lists.newArrayList(getEntry(4, 4)), tx);
    tx.commit();
    store.resetCounter();
    // ...invalidation happens and the result set is updated immediately
    verifyResults(key, keys, query, 4);
}
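The helpers getQuery and verifyResults are defined elsewhere in the test class. As a point of reference, getQuery(2, 8) presumably builds a SliceQuery over the half-open integer column range [2, 8), which explains the expected counts above: 6 columns (2 through 7) before any mutation, 5 after column 5 is deleted, and 4 after entry 4 is removed through the cache. A minimal sketch of such a helper, assuming BufferUtil's order-preserving integer encoding:

private static SliceQuery getQuery(int start, int end) {
    // SliceQuery bounds are start-inclusive and end-exclusive, so this matches
    // the integer columns start..end-1 under a key.
    return new SliceQuery(BufferUtil.getIntBuffer(start), BufferUtil.getIntBuffer(end));
}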
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
From class SimpleScanJob, method encodeQueries:
private static String encodeQueries(List<SliceQuery> queries) {
    List<String> queryStrings = new ArrayList<>(queries.size());
    for (SliceQuery query : queries) {
        String start = Hex.bytesToHex(query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY));
        String end = Hex.bytesToHex(query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY));
        final int limit;
        if (query.hasLimit()) {
            limit = query.getLimit();
        } else {
            limit = -1;
        }
        queryStrings.add(String.format("%s/%s/%d", start, end, limit));
    }
    return Joiner.on(":").join(queryStrings);
}
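Each query is serialized as hex-encoded start/end slice bounds plus a limit, with -1 marking "no limit", and the per-query strings are joined with ':'. For illustration, a matching decoder might look like the sketch below; SimpleScanJob has its own decoding logic, and this reconstruction assumes Hex.hexToBytes and StaticArrayBuffer.of are available as inverses of the calls above:

private static List<SliceQuery> decodeQueries(String encoded) {
    List<SliceQuery> queries = new ArrayList<>();
    for (String queryString : encoded.split(":")) {
        String[] parts = queryString.split("/");
        StaticBuffer start = StaticArrayBuffer.of(Hex.hexToBytes(parts[0]));
        StaticBuffer end = StaticArrayBuffer.of(Hex.hexToBytes(parts[1]));
        SliceQuery query = new SliceQuery(start, end);
        int limit = Integer.parseInt(parts[2]);
        // -1 was used above to mark "no limit", so only restore non-negative limits
        if (0 <= limit) {
            query.setLimit(limit);
        }
        queries.add(query);
    }
    return queries;
}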
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
From class HadoopScanMapper, method map:
@Override
protected void map(StaticBuffer key, Iterable<Entry> values, Context context) throws IOException, InterruptedException {
    EntryArrayList al = EntryArrayList.of(values);
    // KeyFilter check
    if (!keyFilter.test(key)) {
        log.debug("Skipping key {} based on KeyFilter", key);
        return;
    }
    // InitialQuery check (at least one match is required or else the key is ignored)
    EntryList initialQueryMatches = findEntriesMatchingQuery(initialQuery, al);
    if (0 == initialQueryMatches.size()) {
        log.debug("Skipping key {} based on InitialQuery ({}) match failure", key, initialQuery);
        return;
    }
    // Both conditions (KeyFilter && InitialQuery) for invoking process are satisfied.
    // Create an entries parameter to be passed into the process method.
    Map<SliceQuery, EntryList> matches = new HashMap<>();
    matches.put(initialQuery, initialQueryMatches);
    // Find matches (if any are present) for subsequent queries
    for (SliceQuery sq : subsequentQueries) {
        matches.put(sq, findEntriesMatchingQuery(sq, al));
    }
    // Process
    job.process(key, matches, metrics);
}
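findEntriesMatchingQuery filters a row's entries down to those whose columns fall within a SliceQuery's range. A simplified linear-scan sketch of its semantics (the actual implementation in HadoopScanMapper can exploit the sorted column order, for example with a binary search):

private static EntryList findEntriesMatchingQuery(SliceQuery query, EntryList sortedEntries) {
    List<Entry> matches = new ArrayList<>();
    for (Entry entry : sortedEntries) {
        StaticBuffer column = entry.getColumn();
        // SliceQuery ranges are start-inclusive and end-exclusive
        if (query.getSliceStart().compareTo(column) <= 0 && column.compareTo(query.getSliceEnd()) < 0) {
            matches.add(entry);
            // Honor the query limit, mirroring what a storage backend would return
            if (query.hasLimit() && matches.size() >= query.getLimit())
                break;
        }
    }
    return EntryArrayList.of(matches);
}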
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
From class HadoopScanMapper, method finishSetup:
protected void finishSetup(ModifiableHadoopConfiguration scanConf, Configuration graphConf) {
    jobConf = getJobConfiguration(scanConf);
    Preconditions.checkNotNull(metrics);
    // Allowed to be null for jobs that specify no configuration and no configuration root
    // Preconditions.checkNotNull(jobConf);
    Preconditions.checkNotNull(job);
    job.workerIterationStart(jobConf, graphConf, metrics);
    keyFilter = job.getKeyFilter();
    List<SliceQuery> sliceQueries = job.getQueries();
    Preconditions.checkArgument(null != sliceQueries, "Job cannot specify null query list");
    Preconditions.checkArgument(0 < sliceQueries.size(), "Job must specify at least one query");
    // Assign head of getQueries() to "initialQuery"
    initialQuery = sliceQueries.get(0);
    // Assign tail of getQueries() to "subsequentQueries"
    subsequentQueries = new ArrayList<>(sliceQueries.subList(1, sliceQueries.size()));
    Preconditions.checkState(sliceQueries.size() == subsequentQueries.size() + 1);
    Preconditions.checkNotNull(initialQuery);
    if (0 < subsequentQueries.size()) {
        // It is assumed that the first query is the grounding query if multiple queries exist
        StaticBuffer start = initialQuery.getSliceStart();
        Preconditions.checkArgument(start.equals(BufferUtil.zeroBuffer(1)), "Expected start of first query to be all 0s: %s", start);
        StaticBuffer end = initialQuery.getSliceEnd();
        Preconditions.checkArgument(end.equals(BufferUtil.oneBuffer(end.length())), "Expected end of first query to be all 1s: %s", end);
    }
}
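In other words, when multiple queries exist, the first must span the entire column space so that every row reached by the scan is grounded by it. A sketch of a SliceQuery that would pass those checks (the end-buffer length is arbitrary here, since the check only requires all 1-bits):

// A full-range "grounding" query: starts at a one-byte zero buffer and ends at
// an all-ones buffer, satisfying both Preconditions checks in finishSetup.
SliceQuery groundingQuery = new SliceQuery(BufferUtil.zeroBuffer(1), BufferUtil.oneBuffer(4));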
Use of org.janusgraph.diskstorage.keycolumnvalue.SliceQuery in project janusgraph by JanusGraph.
From class VertexJobConverter, method process:
@Override
public void process(StaticBuffer key, Map<SliceQuery, EntryList> entries, ScanMetrics metrics) {
    long vertexId = getVertexId(key);
    assert entries.get(VERTEX_EXISTS_QUERY) != null;
    // Skip vertices that no longer exist but have left partial data behind
    if (isGhostVertex(vertexId, entries.get(VERTEX_EXISTS_QUERY))) {
        metrics.incrementCustom(GHOST_VERTEX_COUNT);
        return;
    }
    JanusGraphVertex vertex = tx.getInternalVertex(vertexId);
    Preconditions.checkArgument(vertex instanceof PreloadedVertex, "The bounding transaction is not configured correctly");
    PreloadedVertex v = (PreloadedVertex) vertex;
    v.setAccessCheck(PreloadedVertex.OPENSTAR_CHECK);
    // Preload each slice result into the vertex's query cache
    for (Map.Entry<SliceQuery, EntryList> entry : entries.entrySet()) {
        SliceQuery sq = entry.getKey();
        if (sq.equals(VERTEX_EXISTS_QUERY))
            continue;
        EntryList entryList = entry.getValue();
        // A result list that reaches the query limit may have been truncated
        if (entryList.size() >= sq.getLimit())
            metrics.incrementCustom(TRUNCATED_ENTRY_LISTS);
        v.addToQueryCache(sq.updateLimit(Query.NO_LIMIT), entryList);
    }
    job.process(v, metrics);
}