Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class StandardScannerExecutor, method run.
@Override
public void run() {
    BlockingQueue<Row> processorQueue;
    try {
        job.workerIterationStart(jobConfiguration, graphConfiguration, metrics);
        List<SliceQuery> queries = job.getQueries();
        int numQueries = queries.size();
        processorQueue = new LinkedBlockingQueue<>(this.graphConfiguration.get(GraphDatabaseConfiguration.PAGE_SIZE) * numProcessors * numQueries);
        Preconditions.checkArgument(numQueries > 0, "Must specify at least one query for job: %s", job);
        if (numQueries > 1) {
            // It is assumed that the first query is the grounding query if multiple queries exist
            SliceQuery ground = queries.get(0);
            StaticBuffer start = ground.getSliceStart();
            Preconditions.checkArgument(start.equals(BufferUtil.zeroBuffer(1)), "Expected start of first query to be a single 0 byte: %s", start);
            StaticBuffer end = ground.getSliceEnd();
            Preconditions.checkArgument(end.equals(BufferUtil.oneBuffer(end.length())), "Expected end of first query to be all 1s: %s", end);
        }
        rowsCollector = buildScanner(processorQueue, queries);
    } catch (Throwable e) {
        log.error("Exception trying to setup the job:", e);
        cleanupSilent();
        job.workerIterationEnd(metrics);
        setException(e);
        return;
    }
    // Start the worker threads that drain rows from the processor queue
    Processor[] processors = new Processor[numProcessors];
    for (int i = 0; i < processors.length; i++) {
        processors[i] = new Processor(job.clone(), processorQueue);
        processors[i].start();
    }
    try {
        // Collect rows into the queue, then wait for collection to complete
        rowsCollector.run();
        rowsCollector.join();
        for (Processor processor : processors) {
            processor.finish();
        }
        if (!Threads.waitForCompletion(processors, TIMEOUT_MS)) {
            log.error("Processor did not terminate in time");
        }
        cleanup();
        try {
            job.workerIterationEnd(metrics);
        } catch (IllegalArgumentException e) {
            // https://github.com/JanusGraph/janusgraph/pull/891
            log.warn("Exception occurred processing worker iteration end. See PR 891.", e);
        }
        if (interrupted) {
            setException(new InterruptedException("Scanner got interrupted"));
        } else {
            finishJob.accept(metrics);
            set(metrics);
        }
    } catch (Throwable e) {
        log.error("Exception occurred during job execution:", e);
        job.workerIterationEnd(metrics);
        setException(e);
    } finally {
        Threads.terminate(processors);
        cleanupSilent();
    }
}
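The grounding-query precondition above hinges on two helpers visible in the snippet: BufferUtil.zeroBuffer(n) yields n bytes of 0x00, BufferUtil.oneBuffer(n) yields n bytes of 0xFF, and StaticBuffer equality is content-based. A minimal sketch of the check in isolation, assuming BufferUtil lives in org.janusgraph.diskstorage.util (the standalone class and main method are illustrative):

import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.util.BufferUtil; // assumed import path

public class GroundingQuerySketch {
    public static void main(String[] args) {
        StaticBuffer start = BufferUtil.zeroBuffer(1); // one 0x00 byte
        StaticBuffer end = BufferUtil.oneBuffer(4);    // four 0xFF bytes
        // The first query must span the full column range, which is what the
        // two Preconditions.checkArgument calls in run() verify:
        System.out.println(start.equals(BufferUtil.zeroBuffer(1)));         // true
        System.out.println(end.equals(BufferUtil.oneBuffer(end.length()))); // true
    }
}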
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class CacheTransaction, method convert.
private KCVMutation convert(KCVEntryMutation mutation) {
    assert !mutation.isEmpty();
    if (mutation.hasDeletions()) {
        return new KCVMutation(() -> new ArrayList<>(mutation.getAdditions()), () -> {
            List<Entry> deletions = mutation.getDeletions();
            ArrayList<StaticBuffer> convertedDeletions = new ArrayList<>(deletions.size());
            for (Entry entry : deletions) {
                convertedDeletions.add(KCVEntryMutation.ENTRY2COLUMN_FCT.apply(entry));
            }
            return convertedDeletions;
        });
    }
    return new KCVMutation(mutation.getAdditions(), KeyColumnValueStore.NO_DELETIONS);
}
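The deletion branch projects each deleted Entry onto its column StaticBuffer, and does so lazily through a supplier so the conversion only runs if the deletions are consumed. A hedged sketch of the same projection written eagerly (columnsOf is a hypothetical helper; Entry#getColumn() is the accessor ENTRY2COLUMN_FCT is assumed to delegate to, as seen in the testUneven snippet below):

import java.util.ArrayList;
import java.util.List;
import org.janusgraph.diskstorage.Entry;
import org.janusgraph.diskstorage.StaticBuffer;

final class DeletionColumns {
    // Hypothetical eager equivalent of the lazy supplier in convert():
    // keep only the column half of each deleted entry.
    static List<StaticBuffer> columnsOf(List<Entry> deletions) {
        List<StaticBuffer> columns = new ArrayList<>(deletions.size());
        for (Entry entry : deletions) {
            columns.add(entry.getColumn());
        }
        return columns;
    }
}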
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class CQLResultSetKeyIteratorTest, method testNoIterateColumns.
@Test
public void testNoIterateColumns() throws IOException {
    final Array<Tuple2<ByteBuffer, Array<Tuple2<ByteBuffer, ByteBuffer>>>> keysMap = generateRandomKeysMap();
    final ResultSet resultSet = generateMockedResultSet(keysMap);
    final CQLColValGetter getter = new CQLColValGetter(new EntryMetaData[0]);
    try (final CQLResultSetKeyIterator resultSetKeyIterator = new CQLResultSetKeyIterator(ALL_COLUMNS, getter, resultSet)) {
        final Iterator<Tuple2<ByteBuffer, Array<Tuple2<ByteBuffer, ByteBuffer>>>> iterator = keysMap.iterator();
        while (resultSetKeyIterator.hasNext()) {
            final StaticBuffer next = resultSetKeyIterator.next();
            assertEquals(iterator.next()._1, next.asByteBuffer());
        }
    }
}
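The assertion compares a plain ByteBuffer against StaticBuffer#asByteBuffer(), which works because ByteBuffer equality is defined over remaining content. A small sketch with arbitrary bytes, assuming StaticArrayBuffer offers a byte[] overload of of(...) alongside the ByteBuffer overload used in CqlBinaryRecordReader below:

import java.nio.ByteBuffer;
import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.util.StaticArrayBuffer; // assumed import path

public class AsByteBufferSketch {
    public static void main(String[] args) {
        StaticBuffer key = StaticArrayBuffer.of(new byte[] { 1, 2, 3 }); // assumed byte[] overload
        ByteBuffer expected = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
        // ByteBuffer#equals compares the remaining bytes element by element:
        System.out.println(expected.equals(key.asByteBuffer())); // true
    }
}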
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class CQLResultSetKeyIteratorTest, method testUneven.
@Test
public void testUneven() throws IOException {
    final Array<Tuple2<ByteBuffer, Array<Tuple2<ByteBuffer, ByteBuffer>>>> keysMap = generateRandomKeysMap();
    final ResultSet resultSet = generateMockedResultSet(keysMap);
    final CQLColValGetter getter = new CQLColValGetter(new EntryMetaData[0]);
    try (final CQLResultSetKeyIterator resultSetKeyIterator = new CQLResultSetKeyIterator(ALL_COLUMNS, getter, resultSet)) {
        final Iterator<Tuple2<ByteBuffer, Array<Tuple2<ByteBuffer, ByteBuffer>>>> iterator = keysMap.iterator();
        while (resultSetKeyIterator.hasNext()) {
            final StaticBuffer next = resultSetKeyIterator.next();
            try (final RecordIterator<Entry> entries = resultSetKeyIterator.getEntries()) {
                final Tuple2<ByteBuffer, Array<Tuple2<ByteBuffer, ByteBuffer>>> current = iterator.next();
                final ByteBuffer currentKey = current._1;
                final Array<Tuple2<ByteBuffer, ByteBuffer>> columnValues = current._2;
                final Iterator<Tuple2<ByteBuffer, ByteBuffer>> columnIterator = columnValues.iterator();
                while (entries.hasNext()) {
                    final Entry entry = entries.next();
                    final Tuple2<ByteBuffer, ByteBuffer> columnAndValue = columnIterator.next();
                    assertEquals(currentKey, next.asByteBuffer());
                    assertEquals(columnAndValue._1, entry.getColumn().asByteBuffer());
                    assertEquals(columnAndValue._2, entry.getValue().asByteBuffer());
                    assertEquals(columnIterator.hasNext(), entries.hasNext());
                }
            }
        }
    }
}
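testUneven verifies that the column iterator and the entry iterator stay in lockstep for every key. Stripped of assertions, the traversal follows a two-level contract: the outer iterator yields key buffers, and getEntries() yields the current key's columns. A hedged sketch of that drain pattern, assuming the KeyIterator and RecordIterator interfaces that CQLResultSetKeyIterator implements live in org.janusgraph.diskstorage.keycolumnvalue and org.janusgraph.diskstorage.util respectively (handle is a hypothetical callback):

import java.io.IOException;
import org.janusgraph.diskstorage.Entry;
import org.janusgraph.diskstorage.StaticBuffer;
import org.janusgraph.diskstorage.keycolumnvalue.KeyIterator; // assumed import path
import org.janusgraph.diskstorage.util.RecordIterator;        // assumed import path

final class KeyIteratorDrain {
    // Visit every (key, column, value) triple exposed by a KeyIterator.
    static void drain(KeyIterator keys) throws IOException {
        while (keys.hasNext()) {
            StaticBuffer key = keys.next();
            // getEntries() is scoped to the key just returned by next()
            try (RecordIterator<Entry> entries = keys.getEntries()) {
                while (entries.hasNext()) {
                    Entry entry = entries.next();
                    handle(key, entry.getColumn(), entry.getValue());
                }
            }
        }
    }

    // Hypothetical sink; replace with real processing
    static void handle(StaticBuffer key, StaticBuffer column, StaticBuffer value) {
    }
}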
Use of org.janusgraph.diskstorage.StaticBuffer in project janusgraph by JanusGraph.
The class CqlBinaryRecordReader, method completeNextKV.
private KV completeNextKV() throws IOException {
    KV completedKV = null;
    boolean hasNext;
    do {
        hasNext = reader.nextKeyValue();
        if (!hasNext) {
            completedKV = incompleteKV;
            incompleteKV = null;
        } else {
            Row row = reader.getCurrentValue();
            StaticArrayBuffer key = StaticArrayBuffer.of(row.getBytesUnsafe(CQLKeyColumnValueStore.KEY_COLUMN_NAME));
            StaticBuffer column1 = StaticArrayBuffer.of(row.getBytesUnsafe(CQLKeyColumnValueStore.COLUMN_COLUMN_NAME));
            StaticBuffer value = StaticArrayBuffer.of(row.getBytesUnsafe(CQLKeyColumnValueStore.VALUE_COLUMN_NAME));
            Entry entry = StaticArrayEntry.of(column1, value);
            if (null == incompleteKV) {
                // Initialization; this should happen just once in an instance's lifetime
                incompleteKV = new KV(key);
            } else if (!incompleteKV.key.equals(key)) {
                // The underlying Cassandra reader has just changed to a key we haven't seen yet.
                // This implies that there will be no more entries for the prior key.
                completedKV = incompleteKV;
                incompleteKV = new KV(key);
            }
            incompleteKV.addEntry(entry);
        }
        /* Loop ends when either
         * A) the Cassandra reader ran out of data, or
         * B) the Cassandra reader switched keys, thereby completing a KV. */
    } while (hasNext && null == completedKV);
    return completedKV;
}
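The grouping in completeNextKV() relies on StaticArrayBuffer equality being content-based: two buffers wrapping the same bytes compare equal, so incompleteKV.key.equals(key) turns false exactly when the reader moves on to a new partition key and the accumulated KV can be completed. A minimal illustration with arbitrary bytes (the byte[] overload of StaticArrayBuffer.of is assumed, alongside the ByteBuffer overload used above):

import org.janusgraph.diskstorage.util.StaticArrayBuffer; // assumed import path

public class KeyChangeSketch {
    public static void main(String[] args) {
        StaticArrayBuffer k1 = StaticArrayBuffer.of(new byte[] { 10, 20 });
        StaticArrayBuffer k2 = StaticArrayBuffer.of(new byte[] { 10, 20 });
        StaticArrayBuffer k3 = StaticArrayBuffer.of(new byte[] { 10, 21 });
        System.out.println(k1.equals(k2)); // true  -> same key, keep accumulating entries
        System.out.println(k1.equals(k3)); // false -> prior KV completes, a new KV begins
    }
}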