Use of com.carrotsearch.hppc.IntContainer in project crate by crate.

The class NodeFetchRequestTest, method testStreaming:

@Test
public void testStreaming() throws Exception {
    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
    IntHashSet docIds = new IntHashSet(3);
    toFetch.put(1, docIds);
    NodeFetchRequest orig = new NodeFetchRequest(UUID.randomUUID(), 1, true, toFetch);

    // Serialize the request ...
    BytesStreamOutput out = new BytesStreamOutput();
    orig.writeTo(out);

    // ... and deserialize it again from the written bytes.
    StreamInput in = StreamInput.wrap(out.bytes());
    NodeFetchRequest streamed = new NodeFetchRequest();
    streamed.readFrom(in);

    assertThat(orig.jobId(), is(streamed.jobId()));
    assertThat(orig.fetchPhaseId(), is(streamed.fetchPhaseId()));
    assertThat(orig.isCloseContext(), is(streamed.isCloseContext()));
    assertThat(orig.toFetch().toString(), is(streamed.toFetch().toString()));
}
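For readers unfamiliar with hppc, here is a minimal, self-contained sketch of the toFetch structure used above; the class name and the sample ids are illustrative, not part of the test:

import com.carrotsearch.hppc.IntContainer;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.cursors.IntCursor;
import com.carrotsearch.hppc.cursors.IntObjectCursor;

public class ReaderDocIdsDemo {
    public static void main(String[] args) {
        // Map a reader id to the set of doc ids that should be fetched from it.
        IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
        IntHashSet docIds = new IntHashSet(3);  // 3 is an expected-elements hint, not a cap
        docIds.add(7);
        docIds.add(42);
        toFetch.put(1, docIds);

        // hppc containers are traversed with cursors instead of boxed Integers.
        for (IntObjectCursor<IntContainer> entry : toFetch) {
            for (IntCursor docId : entry.value) {
                System.out.println("reader " + entry.key + " -> doc " + docId.value);
            }
        }
    }
}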
Use of com.carrotsearch.hppc.IntContainer in project crate by crate.

The class JobSetup, method prepareSourceOperations:

/**
 * Recursively build all contexts that depend on startPhaseId (excl. startPhaseId).
 * <p>
 * {@link Context#opCtx#targetToSourceMap} will be used to traverse the nodeOperations.
 */
private void prepareSourceOperations(int startPhaseId, Context context) {
    IntContainer sourcePhaseIds = context.opCtx.targetToSourceMap.get(startPhaseId);
    if (sourcePhaseIds == null) {
        return;
    }
    // First build a context for every direct source phase ...
    for (IntCursor sourcePhaseId : sourcePhaseIds) {
        NodeOperation nodeOperation = context.opCtx.nodeOperationByPhaseId.get(sourcePhaseId.value);
        createContexts(nodeOperation.executionPhase(), context);
        context.opCtx.builtNodeOperations.set(nodeOperation.executionPhase().phaseId());
    }
    // ... then recurse into the sources of each source phase.
    for (IntCursor sourcePhaseId : sourcePhaseIds) {
        prepareSourceOperations(sourcePhaseId.value, context);
    }
}
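The traversal pattern is easy to show in isolation. Below is a sketch with a hypothetical targetToSources adjacency map; all names and ids are illustrative, not crate API:

import com.carrotsearch.hppc.IntContainer;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.cursors.IntCursor;

public class PhaseTraversalDemo {
    // target phase id -> ids of the phases that feed into it (made-up data)
    static final IntObjectHashMap<IntContainer> targetToSources = new IntObjectHashMap<>();

    static void visitSources(int startPhaseId) {
        IntContainer sources = targetToSources.get(startPhaseId);
        if (sources == null) {
            return;  // no phase feeds into this one
        }
        for (IntCursor source : sources) {
            System.out.println("phase " + startPhaseId + " depends on phase " + source.value);
        }
        for (IntCursor source : sources) {
            visitSources(source.value);  // depth-first, excluding the start phase itself
        }
    }

    public static void main(String[] args) {
        targetToSources.put(3, IntHashSet.from(1, 2));
        targetToSources.put(2, IntHashSet.from(0));
        visitSources(3);
    }
}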
Use of com.carrotsearch.hppc.IntContainer in project graphhopper by graphhopper.

The class PrepareContractionHierarchies, method contractNode:

private IntContainer contractNode(int node, int level) {
    if (isContracted(node))
        throw new IllegalArgumentException("Node " + node + " was contracted already");

    contractionSW.start();
    chBuilder.setLevel(node, level);
    // Contract the node under a stopwatch; the affected neighbors are handed back to the caller.
    IntContainer neighbors = nodeContractor.contractNode(node);
    contractionSW.stop();
    return neighbors;
}
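A brief sketch of how a caller might consume the returned neighbor container; the onNodeContracted and updatePriority methods are hypothetical, not GraphHopper API:

import com.carrotsearch.hppc.IntContainer;
import com.carrotsearch.hppc.cursors.IntCursor;

public class NeighborUpdateDemo {
    // After a node is contracted, revisit each neighbor, e.g. to recompute
    // its position in a contraction priority queue (illustrative only).
    void onNodeContracted(IntContainer neighbors) {
        for (IntCursor neighbor : neighbors) {
            updatePriority(neighbor.value);
        }
    }

    void updatePriority(int node) {
        // placeholder: recompute the node's priority and re-insert it into the queue
    }
}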
Use of com.carrotsearch.hppc.IntContainer in project crate by crate.

The class NodeFetchResponseTest, method testStreaming:

@Test
public void testStreaming() throws Exception {
    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
    IntHashSet docIds = new IntHashSet(3);
    toFetch.put(1, docIds);

    IntObjectMap<Streamer[]> streamers = new IntObjectHashMap<>(1);
    streamers.put(1, new Streamer[]{DataTypes.BOOLEAN.streamer()});

    // Build a bucket holding a single row with one boolean column.
    StreamBucket.Builder builder = new StreamBucket.Builder(streamers.get(1));
    builder.add(new RowN(new Object[]{true}));
    IntObjectHashMap<StreamBucket> fetched = new IntObjectHashMap<>(1);
    fetched.put(1, builder.build());

    NodeFetchResponse orig = NodeFetchResponse.forSending(fetched);
    BytesStreamOutput out = new BytesStreamOutput();
    orig.writeTo(out);

    StreamInput in = StreamInput.wrap(out.bytes());
    // The receiving side is required to set the streamers.
    NodeFetchResponse streamed = NodeFetchResponse.forReceiveing(streamers);
    streamed.readFrom(in);

    assertThat((Row) Iterables.getOnlyElement(streamed.fetched().get(1)), isRow(true));
}
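Both tests follow the same write-then-read pattern. A minimal round trip through that streaming API, assuming the older Elasticsearch StreamInput.wrap(...) used above is available:

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class StreamRoundTripDemo {
    public static void main(String[] args) throws Exception {
        // Write a couple of primitive values ...
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeVInt(42);
        out.writeBoolean(true);

        // ... and read them back in the same order.
        StreamInput in = StreamInput.wrap(out.bytes());
        System.out.println(in.readVInt());    // 42
        System.out.println(in.readBoolean()); // true
    }
}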
Use of com.carrotsearch.hppc.IntContainer in project crate by crate.

The class NodeFetchOperation, method doFetch:

private void doFetch(FetchContext fetchContext,
                     SettableFuture<IntObjectMap<StreamBucket>> resultFuture,
                     IntObjectMap<? extends IntContainer> toFetch) throws Exception {
    final IntObjectHashMap<StreamBucket> fetched = new IntObjectHashMap<>(toFetch.size());
    HashMap<TableIdent, TableFetchInfo> tableFetchInfos = getTableFetchInfos(fetchContext);
    final AtomicReference<Throwable> lastThrowable = new AtomicReference<>(null);
    final AtomicInteger threadLatch = new AtomicInteger(toFetch.size());

    for (IntObjectCursor<? extends IntContainer> toFetchCursor : toFetch) {
        final int readerId = toFetchCursor.key;
        final IntContainer docIds = toFetchCursor.value;

        TableIdent ident = fetchContext.tableIdent(readerId);
        final TableFetchInfo tfi = tableFetchInfos.get(ident);
        assert tfi != null : "tfi must not be null";

        CollectRunnable runnable = new CollectRunnable(
            tfi.createCollector(readerId), docIds, fetched, readerId,
            lastThrowable, threadLatch, resultFuture, fetchContext.isKilled());
        try {
            executor.execute(runnable);
        } catch (EsRejectedExecutionException | RejectedExecutionException e) {
            // Executor saturated: fall back to collecting on the calling thread.
            runnable.run();
        }
    }
}
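The method fans out one task per (readerId -> docIds) entry and uses an AtomicInteger as a countdown latch. The skeleton of that pattern, with all crate-specific types stripped out (fanOut and the task body are illustrative):

import com.carrotsearch.hppc.IntContainer;
import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.IntObjectMap;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutDemo {
    static void fanOut(IntObjectMap<? extends IntContainer> toFetch, ExecutorService executor) {
        AtomicInteger latch = new AtomicInteger(toFetch.size());
        for (IntObjectCursor<? extends IntContainer> entry : toFetch) {
            final int readerId = entry.key;
            final IntContainer docIds = entry.value;
            Runnable task = () -> {
                System.out.println("collecting " + docIds.size() + " docs for reader " + readerId);
                if (latch.decrementAndGet() == 0) {
                    System.out.println("all readers done, result can be published");
                }
            };
            try {
                executor.execute(task);
            } catch (RejectedExecutionException e) {
                task.run();  // same fallback as above: run on the caller thread
            }
        }
    }

    public static void main(String[] args) {
        IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
        toFetch.put(1, IntHashSet.from(3, 4));
        toFetch.put(2, IntHashSet.from(5));
        ExecutorService executor = Executors.newFixedThreadPool(2);
        fanOut(toFetch, executor);
        executor.shutdown();
    }
}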