Usage of io.crate.data.BatchConsumer in the crate project: class ContextPreparer, method prepareOnHandler.
/**
 * Prepares the execution contexts for the given node operations on the handler node.
 *
 * @param nodeOperations      operations that have to run on this (handler) node
 * @param contextBuilder      builder collecting the created execution sub-contexts
 * @param handlerPhases       phases executed on the handler together with the consumer receiving their result
 * @param sharedShardContexts shard contexts shared across phases of the same job
 * @return futures for buckets that are returned via direct response
 */
public List<CompletableFuture<Bucket>> prepareOnHandler(Iterable<? extends NodeOperation> nodeOperations,
                                                        JobExecutionContext.Builder contextBuilder,
                                                        List<Tuple<ExecutionPhase, BatchConsumer>> handlerPhases,
                                                        SharedShardContexts sharedShardContexts) {
    ContextPreparer.PreparerContext ctx = new PreparerContext(
        clusterService.localNode().getId(),
        contextBuilder,
        logger,
        distributingDownstreamFactory,
        nodeOperations,
        sharedShardContexts);

    // Register the handler phases as leaves first; registerContextPhases relies on them being known.
    for (Tuple<ExecutionPhase, BatchConsumer> phaseAndConsumer : handlerPhases) {
        ctx.registerLeaf(phaseAndConsumer.v1(), phaseAndConsumer.v2());
    }
    registerContextPhases(nodeOperations, ctx);
    logger.trace("prepareOnHandler: nodeOperations={}, handlerPhases={}, targetSourceMap={}",
        nodeOperations, handlerPhases, ctx.opCtx.targetToSourceMap);

    // Collect the phase ids that have no downstream within this job ...
    IntHashSet leafPhaseIds = new IntHashSet();
    for (Tuple<ExecutionPhase, BatchConsumer> phaseAndConsumer : handlerPhases) {
        ExecutionPhase phase = phaseAndConsumer.v1();
        createContexts(phase, ctx);
        leafPhaseIds.add(phase.phaseId());
    }
    leafPhaseIds.addAll(ctx.opCtx.findLeafs());

    // ... and walk upstream from each leaf so every source operation gets prepared.
    for (IntCursor leafCursor : leafPhaseIds) {
        prepareSourceOperations(leafCursor.value, ctx);
    }
    assert ctx.opCtx.allNodeOperationContextsBuilt() : "some nodeOperations haven't been processed";
    return ctx.directResponseFutures;
}
Usage of io.crate.data.BatchConsumer in the crate project: class DistributingDownstreamFactoryTest, method testCreateDownstreamMultipleNode.
@Test
public void testCreateDownstreamMultipleNode() throws Exception {
    // With more than one downstream node, rows must be split across buckets by modulo.
    BatchConsumer consumer = createDownstream(ImmutableSet.of("downstream_node1", "downstream_node2"));
    DistributingConsumer distributingConsumer = (DistributingConsumer) consumer;
    assertThat(distributingConsumer.multiBucketBuilder, instanceOf(ModuloBucketBuilder.class));
}
Usage of io.crate.data.BatchConsumer in the crate project: class ExecutionPhasesTask, method createHandlerPhaseAndReceivers.
/**
 * Pairs each handler {@link ExecutionPhase} with its receiving {@link BatchConsumer},
 * wrapping every consumer in an {@code InterceptingBatchConsumer} so that failures
 * trigger a kill of the whole job and the {@code initializationTracker} is notified.
 *
 * @param handlerPhases         phases that run on the handler node
 * @param handlerReceivers      one receiver per handler phase, in matching order
 * @param initializationTracker tracker informed about per-node job initialization
 * @return one (phase, intercepting consumer) tuple per handler phase
 * @throws IllegalArgumentException if the two lists differ in size
 */
private List<Tuple<ExecutionPhase, BatchConsumer>> createHandlerPhaseAndReceivers(List<ExecutionPhase> handlerPhases,
                                                                                  List<BatchConsumer> handlerReceivers,
                                                                                  InitializationTracker initializationTracker) {
    // Fail fast instead of hitting NoSuchElementException halfway through the loop
    // after some consumers have already been wrapped.
    if (handlerPhases.size() != handlerReceivers.size()) {
        throw new IllegalArgumentException(
            "handlerPhases and handlerReceivers must have the same size: "
                + handlerPhases.size() + " != " + handlerReceivers.size());
    }
    // Size is known up front; presize to avoid intermediate grows.
    List<Tuple<ExecutionPhase, BatchConsumer>> handlerPhaseAndReceiver = new ArrayList<>(handlerPhases.size());
    ListIterator<BatchConsumer> consumerIt = handlerReceivers.listIterator();
    for (ExecutionPhase handlerPhase : handlerPhases) {
        InterceptingBatchConsumer interceptingBatchConsumer = new InterceptingBatchConsumer(
            jobId(), consumerIt.next(), initializationTracker, transportKillJobsNodeAction);
        handlerPhaseAndReceiver.add(new Tuple<>(handlerPhase, interceptingBatchConsumer));
    }
    return handlerPhaseAndReceiver;
}
Usage of io.crate.data.BatchConsumer in the crate project: class SystemCollectSource, method getCollector.
/**
 * Creates a collector for a virtual system table that is served from an in-memory iterable.
 * The phase is normalized first so node-level references (e.g. {@code _node}) become literals;
 * the normalized phase is then used consistently for routing, projection size and row conversion.
 *
 * @param phase             must be a {@link RoutedCollectPhase} targeting exactly one sys table on this node
 * @param consumer          consumer receiving the produced batch iterator
 * @param jobCollectContext context of the collect job (unused here)
 * @return a collector bridging the async iterable into the consumer
 */
@Override
public CrateCollector getCollector(CollectPhase phase, BatchConsumer consumer, JobCollectContext jobCollectContext) {
    RoutedCollectPhase collectPhase = (RoutedCollectPhase) phase;
    // sys.operations can contain a _node column - these refs need to be normalized into literals
    EvaluatingNormalizer normalizer = new EvaluatingNormalizer(
        functions,
        RowGranularity.DOC,
        ReplaceMode.COPY,
        new NodeSysReferenceResolver(nodeSysExpression),
        null);
    final RoutedCollectPhase routedCollectPhase = collectPhase.normalize(normalizer, null);
    // Use the normalized phase from here on; the original mixed normalized and
    // un-normalized phase access (routing()/toCollect() came from collectPhase).
    Map<String, Map<String, List<Integer>>> locations = routedCollectPhase.routing().locations();
    String table = Iterables.getOnlyElement(locations.get(clusterService.localNode().getId()).keySet());
    Supplier<CompletableFuture<? extends Iterable<?>>> iterableGetter = iterableGetters.get(table);
    assert iterableGetter != null : "iterableGetter for " + table + " must exist";
    // Capture the flag now; the consumer must not be touched from inside the async supplier.
    boolean requiresScroll = consumer.requiresScroll();
    return BatchIteratorCollectorBridge.newInstance(
        () -> iterableGetter.get().thenApply(dataIterable ->
            RowsBatchIterator.newInstance(
                dataIterableToRowsIterable(routedCollectPhase, requiresScroll, dataIterable),
                routedCollectPhase.toCollect().size())),
        consumer);
}
Usage of io.crate.data.BatchConsumer in the crate project: class BatchPortal, method sync.
/**
 * Plans and kicks off execution of every analyzed statement in this batch.
 * Returns a future that completes once all statements have finished (or failed).
 * If planning of any statement throws, the failure is logged and rethrown
 * immediately; statements already submitted keep running.
 */
@Override
public CompletableFuture<Void> sync(Planner planner, JobsLogs jobsLogs) {
// Counts down once per statement; completes after analysis.size() completions.
CountdownFutureCallback completionCallback = new CountdownFutureCallback(analysis.size());
for (int i = 0; i < analysis.size(); i++) {
// Each statement in the batch gets its own job id.
UUID jobId = UUID.randomUUID();
Plan plan;
String stmt = queries.get(i);
try {
plan = planner.plan(analysis.get(i), jobId, 0, 0);
} catch (Throwable t) {
// Planning failed before execution started: record the failure, then abort the batch.
jobsLogs.logPreExecutionFailure(jobId, stmt, SQLExceptions.messageOf(t));
throw t;
}
ResultReceiver resultReceiver = resultReceivers.get(i);
jobsLogs.logExecutionStart(jobId, stmt);
JobsLogsUpdateListener jobsLogsUpdateListener = new JobsLogsUpdateListener(jobId, jobsLogs);
// On completion of this statement's result: update the jobs log, then count down the batch callback.
resultReceiver.completionFuture().whenComplete(jobsLogsUpdateListener).whenComplete(completionCallback);
BatchConsumer consumer = new BatchConsumerToResultReceiver(resultReceiver, 0);
// Fire off execution; results flow asynchronously into the receiver above.
portalContext.getExecutor().execute(plan, consumer, new RowN(batchParams.toArray()));
}
synced = true;
return completionCallback;
}
Aggregations