Usage example of io.crate.execution.dsl.phases.NodeOperation in the crate project: class ExplainPlan, method extractPhasesTimingsFrom.
/**
 * Gathers timing information for every execution phase of the given operation
 * tree — including the leaf phase — into a single map sorted by phase name.
 *
 * @param timingsByNodeId raw timing entries keyed by node id
 * @param operationTree   the tree whose phases should be looked up
 * @return a sorted map of phase name to timing data
 */
private static Map<String, Object> extractPhasesTimingsFrom(Map<String, Map<String, Object>> timingsByNodeId,
                                                            NodeOperationTree operationTree) {
    Map<String, Object> phaseTimings = new TreeMap<>();
    for (NodeOperation nodeOperation : operationTree.nodeOperations()) {
        getPhaseTimingsAndAddThemToPhasesMap(nodeOperation.executionPhase(), timingsByNodeId, phaseTimings);
    }
    // The leaf phase is not part of nodeOperations() and has to be added separately.
    getPhaseTimingsAndAddThemToPhasesMap(operationTree.leaf(), timingsByNodeId, phaseTimings);
    return phaseTimings;
}
Usage example of io.crate.execution.dsl.phases.NodeOperation in the crate project: class DocLevelCollectTest, method collect.
/**
 * Runs the given collect phase on a data node of the test cluster and returns
 * the first direct-response result bucket, waiting at most two seconds.
 *
 * @param collectNode the collect phase to execute
 * @return the first result bucket produced by the remote execution
 * @throws Throwable if task setup or execution fails, or the result times out
 */
private Bucket collect(RoutedCollectPhase collectNode) throws Throwable {
    JobSetup setup = internalCluster().getDataNodeInstance(JobSetup.class);
    TasksService taskService = internalCluster().getDataNodeInstance(TasksService.class);
    IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class);
    SharedShardContexts shardContexts = new SharedShardContexts(indicesService, UnaryOperator.identity());

    RootTask.Builder taskBuilder = taskService.newBuilder(collectNode.jobId());
    // Direct response: results stream straight back instead of going through a downstream phase.
    NodeOperation operation = NodeOperation.withDirectResponse(collectNode, mock(ExecutionPhase.class), (byte) 0, "remoteNode");
    List<CompletableFuture<StreamBucket>> resultFutures =
        setup.prepareOnRemote(DUMMY_SESSION_INFO, List.of(operation), taskBuilder, shardContexts);
    taskService.createTask(taskBuilder).start();
    return resultFutures.get(0).get(2, TimeUnit.SECONDS);
}
Usage example of io.crate.execution.dsl.phases.NodeOperation in the crate project: class DistributingConsumerFactoryTest, method createDownstream.
/**
 * Builds a {@link RowConsumer} that forwards rows from a doc-level collect
 * phase to the given downstream execution nodes via a broadcast merge phase.
 *
 * @param downstreamExecutionNodes node ids that should receive the merged rows
 * @return a row consumer wired up through {@code rowDownstreamFactory}
 */
private RowConsumer createDownstream(Set<String> downstreamExecutionNodes) {
    UUID jobId = UUID.randomUUID();
    Routing routing = new Routing(Map.of("n1", Map.of("i1", IntArrayList.from(1, 2))));
    RoutedCollectPhase collectPhase = new RoutedCollectPhase(
        jobId,
        1,
        "collect",
        routing,
        RowGranularity.DOC,
        List.of(),
        List.of(),
        WhereClause.MATCH_ALL.queryOrFallback(),
        DistributionInfo.DEFAULT_MODULO);
    MergePhase mergePhase = new MergePhase(
        jobId,
        2,
        "merge",
        1,
        1,
        downstreamExecutionNodes,
        List.of(LongType.INSTANCE),
        List.of(),
        DistributionInfo.DEFAULT_BROADCAST,
        null);
    NodeOperation operation = NodeOperation.withDownstream(collectPhase, mergePhase, (byte) 0);
    return rowDownstreamFactory.create(operation, RamAccounting.NO_ACCOUNTING, collectPhase.distributionInfo(), jobId, Paging.PAGE_SIZE);
}
Usage example of io.crate.execution.dsl.phases.NodeOperation in the crate project: class JobLauncher, method setupTasks.
/**
 * Creates and starts the local root task for this job and then sends job
 * requests for all remaining node operations to the other cluster nodes.
 *
 * <p>Local node operations are removed from {@code operationByServer} before
 * remote requests are sent, so the map passed to {@code sendJobRequests}
 * contains only remote servers.
 *
 * @param txnCtx           transaction context providing session settings
 * @param operationByServer node operations grouped by target node id; the
 *                          local node's entry is removed by this method
 * @param handlerPhases    handler-side execution phases; must be the same
 *                         size as {@code handlerConsumers}
 * @param handlerConsumers one row consumer per handler phase
 * @throws Throwable if local task setup or start fails
 */
private void setupTasks(TransactionContext txnCtx, Map<String, Collection<NodeOperation>> operationByServer, List<ExecutionPhase> handlerPhases, List<RowConsumer> handlerConsumers) throws Throwable {
    assert handlerPhases.size() == handlerConsumers.size() : "handlerPhases size must match handlerConsumers size";
    String localNodeId = clusterService.localNode().getId();
    // Pull out (and remove) the operations targeted at this node; everything
    // left in operationByServer is dispatched remotely at the end.
    Collection<NodeOperation> localNodeOperations = operationByServer.remove(localNodeId);
    if (localNodeOperations == null) {
        localNodeOperations = Collections.emptyList();
    }
    // + 1 for localTask which is always created
    InitializationTracker initializationTracker = new InitializationTracker(operationByServer.size() + 1);
    List<Tuple<ExecutionPhase, RowConsumer>> handlerPhaseAndReceiver = createHandlerPhaseAndReceivers(handlerPhases, handlerConsumers, initializationTracker);
    RootTask.Builder builder = tasksService.newBuilder(jobId, txnCtx.sessionSettings().userName(), localNodeId, operationByServer.keySet());
    SharedShardContexts sharedShardContexts = maybeInstrumentProfiler(builder);
    List<CompletableFuture<StreamBucket>> directResponseFutures = jobSetup.prepareOnHandler(txnCtx.sessionSettings(), localNodeOperations, builder, handlerPhaseAndReceiver, sharedShardContexts);
    RootTask localTask = tasksService.createTask(builder);
    List<PageBucketReceiver> pageBucketReceivers = getHandlerBucketReceivers(localTask, handlerPhaseAndReceiver);
    int bucketIdx = 0;
    /*
     * If you touch anything here make sure the following tests pass with > 1k iterations:
     *
     * Seed: 112E1807417E925A - testInvalidPatternSyntax
     * Seed: Any - testRegularSelectWithFewAvailableThreadsShouldNeverGetStuck
     * Seed: CC456FF5004F35D3 - testFailureOfJoinDownstream
     */
    if (!localNodeOperations.isEmpty() && !directResponseFutures.isEmpty()) {
        // Direct-response case: local results are forwarded to the handler's
        // bucket receivers once all futures complete; bucket 0 is reserved
        // for them, remote responses start at bucketIdx 1.
        assert directResponseFutures.size() == pageBucketReceivers.size() : "directResponses size must match pageBucketReceivers";
        CompletableFutures.allAsList(directResponseFutures).whenComplete(BucketForwarder.asConsumer(pageBucketReceivers, bucketIdx, initializationTracker));
        bucketIdx++;
        try {
            // initializationTracker for localNodeOperations is triggered via SetBucketCallback
            localTask.start();
        } catch (Throwable t) {
            // Local start failed: mark all pending remote operations as failed
            // and abort before any remote requests are sent.
            accountFailureForRemoteOperations(operationByServer, initializationTracker, handlerPhaseAndReceiver, t);
            return;
        }
    } else {
        try {
            localTask.start();
            // No direct responses to wait for, so the local job slot of the
            // tracker is completed immediately after a successful start.
            initializationTracker.jobInitialized();
        } catch (Throwable t) {
            initializationTracker.jobInitializationFailed(t);
            accountFailureForRemoteOperations(operationByServer, initializationTracker, handlerPhaseAndReceiver, t);
            return;
        }
    }
    // Dispatch the remaining (remote-only) operations; remote buckets start
    // at the current bucketIdx so they never collide with the direct-response bucket.
    sendJobRequests(txnCtx, localNodeId, operationByServer, pageBucketReceivers, handlerPhaseAndReceiver, bucketIdx, initializationTracker);
}
Aggregations