Example use of io.trino.sql.planner.plan.PlanFragmentId in the trinodb/trino project:
the method removeCompletedStage of the class PhasedExecutionSchedule.
/**
 * Unregisters a completed stage from the dependency and topology graphs and
 * reports which downstream fragments have just become runnable.
 *
 * @param stage the stage execution that has finished
 * @return fragment ids whose only remaining dependency was the completed
 *         stage (empty if the fragment was already removed)
 */
private Set<PlanFragmentId> removeCompletedStage(StageExecution stage)
{
    PlanFragmentId completedFragment = stage.getFragment().getId();
    if (!fragmentDependency.containsVertex(completedFragment)) {
        // Vertex was already removed earlier; nothing new to schedule.
        return ImmutableSet.of();
    }

    // Collect dependents that become runnable once this vertex is gone:
    // an in-degree of 1 means the completed fragment was their last dependency.
    ImmutableSet.Builder<PlanFragmentId> readyFragments = ImmutableSet.builder();
    for (FragmentsEdge edge : fragmentDependency.outgoingEdgesOf(completedFragment)) {
        PlanFragmentId dependentFragment = edge.getTarget();
        if (fragmentDependency.inDegreeOf(dependentFragment) == 1) {
            readyFragments.add(dependentFragment);
        }
    }

    // Drop the fragment from both graphs and forget the finished stage.
    fragmentDependency.removeVertex(completedFragment);
    fragmentTopology.removeVertex(completedFragment);
    activeStages.remove(stage);

    return readyFragments.build();
}
Example use of io.trino.sql.planner.plan.PlanFragmentId in the trinodb/trino project:
the method createPipelinedStageExecution of the class PipelinedStageExecution.
/**
 * Static factory that builds, initializes, and returns a
 * {@link PipelinedStageExecution} for the given stage attempt.
 *
 * @param stage the stage to execute
 * @param outputBufferManagers output buffer manager per consuming fragment
 * @param taskLifecycleListener listener notified of task lifecycle events
 * @param failureDetector detector used to classify node failures
 * @param executor executor driving the stage state machine
 * @param bucketToPartition optional bucket-to-partition mapping
 * @param attempt zero-based retry attempt number for this stage
 * @return a fully initialized stage execution
 */
public static PipelinedStageExecution createPipelinedStageExecution(
        SqlStage stage,
        Map<PlanFragmentId, OutputBufferManager> outputBufferManagers,
        TaskLifecycleListener taskLifecycleListener,
        FailureDetector failureDetector,
        Executor executor,
        Optional<int[]> bucketToPartition,
        int attempt)
{
    // One state machine per stage attempt, driven on the supplied executor.
    PipelinedStageStateMachine stateMachine = new PipelinedStageStateMachine(stage.getStageId(), executor);

    // Map every upstream fragment id to the remote source node that consumes it.
    // buildOrThrow() below fails fast if a fragment id appears under two nodes.
    ImmutableMap.Builder<PlanFragmentId, RemoteSourceNode> exchangeSources = ImmutableMap.builder();
    stage.getFragment().getRemoteSourceNodes().forEach(remoteSource ->
            remoteSource.getSourceFragmentIds().forEach(sourceFragmentId ->
                    exchangeSources.put(sourceFragmentId, remoteSource)));

    PipelinedStageExecution execution = new PipelinedStageExecution(
            stateMachine,
            stage,
            outputBufferManagers,
            taskLifecycleListener,
            failureDetector,
            executor,
            bucketToPartition,
            exchangeSources.buildOrThrow(),
            attempt);
    execution.initialize();
    return execution;
}
Aggregations