Use of com.facebook.presto.sql.planner.plan.PlanFragmentId in project presto by prestodb.
In the class TestStageStateMachine, the method createValuesPlan:
private static PlanFragment createValuesPlan() {
    Symbol symbol = new Symbol("column");
    PlanNodeId valuesNodeId = new PlanNodeId("plan");
    PlanFragment planFragment = new PlanFragment(
            new PlanFragmentId("plan"),
            new ValuesNode(valuesNodeId, ImmutableList.of(symbol), ImmutableList.of(ImmutableList.of(new StringLiteral("foo")))),
            ImmutableMap.of(symbol, VARCHAR),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(valuesNodeId),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(symbol)));
    return planFragment;
}
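A hypothetical usage check, not part of TestStageStateMachine, showing what the helper yields; it assumes a TestNG-style assertEquals and the same imports and constants as the snippet above, and only touches getters (getId, getPartitioning) that the scheduler code further down this page also calls.

// hypothetical: verify the fragment built by the helper above
PlanFragment fragment = createValuesPlan();
assertEquals(fragment.getId(), new PlanFragmentId("plan"));
assertEquals(fragment.getPartitioning(), SOURCE_DISTRIBUTION);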
Use of com.facebook.presto.sql.planner.plan.PlanFragmentId in project presto by prestodb.
In the class AllAtOnceExecutionSchedule, the method getPreferredScheduleOrder:
@VisibleForTesting
static List<PlanFragmentId> getPreferredScheduleOrder(Collection<PlanFragment> fragments) {
    // determine output fragment
    Set<PlanFragmentId> remoteSources = fragments.stream()
            .map(PlanFragment::getRemoteSourceNodes)
            .flatMap(Collection::stream)
            .map(RemoteSourceNode::getSourceFragmentIds)
            .flatMap(Collection::stream)
            .collect(toImmutableSet());
    Set<PlanFragment> rootFragments = fragments.stream()
            .filter(fragment -> !remoteSources.contains(fragment.getId()))
            .collect(toImmutableSet());
    Visitor visitor = new Visitor(fragments);
    rootFragments.forEach(fragment -> visitor.processFragment(fragment.getId()));
    return visitor.getSchedulerOrder();
}
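The root-detection step above can be restated on its own: a fragment is a root exactly when no other fragment lists it as a remote source. Below is a simplified, self-contained sketch of that rule, with plain String ids and a Map standing in for PlanFragment and RemoteSourceNode (those stand-ins, and the class name, are invented for illustration).

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;

import java.util.Collection;
import java.util.Map;
import java.util.Set;

import static com.google.common.collect.ImmutableSet.toImmutableSet;

public class RootFragmentSketch {
    // fragmentId -> ids of the fragments it reads from over remote exchanges
    static Set<String> rootFragments(Map<String, ? extends Collection<String>> remoteSourcesByFragment) {
        Set<String> referenced = remoteSourcesByFragment.values().stream()
                .flatMap(Collection::stream)
                .collect(toImmutableSet());
        return remoteSourcesByFragment.keySet().stream()
                .filter(id -> !referenced.contains(id))
                .collect(toImmutableSet());
    }

    public static void main(String[] args) {
        // fragment 0 reads from 1 and 2, fragment 1 reads from 3, the leaves read nothing
        Map<String, Set<String>> remoteSources = ImmutableMap.of(
                "0", ImmutableSet.of("1", "2"),
                "1", ImmutableSet.of("3"),
                "2", ImmutableSet.<String>of(),
                "3", ImmutableSet.<String>of());
        System.out.println(rootFragments(remoteSources)); // prints [0]
    }
}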
Use of com.facebook.presto.sql.planner.plan.PlanFragmentId in project presto by prestodb.
In the class MockRemoteTaskFactory, the method createTableScanTask:
public MockRemoteTask createTableScanTask(TaskId taskId, InternalNode newNode, List<Split> splits, NodeTaskMap.NodeStatsTracker nodeStatsTracker) {
    VariableReferenceExpression variable = new VariableReferenceExpression(Optional.empty(), "column", VARCHAR);
    PlanNodeId sourceId = new PlanNodeId("sourceId");
    // a single-fragment plan: one table scan over the testing connector producing one varchar column
    PlanFragment testFragment = new PlanFragment(
            new PlanFragmentId(0),
            new TableScanNode(
                    Optional.empty(),
                    sourceId,
                    new TableHandle(new ConnectorId("test"), new TestingTableHandle(), TestingTransactionHandle.create(), Optional.of(TestingHandle.INSTANCE)),
                    ImmutableList.of(variable),
                    ImmutableMap.of(variable, new TestingColumnHandle("column")),
                    TupleDomain.all(),
                    TupleDomain.all()),
            ImmutableSet.of(variable),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(sourceId),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(variable)),
            StageExecutionDescriptor.ungroupedExecution(),
            false,
            StatsAndCosts.empty(),
            Optional.empty());
    // assign every split to the single table scan node
    ImmutableMultimap.Builder<PlanNodeId, Split> initialSplits = ImmutableMultimap.builder();
    for (Split sourceSplit : splits) {
        initialSplits.put(sourceId, sourceSplit);
    }
    return createRemoteTask(TEST_SESSION, taskId, newNode, testFragment, initialSplits.build(), createInitialEmptyOutputBuffers(BROADCAST), nodeStatsTracker, true, new TableWriteInfo(Optional.empty(), Optional.empty(), Optional.empty()));
}
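The loop that builds initialSplits simply fans every split out under the one table-scan node id. A tiny standalone sketch of that Guava multimap pattern (String keys and values stand in for PlanNodeId and Split; the names are made up for illustration):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultimap;

public class SplitAssignmentSketch {
    public static void main(String[] args) {
        String sourceId = "sourceId";
        ImmutableMultimap.Builder<String, String> initialSplits = ImmutableMultimap.builder();
        for (String split : ImmutableList.of("split-1", "split-2", "split-3")) {
            initialSplits.put(sourceId, split);
        }
        // every split ends up grouped under the single source node id
        System.out.println(initialSplits.build()); // {sourceId=[split-1, split-2, split-3]}
    }
}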
Use of com.facebook.presto.sql.planner.plan.PlanFragmentId in project presto by prestodb.
In the class SectionExecutionFactory, the method createStreamingLinkedStageExecutions:
/**
 * Returns a list of StageExecutionAndSchedulers in a postorder representation of the tree.
 */
private List<StageExecutionAndScheduler> createStreamingLinkedStageExecutions(
        Session session,
        ExchangeLocationsConsumer parent,
        StreamingSubPlan plan,
        Function<PartitioningHandle, NodePartitionMap> partitioningCache,
        TableWriteInfo tableWriteInfo,
        Optional<SqlStageExecution> parentStageExecution,
        boolean summarizeTaskInfo,
        RemoteTaskFactory remoteTaskFactory,
        SplitSourceFactory splitSourceFactory,
        int attemptId) {
    ImmutableList.Builder<StageExecutionAndScheduler> stageExecutionAndSchedulers = ImmutableList.builder();
    PlanFragmentId fragmentId = plan.getFragment().getId();
    StageId stageId = new StageId(session.getQueryId(), fragmentId.getId());
    SqlStageExecution stageExecution = createSqlStageExecution(
            new StageExecutionId(stageId, attemptId),
            plan.getFragment(),
            remoteTaskFactory,
            session,
            summarizeTaskInfo,
            nodeTaskMap,
            executor,
            failureDetector,
            schedulerStats,
            tableWriteInfo);
    PartitioningHandle partitioningHandle = plan.getFragment().getPartitioning();
    List<RemoteSourceNode> remoteSourceNodes = plan.getFragment().getRemoteSourceNodes();
    Optional<int[]> bucketToPartition = getBucketToPartition(partitioningHandle, partitioningCache, plan.getFragment().getRoot(), remoteSourceNodes);
    // create child stages
    ImmutableSet.Builder<SqlStageExecution> childStagesBuilder = ImmutableSet.builder();
    for (StreamingSubPlan stagePlan : plan.getChildren()) {
        List<StageExecutionAndScheduler> subTree = createStreamingLinkedStageExecutions(
                session,
                stageExecution::addExchangeLocations,
                stagePlan.withBucketToPartition(bucketToPartition),
                partitioningCache,
                tableWriteInfo,
                Optional.of(stageExecution),
                summarizeTaskInfo,
                remoteTaskFactory,
                splitSourceFactory,
                attemptId);
        stageExecutionAndSchedulers.addAll(subTree);
        childStagesBuilder.add(getLast(subTree).getStageExecution());
    }
    Set<SqlStageExecution> childStageExecutions = childStagesBuilder.build();
    // once this stage reaches a terminal state, cancel its children
    stageExecution.addStateChangeListener(newState -> {
        if (newState.isDone()) {
            childStageExecutions.forEach(SqlStageExecution::cancel);
        }
    });
    StageLinkage stageLinkage = new StageLinkage(fragmentId, parent, childStageExecutions);
    StageScheduler stageScheduler = createStageScheduler(splitSourceFactory, session, plan, partitioningCache, parentStageExecution, stageId, stageExecution, partitioningHandle, tableWriteInfo, childStageExecutions);
    stageExecutionAndSchedulers.add(new StageExecutionAndScheduler(stageExecution, stageLinkage, stageScheduler));
    return stageExecutionAndSchedulers.build();
}
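The postorder layout promised in the javadoc is what makes getLast(subTree) safe: children are appended to the result before their parent, so the last element of every recursive result is the root stage of that subtree. A toy illustration of the same accumulation shape (Node and the names here are invented for the sketch, not Presto classes):

import com.google.common.collect.ImmutableList;

import java.util.List;

import static com.google.common.collect.Iterables.getLast;

public class PostorderSketch {
    static class Node {
        final String name;
        final List<Node> children;

        Node(String name, Node... children) {
            this.name = name;
            this.children = ImmutableList.copyOf(children);
        }
    }

    // mirrors createStreamingLinkedStageExecutions: children first, then this node
    static List<String> postorder(Node node) {
        ImmutableList.Builder<String> result = ImmutableList.builder();
        for (Node child : node.children) {
            List<String> subTree = postorder(child);
            result.addAll(subTree);
            // the child itself is always the last entry of its own subtree's list
            System.out.println("child root: " + getLast(subTree));
        }
        result.add(node.name);
        return result.build();
    }

    public static void main(String[] args) {
        Node plan = new Node("output", new Node("join", new Node("scan-a"), new Node("scan-b")));
        System.out.println(postorder(plan)); // [scan-a, scan-b, join, output]
    }
}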
Use of com.facebook.presto.sql.planner.plan.PlanFragmentId in project presto by prestodb.
In the class PhasedExecutionSchedule, the method extractPhases:
@VisibleForTesting
static List<Set<PlanFragmentId>> extractPhases(Collection<PlanFragment> fragments) {
    // Build a graph where the plan fragments are vertexes and the edges represent
    // a before -> after relationship. For example, a join hash build has an edge
    // to the join probe.
    DirectedGraph<PlanFragmentId, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
    fragments.forEach(fragment -> graph.addVertex(fragment.getId()));
    Visitor visitor = new Visitor(fragments, graph);
    for (PlanFragment fragment : fragments) {
        visitor.processFragment(fragment.getId());
    }
    // Computes all the strongly connected components of the directed graph.
    // These are the "phases" which hold the set of fragments that must be started
    // at the same time to avoid deadlock.
    List<Set<PlanFragmentId>> components = new StrongConnectivityInspector<>(graph).stronglyConnectedSets();
    Map<PlanFragmentId, Set<PlanFragmentId>> componentMembership = new HashMap<>();
    for (Set<PlanFragmentId> component : components) {
        for (PlanFragmentId planFragmentId : component) {
            componentMembership.put(planFragmentId, component);
        }
    }
    // build graph of components (phases)
    DirectedGraph<Set<PlanFragmentId>, DefaultEdge> componentGraph = new DefaultDirectedGraph<>(DefaultEdge.class);
    components.forEach(componentGraph::addVertex);
    for (DefaultEdge edge : graph.edgeSet()) {
        PlanFragmentId source = graph.getEdgeSource(edge);
        PlanFragmentId target = graph.getEdgeTarget(edge);
        Set<PlanFragmentId> from = componentMembership.get(source);
        Set<PlanFragmentId> to = componentMembership.get(target);
        if (!from.equals(to)) {
            // the topological order iterator below doesn't include vertices that have self-edges, so don't add them
            componentGraph.addEdge(from, to);
        }
    }
    List<Set<PlanFragmentId>> schedulePhases = ImmutableList.copyOf(new TopologicalOrderIterator<>(componentGraph));
    return schedulePhases;
}
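A self-contained sketch of the same phase extraction over plain String ids, using the same older JGraphT classes the method above relies on (DirectedGraph, StrongConnectivityInspector, TopologicalOrderIterator); the fragment names and edges are invented for illustration. A cycle, such as two fragments that must run concurrently, collapses into a single phase, and the phases come out in dependency order.

import com.google.common.collect.ImmutableList;

import org.jgrapht.DirectedGraph;
import org.jgrapht.alg.StrongConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;
import org.jgrapht.traverse.TopologicalOrderIterator;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class PhaseSketch {
    public static void main(String[] args) {
        // "build" and "probe" form a cycle; "probe" must be running before "output" can start
        DirectedGraph<String, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
        ImmutableList.of("build", "probe", "output").forEach(graph::addVertex);
        graph.addEdge("build", "probe");
        graph.addEdge("probe", "build");
        graph.addEdge("probe", "output");

        // strongly connected components are the phases
        List<Set<String>> components = new StrongConnectivityInspector<>(graph).stronglyConnectedSets();
        Map<String, Set<String>> membership = new HashMap<>();
        for (Set<String> component : components) {
            component.forEach(id -> membership.put(id, component));
        }

        // collapse the fragment graph into a graph of phases and order it topologically
        DirectedGraph<Set<String>, DefaultEdge> componentGraph = new DefaultDirectedGraph<>(DefaultEdge.class);
        components.forEach(componentGraph::addVertex);
        for (DefaultEdge edge : graph.edgeSet()) {
            Set<String> from = membership.get(graph.getEdgeSource(edge));
            Set<String> to = membership.get(graph.getEdgeTarget(edge));
            if (!from.equals(to)) {
                componentGraph.addEdge(from, to);
            }
        }
        List<Set<String>> phases = ImmutableList.copyOf(new TopologicalOrderIterator<>(componentGraph));
        // first phase: build and probe together; second phase: output
        System.out.println(phases);
    }
}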