Use of io.trino.sql.planner.PlanFragment in project trino by trinodb.
From class HttpRemoteTask, method sendUpdate:
private synchronized void sendUpdate()
{
    TaskStatus taskStatus = getTaskStatus();
    // don't update if the task hasn't been started yet or if it is already finished
    if (!started.get() || !needsUpdate.get() || taskStatus.getState().isDone()) {
        return;
    }

    // currentRequest is always cleared when request is complete
    if (currentRequest != null) {
        return;
    }

    // if throttled due to error, asynchronously wait for timeout and try again
    ListenableFuture<Void> errorRateLimit = updateErrorTracker.acquireRequestPermit();
    if (!errorRateLimit.isDone()) {
        errorRateLimit.addListener(this::sendUpdate, executor);
        return;
    }

    List<SplitAssignment> splitAssignments = getSplitAssignments();
    VersionedDynamicFilterDomains dynamicFilterDomains = outboundDynamicFiltersCollector.acknowledgeAndGetNewDomains(sentDynamicFiltersVersion);

    // Workers don't need the embedded JSON representation when the fragment is sent
    Optional<PlanFragment> fragment = sendPlan.get()
            ? Optional.of(planFragment.withoutEmbeddedJsonRepresentation())
            : Optional.empty();
    TaskUpdateRequest updateRequest = new TaskUpdateRequest(
            session.toSessionRepresentation(),
            session.getIdentity().getExtraCredentials(),
            fragment,
            splitAssignments,
            outputBuffers.get(),
            dynamicFilterDomains.getDynamicFilterDomains());
    byte[] taskUpdateRequestJson = taskUpdateRequestCodec.toJsonBytes(updateRequest);

    if (fragment.isPresent()) {
        stats.updateWithPlanBytes(taskUpdateRequestJson.length);
    }
    if (!dynamicFilterDomains.getDynamicFilterDomains().isEmpty()) {
        stats.updateWithDynamicFilterBytes(taskUpdateRequestJson.length);
    }

    HttpUriBuilder uriBuilder = getHttpUriBuilder(taskStatus);
    Request request = preparePost()
            .setUri(uriBuilder.build())
            .setHeader(HttpHeaders.CONTENT_TYPE, MediaType.JSON_UTF_8.toString())
            .setBodyGenerator(createStaticBodyGenerator(taskUpdateRequestJson))
            .build();

    updateErrorTracker.startRequest();

    ListenableFuture<JsonResponse<TaskInfo>> future = httpClient.executeAsync(request, createFullJsonResponseHandler(taskInfoCodec));
    currentRequest = future;
    currentRequestStartNanos = System.nanoTime();

    // The needsUpdate flag needs to be set to false BEFORE adding the Future callback since the callback
    // might change the flag value, and does so without grabbing the instance lock.
    needsUpdate.set(false);
    Futures.addCallback(
            future,
            new SimpleHttpResponseHandler<>(new UpdateResponseHandler(splitAssignments, dynamicFilterDomains.getVersion()), request.getUri(), stats),
            executor);
}
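The throttling branch above leans on a small Guava idiom: acquireRequestPermit returns a ListenableFuture that is already complete when no backoff is pending; otherwise sendUpdate registers itself as a listener and returns, so the update is retried once the timeout elapses. Below is a minimal self-contained sketch of that pattern; the RetryAfterBackoff class, its AtomicLong bookkeeping, and the flat one-second penalty are illustrative stand-ins for Trino's RequestErrorTracker, not its actual implementation.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class RetryAfterBackoff
{
    private final ListeningScheduledExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadScheduledExecutor());
    private final AtomicLong earliestNextRequestNanos = new AtomicLong();

    // Stand-in for RequestErrorTracker.acquireRequestPermit(): an already-complete
    // future when no backoff is pending, otherwise one that completes after the wait
    private ListenableFuture<Void> acquireRequestPermit()
    {
        long waitNanos = earliestNextRequestNanos.get() - System.nanoTime();
        if (waitNanos <= 0) {
            return Futures.immediateFuture(null);
        }
        return executor.schedule(() -> (Void) null, waitNanos, TimeUnit.NANOSECONDS);
    }

    public void sendUpdate()
    {
        ListenableFuture<Void> permit = acquireRequestPermit();
        if (!permit.isDone()) {
            // re-enter sendUpdate once the backoff elapses; this attempt gives up its turn
            permit.addListener(this::sendUpdate, executor);
            return;
        }
        System.out.println("sending task update");
        // ... build and send the HTTP request here ...
    }

    // called from the response handler on failure: push the next attempt out by a second
    public void requestFailed()
    {
        earliestNextRequestNanos.set(System.nanoTime() + TimeUnit.SECONDS.toNanos(1));
    }
}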
Use of io.trino.sql.planner.PlanFragment in project trino by trinodb.
From class AllAtOnceExecutionSchedule, method getPreferredScheduleOrder:
@VisibleForTesting
static List<PlanFragmentId> getPreferredScheduleOrder(Collection<PlanFragment> fragments)
{
    // determine output fragment
    Set<PlanFragmentId> remoteSources = fragments.stream()
            .map(PlanFragment::getRemoteSourceNodes)
            .flatMap(Collection::stream)
            .map(RemoteSourceNode::getSourceFragmentIds)
            .flatMap(Collection::stream)
            .collect(toImmutableSet());

    Set<PlanFragment> rootFragments = fragments.stream()
            .filter(fragment -> !remoteSources.contains(fragment.getId()))
            .collect(toImmutableSet());

    Visitor visitor = new Visitor(fragments);
    rootFragments.forEach(fragment -> visitor.processFragment(fragment.getId()));

    return visitor.getSchedulerOrder();
}
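The root detection here reduces to a set difference: every fragment id mentioned as a remote source is fed by some other fragment, so the roots are exactly the fragments nobody reads from. A minimal sketch of the same computation over a toy map, with plain String ids standing in for PlanFragmentId:

import static java.util.stream.Collectors.toSet;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class RootFragments
{
    // keys are fragment ids; values are the fragment ids each one reads from
    static Set<String> roots(Map<String, List<String>> remoteSourcesByFragment)
    {
        Set<String> referenced = remoteSourcesByFragment.values().stream()
                .flatMap(Collection::stream)
                .collect(toSet());
        return remoteSourcesByFragment.keySet().stream()
                .filter(id -> !referenced.contains(id))
                .collect(toSet());
    }

    public static void main(String[] args)
    {
        // fragment 0 reads from 1 and 2; 1 reads from 3; 2 and 3 are leaves
        Map<String, List<String>> sources = Map.of(
                "0", List.of("1", "2"),
                "1", List.of("3"),
                "2", List.of(),
                "3", List.of());
        System.out.println(roots(sources)); // [0]
    }
}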
Use of io.trino.sql.planner.PlanFragment in project trino by trinodb.
From class LegacyPhasedExecutionSchedule, method extractPhases:
@VisibleForTesting
static List<Set<PlanFragmentId>> extractPhases(Collection<PlanFragment> fragments)
{
    // Build a graph where the plan fragments are vertexes and the edges represent
    // a before -> after relationship. For example, a join hash build has an edge
    // to the join probe.
    DirectedGraph<PlanFragmentId, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
    fragments.forEach(fragment -> graph.addVertex(fragment.getId()));

    Visitor visitor = new Visitor(fragments, graph);
    for (PlanFragment fragment : fragments) {
        visitor.processFragment(fragment.getId());
    }

    // Compute all the strongly connected components of the directed graph.
    // These are the "phases" which hold the set of fragments that must be started
    // at the same time to avoid deadlock.
    List<Set<PlanFragmentId>> components = new StrongConnectivityInspector<>(graph).stronglyConnectedSets();

    Map<PlanFragmentId, Set<PlanFragmentId>> componentMembership = new HashMap<>();
    for (Set<PlanFragmentId> component : components) {
        for (PlanFragmentId planFragmentId : component) {
            componentMembership.put(planFragmentId, component);
        }
    }

    // build graph of components (phases)
    DirectedGraph<Set<PlanFragmentId>, DefaultEdge> componentGraph = new DefaultDirectedGraph<>(DefaultEdge.class);
    components.forEach(componentGraph::addVertex);
    for (DefaultEdge edge : graph.edgeSet()) {
        PlanFragmentId source = graph.getEdgeSource(edge);
        PlanFragmentId target = graph.getEdgeTarget(edge);

        Set<PlanFragmentId> from = componentMembership.get(source);
        Set<PlanFragmentId> to = componentMembership.get(target);
        if (!from.equals(to)) {
            // the topological order iterator below doesn't include vertices that have self-edges, so don't add them
            componentGraph.addEdge(from, to);
        }
    }

    List<Set<PlanFragmentId>> schedulePhases = ImmutableList.copyOf(new TopologicalOrderIterator<>(componentGraph));
    return schedulePhases;
}
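For intuition, here is a self-contained JGraphT sketch of the same condensation trick: find the strongly connected components, collapse each to a single vertex, then read the phases off a topological order of the collapsed graph. Note the snippet above uses an older JGraphT API; current releases (assumed here) spell the inspector KosarajuStrongConnectivityInspector and use the Graph interface rather than DirectedGraph.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.jgrapht.Graph;
import org.jgrapht.alg.connectivity.KosarajuStrongConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;
import org.jgrapht.traverse.TopologicalOrderIterator;

public class Phases
{
    public static void main(String[] args)
    {
        Graph<String, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
        List.of("build", "probe", "source").forEach(graph::addVertex);
        graph.addEdge("build", "probe");  // build must start before probe...
        graph.addEdge("probe", "build");  // ...but they deadlock unless co-scheduled
        graph.addEdge("source", "build");

        // each strongly connected component is one phase
        List<Set<String>> phases =
                new KosarajuStrongConnectivityInspector<>(graph).stronglyConnectedSets();

        Map<String, Set<String>> phaseOf = new HashMap<>();
        phases.forEach(phase -> phase.forEach(vertex -> phaseOf.put(vertex, phase)));

        // condensation: collapse each component to one vertex, drop intra-component edges
        Graph<Set<String>, DefaultEdge> condensed = new DefaultDirectedGraph<>(DefaultEdge.class);
        phases.forEach(condensed::addVertex);
        for (DefaultEdge edge : graph.edgeSet()) {
            Set<String> from = phaseOf.get(graph.getEdgeSource(edge));
            Set<String> to = phaseOf.get(graph.getEdgeTarget(edge));
            if (!from.equals(to)) {
                condensed.addEdge(from, to);
            }
        }
        // prints [source], then the co-scheduled pair [build, probe] (element order within a set is unspecified)
        new TopologicalOrderIterator<>(condensed).forEachRemaining(System.out::println);
    }
}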
Use of io.trino.sql.planner.PlanFragment in project trino by trinodb.
From class PhasedExecutionSchedule, method extractDependenciesAndReturnNonLazyFragments:
private Set<PlanFragmentId> extractDependenciesAndReturnNonLazyFragments(Collection<StageExecution> stages)
{
    if (stages.isEmpty()) {
        return ImmutableSet.of();
    }

    QueryId queryId = stages.stream()
            .map(stage -> stage.getStageId().getQueryId())
            .findAny()
            .orElseThrow();
    List<PlanFragment> fragments = stages.stream()
            .map(StageExecution::getFragment)
            .collect(toImmutableList());

    // Build a graph where the plan fragments are vertexes and the edges represent
    // a before -> after relationship. Destination fragment should be started only
    // when source fragment is completed. For example, a join hash build has an edge
    // to the join probe.
    Visitor visitor = new Visitor(queryId, fragments);
    visitor.processAllFragments();

    // Make sure there are no strongly connected components, as that would mean a circular dependency between stages
    List<Set<PlanFragmentId>> components = new StrongConnectivityInspector<>(fragmentDependency).stronglyConnectedSets();
    verify(components.size() == fragmentDependency.vertexSet().size(), "circular dependency between stages");

    return visitor.getNonLazyFragments();
}
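The verify call relies on a standard graph fact: a directed graph is acyclic exactly when every strongly connected component contains a single vertex, i.e. when the component count equals the vertex count. A small illustration, again assuming JGraphT's current KosarajuStrongConnectivityInspector rather than the class named in the snippet:

import org.jgrapht.Graph;
import org.jgrapht.alg.connectivity.KosarajuStrongConnectivityInspector;
import org.jgrapht.graph.DefaultDirectedGraph;
import org.jgrapht.graph.DefaultEdge;

public class AcyclicCheck
{
    static <V, E> boolean isAcyclic(Graph<V, E> graph)
    {
        int components = new KosarajuStrongConnectivityInspector<>(graph)
                .stronglyConnectedSets()
                .size();
        return components == graph.vertexSet().size();
    }

    public static void main(String[] args)
    {
        Graph<Integer, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
        graph.addVertex(1);
        graph.addVertex(2);
        graph.addEdge(1, 2);
        System.out.println(isAcyclic(graph)); // true: two components, two vertices
        graph.addEdge(2, 1);
        System.out.println(isAcyclic(graph)); // false: {1, 2} collapses into one component
    }
}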
Use of io.trino.sql.planner.PlanFragment in project trino by trinodb.
From class TestSqlStage, method createExchangePlanFragment:
private static PlanFragment createExchangePlanFragment()
{
    PlanNode planNode = new RemoteSourceNode(
            new PlanNodeId("exchange"),
            ImmutableList.of(new PlanFragmentId("source")),
            ImmutableList.of(new Symbol("column")),
            Optional.empty(),
            REPARTITION,
            RetryPolicy.NONE);

    ImmutableMap.Builder<Symbol, Type> types = ImmutableMap.builder();
    for (Symbol symbol : planNode.getOutputSymbols()) {
        types.put(symbol, VARCHAR);
    }

    return new PlanFragment(
            new PlanFragmentId("exchange_fragment_id"),
            planNode,
            types.buildOrThrow(),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(planNode.getId()),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), planNode.getOutputSymbols()),
            ungroupedExecution(),
            StatsAndCosts.empty(),
            Optional.empty());
}