Use of io.trino.sql.planner.plan.PlanFragmentId in project trino by trinodb.
From the class TestPhasedExecutionSchedule, method testBroadcastSourceJoin.
@Test
public void testBroadcastSourceJoin() {
    PlanFragment buildFragment = createTableScanPlanFragment("build");
    PlanFragment joinSourceFragment = createBroadcastJoinPlanFragment("probe", buildFragment);

    TestingStageExecution buildStage = new TestingStageExecution(buildFragment);
    TestingStageExecution joinSourceStage = new TestingStageExecution(joinSourceFragment);

    PhasedExecutionSchedule schedule = PhasedExecutionSchedule.forStages(ImmutableSet.of(joinSourceStage, buildStage), dynamicFilterService);
    assertThat(schedule.getSortedFragments()).containsExactly(buildFragment.getId(), joinSourceFragment.getId());

    // single dependency between build and join stages
    DirectedGraph<PlanFragmentId, FragmentsEdge> dependencies = schedule.getFragmentDependency();
    assertThat(dependencies.edgeSet()).containsExactlyInAnyOrder(new FragmentsEdge(buildFragment.getId(), joinSourceFragment.getId()));

    // build stage should start immediately
    assertThat(getActiveFragments(schedule)).containsExactly(buildFragment.getId());

    // join stage should start after build stage buffer is full
    buildStage.setAnyTaskBlocked(true);
    schedule.schedule();
    assertThat(getActiveFragments(schedule)).containsExactly(buildFragment.getId(), joinSourceFragment.getId());
}
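The schedule above keys its dependency graph and its active-fragment assertions on PlanFragmentId. As a minimal sketch (not part of the Trino test, and assuming PlanFragmentId keeps the usual value semantics of a single string id with equals/hashCode), this is why two ids built from the same string can be used interchangeably as graph vertices and set elements:

import static org.assertj.core.api.Assertions.assertThat;

import com.google.common.collect.ImmutableSet;
import io.trino.sql.planner.plan.PlanFragmentId;

public class PlanFragmentIdSketch {
    public static void main(String[] args) {
        // assumption: PlanFragmentId is a simple string-keyed value class
        PlanFragmentId first = new PlanFragmentId("build");
        PlanFragmentId second = new PlanFragmentId("build");
        // ids built from the same string compare equal ...
        assertThat(first).isEqualTo(second);
        // ... and deduplicate when used as set elements or graph vertices
        assertThat(ImmutableSet.of(first, second)).hasSize(1);
    }
}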
Use of io.trino.sql.planner.plan.PlanFragmentId in project trino by trinodb.
From the class TestPhasedExecutionSchedule, method testStageWithBroadcastAndPartitionedJoin.
@Test
public void testStageWithBroadcastAndPartitionedJoin() {
    PlanFragment broadcastBuildFragment = createTableScanPlanFragment("broadcast_build");
    PlanFragment partitionedBuildFragment = createTableScanPlanFragment("partitioned_build");
    PlanFragment probeFragment = createTableScanPlanFragment("probe");
    PlanFragment joinFragment = createBroadcastAndPartitionedJoinPlanFragment("join", broadcastBuildFragment, partitionedBuildFragment, probeFragment);

    TestingStageExecution broadcastBuildStage = new TestingStageExecution(broadcastBuildFragment);
    TestingStageExecution partitionedBuildStage = new TestingStageExecution(partitionedBuildFragment);
    TestingStageExecution probeStage = new TestingStageExecution(probeFragment);
    TestingStageExecution joinStage = new TestingStageExecution(joinFragment);

    PhasedExecutionSchedule schedule = PhasedExecutionSchedule.forStages(
            ImmutableSet.of(broadcastBuildStage, partitionedBuildStage, probeStage, joinStage),
            dynamicFilterService);

    // join stage should start immediately because partitioned join forces that
    DirectedGraph<PlanFragmentId, FragmentsEdge> dependencies = schedule.getFragmentDependency();
    assertThat(dependencies.edgeSet()).containsExactlyInAnyOrder(
            new FragmentsEdge(broadcastBuildFragment.getId(), probeFragment.getId()),
            new FragmentsEdge(partitionedBuildFragment.getId(), probeFragment.getId()),
            new FragmentsEdge(broadcastBuildFragment.getId(), joinFragment.getId()));
    assertThat(getActiveFragments(schedule)).containsExactly(partitionedBuildFragment.getId(), broadcastBuildFragment.getId(), joinFragment.getId());

    // completing a single build dependency shouldn't cause the probe stage to start
    broadcastBuildStage.setState(FLUSHING);
    schedule.schedule();
    assertThat(getActiveFragments(schedule)).containsExactly(partitionedBuildFragment.getId(), joinFragment.getId());

    // completing all build dependencies should cause the probe stage to start
    partitionedBuildStage.setState(FLUSHING);
    schedule.schedule();
    assertThat(getActiveFragments(schedule)).containsExactly(joinFragment.getId(), probeFragment.getId());
}
Use of io.trino.sql.planner.plan.PlanFragmentId in project trino by trinodb.
From the class TestStageStateMachine, method createValuesPlan.
private static PlanFragment createValuesPlan() {
    Symbol symbol = new Symbol("column");
    PlanNodeId valuesNodeId = new PlanNodeId("plan");
    PlanFragment planFragment = new PlanFragment(
            new PlanFragmentId("plan"),
            new ValuesNode(valuesNodeId, ImmutableList.of(symbol), ImmutableList.of(new Row(ImmutableList.of(new StringLiteral("foo"))))),
            ImmutableMap.of(symbol, VARCHAR),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(valuesNodeId),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(symbol)),
            ungroupedExecution(),
            StatsAndCosts.empty(),
            Optional.empty());
    return planFragment;
}
Use of io.trino.sql.planner.plan.PlanFragmentId in project trino by trinodb.
From the class TestInput, method testRoundTrip.
@Test
public void testRoundTrip() {
    Input expected = new Input(
            "connectorId",
            "schema",
            "table",
            Optional.empty(),
            ImmutableList.of(new Column("column1", "string"), new Column("column2", "string"), new Column("column3", "string")),
            new PlanFragmentId("fragment"),
            new PlanNodeId("plan-node"));

    String json = codec.toJson(expected);
    Input actual = codec.fromJson(json);

    assertEquals(actual, expected);
}
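The codec field is declared outside this snippet. A minimal setup sketch, assuming it is a plain Airlift JsonCodec for the Input class (and assuming Input lives in io.trino.execution); the class name here is only illustrative:

import io.airlift.json.JsonCodec;
import io.trino.execution.Input;

public class InputCodecSketch {
    // assumption: an Airlift JSON codec is what backs the round trip above
    private static final JsonCodec<Input> codec = JsonCodec.jsonCodec(Input.class);
}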
Use of io.trino.sql.planner.plan.PlanFragmentId in project trino by trinodb.
From the class TestingTaskSourceFactory, method getHandlesForRemoteSources.
private static ListMultimap<PlanNodeId, ExchangeSourceHandle> getHandlesForRemoteSources(
        List<RemoteSourceNode> remoteSources,
        Multimap<PlanFragmentId, ExchangeSourceHandle> exchangeSourceHandles) {
    ImmutableListMultimap.Builder<PlanNodeId, ExchangeSourceHandle> result = ImmutableListMultimap.builder();
    for (RemoteSourceNode remoteSource : remoteSources) {
        checkArgument(remoteSource.getExchangeType() == REPLICATE, "expected exchange type to be REPLICATE, got: %s", remoteSource.getExchangeType());
        for (PlanFragmentId fragmentId : remoteSource.getSourceFragmentIds()) {
            Collection<ExchangeSourceHandle> handles = requireNonNull(exchangeSourceHandles.get(fragmentId), () -> "exchange source handle is missing for fragment: " + fragmentId);
            checkArgument(handles.size() == 1, "single exchange source handle is expected, got: %s", handles);
            result.putAll(remoteSource.getId(), handles);
        }
    }
    return result.build();
}
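For illustration only, a hedged sketch of the remapping this helper performs, from fragment-id-keyed handles to plan-node-id-keyed handles. The record types below are hypothetical stand-ins for the real PlanFragmentId, PlanNodeId, and ExchangeSourceHandle; only the shape of the remapping and its one-handle-per-fragment invariant are taken from the code above.

import static com.google.common.base.Preconditions.checkArgument;

import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ListMultimap;
import java.util.List;

public class RemapSketch {
    // hypothetical stand-ins; the real helper uses PlanFragmentId, PlanNodeId and ExchangeSourceHandle
    record FragmentId(String id) {}
    record NodeId(String id) {}
    record Handle(String name) {}

    public static void main(String[] args) {
        // handles keyed by the fragment that produced them
        ListMultimap<FragmentId, Handle> handlesByFragment = ImmutableListMultimap.of(new FragmentId("build"), new Handle("h1"));
        // a remote source node "remote" that reads from fragment "build"
        NodeId remoteSourceNodeId = new NodeId("remote");
        List<FragmentId> sourceFragmentIds = List.of(new FragmentId("build"));

        ImmutableListMultimap.Builder<NodeId, Handle> handlesByNode = ImmutableListMultimap.builder();
        for (FragmentId fragmentId : sourceFragmentIds) {
            List<Handle> handles = handlesByFragment.get(fragmentId);
            // same invariant as the helper above: a replicated remote source expects exactly one handle per source fragment
            checkArgument(handles.size() == 1, "single exchange source handle is expected, got: %s", handles);
            handlesByNode.putAll(remoteSourceNodeId, handles);
        }
        System.out.println(handlesByNode.build());
    }
}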