Use of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex in project flink by apache.
The class SchedulingPipelinedRegionComputeUtilTest, method testMultipleComponentsViaCascadeOfJoins.
/**
* Cascades of joins with partially blocking, partially pipelined exchanges.
*
* <pre>
*     (1)--+
*          +--(5)-+
*     (2)--+      |
*              (blocking)
*                 |
*                 +--(7)
*                 |
*              (blocking)
*     (3)--+      |
*          +--(6)-+
*     (4)--+
* </pre>
*
* <p>Component 1: 1, 2, 5; component 2: 3, 4, 6; component 3: 7
*/
@Test
public void testMultipleComponentsViaCascadeOfJoins() {
TestingSchedulingTopology topology = new TestingSchedulingTopology();
TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();
topology.connect(v1, v5, ResultPartitionType.PIPELINED)
        .connect(v2, v5, ResultPartitionType.PIPELINED)
        .connect(v3, v6, ResultPartitionType.PIPELINED)
        .connect(v4, v6, ResultPartitionType.PIPELINED)
        .connect(v5, v7, ResultPartitionType.BLOCKING)
        .connect(v6, v7, ResultPartitionType.BLOCKING);
Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);
Set<SchedulingExecutionVertex> r1 = pipelinedRegionByVertex.get(v1.getId());
Set<SchedulingExecutionVertex> r2 = pipelinedRegionByVertex.get(v2.getId());
Set<SchedulingExecutionVertex> r3 = pipelinedRegionByVertex.get(v3.getId());
Set<SchedulingExecutionVertex> r4 = pipelinedRegionByVertex.get(v4.getId());
Set<SchedulingExecutionVertex> r5 = pipelinedRegionByVertex.get(v5.getId());
Set<SchedulingExecutionVertex> r6 = pipelinedRegionByVertex.get(v6.getId());
Set<SchedulingExecutionVertex> r7 = pipelinedRegionByVertex.get(v7.getId());
assertSameRegion(r1, r2, r5);
assertSameRegion(r3, r4, r6);
assertDistinctRegions(r1, r3, r7);
}
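The computePipelinedRegionByVertex helper referenced above is not part of this snippet. Below is a minimal, hypothetical sketch of such a helper (the name and structure are assumptions, not the actual Flink test code): it takes precomputed pipelined regions, however they were obtained, as a Set<Set<SchedulingExecutionVertex>> and indexes each region by the ids of its member vertices so the assertions can look up a vertex's region.
// Hypothetical helper for the test class above; requires java.util.HashMap, Map and Set imports
// in addition to the Flink scheduler strategy types already used in the snippet.
private static Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> toRegionByVertex(
        Set<Set<SchedulingExecutionVertex>> regions) {
    final Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regionByVertex = new HashMap<>();
    for (Set<SchedulingExecutionVertex> region : regions) {
        for (SchedulingExecutionVertex vertex : region) {
            // Every member vertex points at the same region set, so lookups by id return it directly.
            regionByVertex.put(vertex.getId(), region);
        }
    }
    return regionByVertex;
}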
Use of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex in project flink by apache.
The class SchedulingPipelinedRegionComputeUtilTest, method testEmbarrassinglyParallelCase.
/**
* Tests that embarrassingly parallel chains of vertices each form their own pipelined region.
*
* <pre>
* (a1) --> (b1)
*
* (a2) --> (b2)
*
* (a3) --> (b3)
* </pre>
*/
@Test
public void testEmbarrassinglyParallelCase() {
TestingSchedulingTopology topology = new TestingSchedulingTopology();
TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex va3 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
TestingSchedulingExecutionVertex vb3 = topology.newExecutionVertex();
topology.connect(va1, vb1, ResultPartitionType.PIPELINED)
        .connect(va2, vb2, ResultPartitionType.PIPELINED)
        .connect(va3, vb3, ResultPartitionType.PIPELINED);
Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);
Set<SchedulingExecutionVertex> ra1 = pipelinedRegionByVertex.get(va1.getId());
Set<SchedulingExecutionVertex> ra2 = pipelinedRegionByVertex.get(va2.getId());
Set<SchedulingExecutionVertex> ra3 = pipelinedRegionByVertex.get(va3.getId());
Set<SchedulingExecutionVertex> rb1 = pipelinedRegionByVertex.get(vb1.getId());
Set<SchedulingExecutionVertex> rb2 = pipelinedRegionByVertex.get(vb2.getId());
Set<SchedulingExecutionVertex> rb3 = pipelinedRegionByVertex.get(vb3.getId());
assertSameRegion(ra1, rb1);
assertSameRegion(ra2, rb2);
assertSameRegion(ra3, rb3);
assertDistinctRegions(ra1, ra2, ra3);
}
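The assertSameRegion and assertDistinctRegions helpers are likewise referenced without their definitions. A possible sketch, assuming JUnit 4 static imports and comparing regions by their member sets (the real test utilities may compare region instances instead):
// Hypothetical assertion helpers mirroring the calls in the two tests above.
// Assumes static imports of org.junit.Assert.assertEquals and org.junit.Assert.assertNotEquals.
@SafeVarargs
private static void assertSameRegion(Set<SchedulingExecutionVertex>... regions) {
    for (int i = 1; i < regions.length; i++) {
        assertEquals("expected vertices to share one pipelined region", regions[0], regions[i]);
    }
}

@SafeVarargs
private static void assertDistinctRegions(Set<SchedulingExecutionVertex>... regions) {
    for (int i = 0; i < regions.length; i++) {
        for (int j = i + 1; j < regions.length; j++) {
            assertNotEquals("expected vertices to belong to distinct regions", regions[i], regions[j]);
        }
    }
}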
Use of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex in project flink by apache.
The class LocalInputPreferredSlotSharingStrategyTest, method testGetExecutionSlotSharingGroupOfLateAttachedVertices.
@Test
public void testGetExecutionSlotSharingGroupOfLateAttachedVertices() {
JobVertexID jobVertexID1 = new JobVertexID();
JobVertexID jobVertexID2 = new JobVertexID();
JobVertexID jobVertexID3 = new JobVertexID();
final SlotSharingGroup slotSharingGroup1 = new SlotSharingGroup();
slotSharingGroup1.addVertexToGroup(jobVertexID1);
slotSharingGroup1.addVertexToGroup(jobVertexID2);
final SlotSharingGroup slotSharingGroup2 = new SlotSharingGroup();
slotSharingGroup2.addVertexToGroup(jobVertexID3);
TestingSchedulingTopology topology = new TestingSchedulingTopology();
TestingSchedulingExecutionVertex ev1 = topology.newExecutionVertex(jobVertexID1, 0);
TestingSchedulingExecutionVertex ev2 = topology.newExecutionVertex(jobVertexID2, 0);
topology.connect(ev1, ev2);
final LocalInputPreferredSlotSharingStrategy strategy =
        new LocalInputPreferredSlotSharingStrategy(
                topology,
                new HashSet<>(Arrays.asList(slotSharingGroup1, slotSharingGroup2)),
                Collections.emptySet());
assertThat(strategy.getExecutionSlotSharingGroups().size(), is(1));
assertThat(strategy.getExecutionSlotSharingGroup(ev1.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
assertThat(strategy.getExecutionSlotSharingGroup(ev2.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
// add new job vertices and notify scheduling topology updated
TestingSchedulingExecutionVertex ev3 = topology.newExecutionVertex(jobVertexID3, 0);
topology.connect(ev2, ev3, ResultPartitionType.BLOCKING);
strategy.notifySchedulingTopologyUpdated(topology, Collections.singletonList(ev3.getId()));
assertThat(strategy.getExecutionSlotSharingGroups().size(), is(2));
assertThat(strategy.getExecutionSlotSharingGroup(ev1.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
assertThat(strategy.getExecutionSlotSharingGroup(ev2.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
assertThat(strategy.getExecutionSlotSharingGroup(ev3.getId()).getExecutionVertexIds(), containsInAnyOrder(ev3.getId()));
}
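The group-membership assertions above all follow one pattern and could be folded into a small convenience check. A hypothetical sketch built only on the strategy methods already called in this snippet (the helper itself is not part of the Flink test):
// Hypothetical convenience assertion: each given execution vertex must be mapped to an
// ExecutionSlotSharingGroup containing exactly the given vertices (Hamcrest, as in the test above).
private static void assertSharedGroup(
        LocalInputPreferredSlotSharingStrategy strategy, ExecutionVertexID... ids) {
    for (ExecutionVertexID id : ids) {
        assertThat(
                strategy.getExecutionSlotSharingGroup(id).getExecutionVertexIds(),
                containsInAnyOrder(ids));
    }
}
With such a helper, the final assertions would read assertSharedGroup(strategy, ev1.getId(), ev2.getId()) and assertSharedGroup(strategy, ev3.getId()).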
Use of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex in project flink by apache.
The class RegionPartitionGroupReleaseStrategyTest, method releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished.
@Test
public void releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished() {
final List<TestingSchedulingExecutionVertex> sourceVertices = testingSchedulingTopology.addExecutionVertices().finish();
final List<TestingSchedulingExecutionVertex> intermediateVertices = testingSchedulingTopology.addExecutionVertices().finish();
final List<TestingSchedulingExecutionVertex> sinkVertices = testingSchedulingTopology.addExecutionVertices().finish();
final List<TestingSchedulingResultPartition> sourceResultPartitions = testingSchedulingTopology.connectAllToAll(sourceVertices, intermediateVertices).finish();
testingSchedulingTopology.connectAllToAll(intermediateVertices, sinkVertices).withResultPartitionType(ResultPartitionType.PIPELINED).finish();
final ExecutionVertexID onlyIntermediateVertexId = intermediateVertices.get(0).getId();
final ExecutionVertexID onlySinkVertexId = sinkVertices.get(0).getId();
final IntermediateResultPartitionID onlySourceResultPartitionId = sourceResultPartitions.get(0).getId();
final RegionPartitionGroupReleaseStrategy regionPartitionGroupReleaseStrategy = new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);
regionPartitionGroupReleaseStrategy.vertexFinished(onlyIntermediateVertexId);
final List<IntermediateResultPartitionID> partitionsToRelease = getReleasablePartitions(regionPartitionGroupReleaseStrategy, onlySinkVertexId);
assertThat(partitionsToRelease, contains(onlySourceResultPartitionId));
}
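The getReleasablePartitions helper is referenced here but not defined in the snippet. A plausible sketch, under the assumption that vertexFinished(...) returns the groups of consumed partitions that became releasable and that each group is iterable over IntermediateResultPartitionIDs (verify against the actual Flink API before reusing):
// Hypothetical helper: flattens the partition groups reported as releasable after the given
// vertex finished into a single list of partition ids. The return type of vertexFinished(...)
// is an assumption here; requires java.util.ArrayList and java.util.List imports.
private static List<IntermediateResultPartitionID> getReleasablePartitions(
        RegionPartitionGroupReleaseStrategy strategy, ExecutionVertexID finishedVertex) {
    final List<IntermediateResultPartitionID> releasable = new ArrayList<>();
    for (Iterable<IntermediateResultPartitionID> partitionGroup : strategy.vertexFinished(finishedVertex)) {
        partitionGroup.forEach(releasable::add);
    }
    return releasable;
}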
Use of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex in project flink by apache.
The class RegionPartitionGroupReleaseStrategyTest, method notReleasePartitionsIfDownstreamRegionIsNotFinished.
@Test
public void notReleasePartitionsIfDownstreamRegionIsNotFinished() {
final List<TestingSchedulingExecutionVertex> producers = testingSchedulingTopology.addExecutionVertices().finish();
final List<TestingSchedulingExecutionVertex> consumers = testingSchedulingTopology.addExecutionVertices().withParallelism(2).finish();
testingSchedulingTopology.connectAllToAll(producers, consumers).finish();
final ExecutionVertexID consumerVertex1 = consumers.get(0).getId();
final RegionPartitionGroupReleaseStrategy regionPartitionGroupReleaseStrategy = new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);
final List<IntermediateResultPartitionID> partitionsToRelease = getReleasablePartitions(regionPartitionGroupReleaseStrategy, consumerVertex1);
assertThat(partitionsToRelease, is(empty()));
}
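As a counterpart to this negative test, one would expect the producers' blocking partition to become releasable once every consumer has finished. The sketch below is hypothetical and not taken from the Flink test suite; it assumes the builder defaults match the tests above and that the getReleasablePartitions helper behaves as sketched earlier.
// Hypothetical follow-up test: releasing happens only after the last consumer region finishes.
@Test
public void releasePartitionsOnceAllConsumersAreFinished() {
    final List<TestingSchedulingExecutionVertex> producers = testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingExecutionVertex> consumers = testingSchedulingTopology.addExecutionVertices().withParallelism(2).finish();
    final List<TestingSchedulingResultPartition> resultPartitions = testingSchedulingTopology.connectAllToAll(producers, consumers).finish();
    final RegionPartitionGroupReleaseStrategy strategy = new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);
    // Finishing only the first consumer should not yet release anything (as asserted above).
    strategy.vertexFinished(consumers.get(0).getId());
    // Once the second (last) consumer finishes, the producer partition is expected to be releasable.
    final List<IntermediateResultPartitionID> partitionsToRelease = getReleasablePartitions(strategy, consumers.get(1).getId());
    assertThat(partitionsToRelease, contains(resultPartitions.get(0).getId()));
}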