Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk: class RegionPartitionGroupReleaseStrategyTest, method notReleasePartitionsIfDownstreamRegionIsNotFinished.
@Test
public void notReleasePartitionsIfDownstreamRegionIsNotFinished() {
    final List<TestingSchedulingExecutionVertex> producers =
            testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingExecutionVertex> consumers =
            testingSchedulingTopology.addExecutionVertices().withParallelism(2).finish();
    testingSchedulingTopology.connectAllToAll(producers, consumers).finish();
    final ExecutionVertexID consumerVertex1 = consumers.get(0).getId();
    final RegionPartitionGroupReleaseStrategy regionPartitionGroupReleaseStrategy =
            new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);
    final List<IntermediateResultPartitionID> partitionsToRelease =
            getReleasablePartitions(regionPartitionGroupReleaseStrategy, consumerVertex1);
    assertThat(partitionsToRelease, is(empty()));
}
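The snippets above and below rely on a shared testingSchedulingTopology fixture and a getReleasablePartitions helper that are defined elsewhere in the test class and not shown on this page. The following is a minimal sketch of what that scaffolding might look like, assuming vertexFinished returns the consumed partition groups that became releasable and that each group is iterable over its partition IDs; the names and exact return types here are assumptions, not the verbatim Flink sources.

// Sketch of the assumed test-class scaffolding; not verbatim from the Flink sources.
private TestingSchedulingTopology testingSchedulingTopology;

@Before
public void setUp() {
    testingSchedulingTopology = new TestingSchedulingTopology();
}

private static List<IntermediateResultPartitionID> getReleasablePartitions(
        final RegionPartitionGroupReleaseStrategy releaseStrategy,
        final ExecutionVertexID finishedVertex) {
    // vertexFinished(...) is assumed to return the consumed partition groups that
    // became releasable; flatten them into a plain list of partition IDs.
    final List<IntermediateResultPartitionID> releasable = new ArrayList<>();
    for (ConsumedPartitionGroup group : releaseStrategy.vertexFinished(finishedVertex)) {
        for (IntermediateResultPartitionID partitionId : group) {
            releasable.add(partitionId);
        }
    }
    return releasable;
}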
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk: class RegionPartitionGroupReleaseStrategyTest, method releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished.
@Test
public void releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished() {
    final List<TestingSchedulingExecutionVertex> sourceVertices =
            testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingExecutionVertex> intermediateVertices =
            testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingExecutionVertex> sinkVertices =
            testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingResultPartition> sourceResultPartitions =
            testingSchedulingTopology.connectAllToAll(sourceVertices, intermediateVertices).finish();
    testingSchedulingTopology
            .connectAllToAll(intermediateVertices, sinkVertices)
            .withResultPartitionType(ResultPartitionType.PIPELINED)
            .finish();
    final ExecutionVertexID onlyIntermediateVertexId = intermediateVertices.get(0).getId();
    final ExecutionVertexID onlySinkVertexId = sinkVertices.get(0).getId();
    final IntermediateResultPartitionID onlySourceResultPartitionId =
            sourceResultPartitions.get(0).getId();
    final RegionPartitionGroupReleaseStrategy regionPartitionGroupReleaseStrategy =
            new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);
    regionPartitionGroupReleaseStrategy.vertexFinished(onlyIntermediateVertexId);
    final List<IntermediateResultPartitionID> partitionsToRelease =
            getReleasablePartitions(regionPartitionGroupReleaseStrategy, onlySinkVertexId);
    assertThat(partitionsToRelease, contains(onlySourceResultPartitionId));
}
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk: class SchedulingPipelinedRegionComputeUtilTest, method testOneComponentViaCascadeOfJoins.
/**
 * Tests that a single pipelined component formed via a cascade of joins works correctly.
 *
 * <pre>
 * (v1)--+
 *       +--(v5)-+
 * (v2)--+       |
 *               +--(v7)
 * (v3)--+       |
 *       +--(v6)-+
 * (v4)--+
 * </pre>
 */
@Test
public void testOneComponentViaCascadeOfJoins() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();
    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();
    topology.connect(v1, v5, ResultPartitionType.PIPELINED)
            .connect(v2, v5, ResultPartitionType.PIPELINED)
            .connect(v3, v6, ResultPartitionType.PIPELINED)
            .connect(v4, v6, ResultPartitionType.PIPELINED)
            .connect(v5, v7, ResultPartitionType.PIPELINED)
            .connect(v6, v7, ResultPartitionType.PIPELINED);
    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex =
            computePipelinedRegionByVertex(topology);
    Set<SchedulingExecutionVertex> r1 = pipelinedRegionByVertex.get(v1.getId());
    Set<SchedulingExecutionVertex> r2 = pipelinedRegionByVertex.get(v2.getId());
    Set<SchedulingExecutionVertex> r3 = pipelinedRegionByVertex.get(v3.getId());
    Set<SchedulingExecutionVertex> r4 = pipelinedRegionByVertex.get(v4.getId());
    Set<SchedulingExecutionVertex> r5 = pipelinedRegionByVertex.get(v5.getId());
    Set<SchedulingExecutionVertex> r6 = pipelinedRegionByVertex.get(v6.getId());
    Set<SchedulingExecutionVertex> r7 = pipelinedRegionByVertex.get(v7.getId());
    assertSameRegion(r1, r2, r3, r4, r5, r6, r7);
}
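computePipelinedRegionByVertex is another test-local helper not shown on this page. One way it could be written is sketched below, assuming SchedulingPipelinedRegionComputeUtil.computePipelinedRegions accepts the topologically sorted vertices plus lookup functions for vertices and result partitions; the exact signature may differ between Flink versions, so treat the call as an assumption rather than the authoritative API.

// Sketch only: maps every execution vertex ID to the set of vertices in its
// pipelined region, so tests can compare regions by vertex.
private static Map<ExecutionVertexID, Set<SchedulingExecutionVertex>>
        computePipelinedRegionByVertex(final TestingSchedulingTopology topology) {
    final Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regionByVertex = new HashMap<>();
    // Assumed call into the util under test; adjust the arguments to the Flink
    // version at hand.
    final Set<Set<SchedulingExecutionVertex>> regions =
            SchedulingPipelinedRegionComputeUtil.computePipelinedRegions(
                    topology.getVertices(),
                    topology::getVertex,
                    topology::getResultPartition);
    for (Set<SchedulingExecutionVertex> region : regions) {
        for (SchedulingExecutionVertex vertex : region) {
            regionByVertex.put(vertex.getId(), region);
        }
    }
    return regionByVertex;
}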
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk: class SchedulingPipelinedRegionComputeUtilTest, method testOneComponentViaTwoExchanges.
/**
 * Tests that a single pipelined component formed via a sequence of all-to-all
 * connections works correctly.
 *
 * <pre>
 * (a1) -+-> (b1) -+-> (c1)
 *       X         X
 * (a2) -+-> (b2) -+-> (c2)
 * </pre>
 */
@Test
public void testOneComponentViaTwoExchanges() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();
    TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc2 = topology.newExecutionVertex();
    topology.connect(va1, vb1, ResultPartitionType.PIPELINED)
            .connect(va1, vb2, ResultPartitionType.PIPELINED)
            .connect(va2, vb1, ResultPartitionType.PIPELINED)
            .connect(va2, vb2, ResultPartitionType.PIPELINED)
            .connect(vb1, vc1, ResultPartitionType.PIPELINED)
            .connect(vb1, vc2, ResultPartitionType.PIPELINED)
            .connect(vb2, vc1, ResultPartitionType.PIPELINED)
            .connect(vb2, vc2, ResultPartitionType.PIPELINED);
    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex =
            computePipelinedRegionByVertex(topology);
    Set<SchedulingExecutionVertex> ra1 = pipelinedRegionByVertex.get(va1.getId());
    Set<SchedulingExecutionVertex> ra2 = pipelinedRegionByVertex.get(va2.getId());
    Set<SchedulingExecutionVertex> rb1 = pipelinedRegionByVertex.get(vb1.getId());
    Set<SchedulingExecutionVertex> rb2 = pipelinedRegionByVertex.get(vb2.getId());
    Set<SchedulingExecutionVertex> rc1 = pipelinedRegionByVertex.get(vc1.getId());
    Set<SchedulingExecutionVertex> rc2 = pipelinedRegionByVertex.get(vc2.getId());
    assertSameRegion(ra1, ra2, rb1, rb2, rc1, rc2);
}
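assertSameRegion is likewise a small helper of the test class that is not reproduced on this page. A minimal sketch, assuming it simply checks that every looked-up region set is non-null and equal to the others, using the same Hamcrest matchers as the tests above:

// Sketch only: every region set passed in must refer to the same pipelined region.
@SafeVarargs
private static void assertSameRegion(Set<SchedulingExecutionVertex>... regions) {
    for (Set<SchedulingExecutionVertex> region : regions) {
        assertThat(region, is(notNullValue()));
        assertThat(region, is(equalTo(regions[0])));
    }
}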
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk: class SchedulingPipelinedRegionComputeUtilTest, method testOneComponentInstanceFromOneSource.
/**
 * Tests that a single pipelined component instance fanning out from one source works
 * correctly.
 *
 * <pre>
 *               +--(v4)
 *       +--(v2)-+
 *       |       +--(v5)
 * (v1)--+
 *       |       +--(v6)
 *       +--(v3)-+
 *               +--(v7)
 * </pre>
 */
@Test
public void testOneComponentInstanceFromOneSource() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();
    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();
    topology.connect(v1, v2, ResultPartitionType.PIPELINED)
            .connect(v1, v3, ResultPartitionType.PIPELINED)
            .connect(v2, v4, ResultPartitionType.PIPELINED)
            .connect(v2, v5, ResultPartitionType.PIPELINED)
            .connect(v3, v6, ResultPartitionType.PIPELINED)
            .connect(v3, v7, ResultPartitionType.PIPELINED);
    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex =
            computePipelinedRegionByVertex(topology);
    Set<SchedulingExecutionVertex> r1 = pipelinedRegionByVertex.get(v1.getId());
    Set<SchedulingExecutionVertex> r2 = pipelinedRegionByVertex.get(v2.getId());
    Set<SchedulingExecutionVertex> r3 = pipelinedRegionByVertex.get(v3.getId());
    Set<SchedulingExecutionVertex> r4 = pipelinedRegionByVertex.get(v4.getId());
    Set<SchedulingExecutionVertex> r5 = pipelinedRegionByVertex.get(v5.getId());
    Set<SchedulingExecutionVertex> r6 = pipelinedRegionByVertex.get(v6.getId());
    Set<SchedulingExecutionVertex> r7 = pipelinedRegionByVertex.get(v7.getId());
    assertSameRegion(r1, r2, r3, r4, r5, r6, r7);
}