
Example 91 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk.

Source: class RegionPartitionGroupReleaseStrategyTest, method notReleasePartitionsIfDownstreamRegionIsNotFinished.

@Test
public void notReleasePartitionsIfDownstreamRegionIsNotFinished() {
    final List<TestingSchedulingExecutionVertex> producers = testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingExecutionVertex> consumers = testingSchedulingTopology.addExecutionVertices().withParallelism(2).finish();
    testingSchedulingTopology.connectAllToAll(producers, consumers).finish();
    final ExecutionVertexID consumerVertex1 = consumers.get(0).getId();
    final RegionPartitionGroupReleaseStrategy regionPartitionGroupReleaseStrategy = new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);
    final List<IntermediateResultPartitionID> partitionsToRelease = getReleasablePartitions(regionPartitionGroupReleaseStrategy, consumerVertex1);
    assertThat(partitionsToRelease, is(empty()));
}
Also used : TestingSchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) RegionPartitionGroupReleaseStrategy(org.apache.flink.runtime.executiongraph.failover.flip1.partitionrelease.RegionPartitionGroupReleaseStrategy) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Test(org.junit.Test)
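Both release-strategy examples call a getReleasablePartitions helper that is not included in the excerpt. A minimal sketch of what it presumably does, assuming that vertexFinished(...) both marks the vertex as finished and returns the partition groups that became releasable, and that each group is iterable over its IntermediateResultPartitionIDs (these are assumptions; the sketch uses java.util.stream.Collectors and StreamSupport):

private static List<IntermediateResultPartitionID> getReleasablePartitions(
        final RegionPartitionGroupReleaseStrategy strategy,
        final ExecutionVertexID finishedVertex) {
    // Assumption: vertexFinished(...) records the finished vertex and returns the
    // partition groups that may now be released; flatten them into partition IDs.
    return strategy.vertexFinished(finishedVertex).stream()
            .flatMap(group -> StreamSupport.stream(group.spliterator(), false))
            .collect(Collectors.toList());
}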

Example 92 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk.

Source: class RegionPartitionGroupReleaseStrategyTest, method releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished.

@Test
public void releasePartitionsIfDownstreamRegionWithMultipleOperatorsIsFinished() {
    final List<TestingSchedulingExecutionVertex> sourceVertices = testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingExecutionVertex> intermediateVertices = testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingExecutionVertex> sinkVertices = testingSchedulingTopology.addExecutionVertices().finish();
    final List<TestingSchedulingResultPartition> sourceResultPartitions = testingSchedulingTopology.connectAllToAll(sourceVertices, intermediateVertices).finish();
    testingSchedulingTopology.connectAllToAll(intermediateVertices, sinkVertices).withResultPartitionType(ResultPartitionType.PIPELINED).finish();
    final ExecutionVertexID onlyIntermediateVertexId = intermediateVertices.get(0).getId();
    final ExecutionVertexID onlySinkVertexId = sinkVertices.get(0).getId();
    final IntermediateResultPartitionID onlySourceResultPartitionId = sourceResultPartitions.get(0).getId();
    final RegionPartitionGroupReleaseStrategy regionPartitionGroupReleaseStrategy = new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);
    regionPartitionGroupReleaseStrategy.vertexFinished(onlyIntermediateVertexId);
    final List<IntermediateResultPartitionID> partitionsToRelease = getReleasablePartitions(regionPartitionGroupReleaseStrategy, onlySinkVertexId);
    assertThat(partitionsToRelease, contains(onlySourceResultPartitionId));
}
Also used : TestingSchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) TestingSchedulingResultPartition(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingResultPartition) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) RegionPartitionGroupReleaseStrategy(org.apache.flink.runtime.executiongraph.failover.flip1.partitionrelease.RegionPartitionGroupReleaseStrategy) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Test(org.junit.Test)
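A hedged variant of the same scenario (not part of the excerpt), assuming the getReleasablePartitions helper marks the given vertex as finished: finishing only the intermediate vertex leaves the downstream pipelined region {intermediate, sink} incomplete, so nothing is releasable yet; finishing the sink then completes the region and makes the blocking source partition releasable.

// Sketch only; reuses the topology and vertex IDs built in the test above.
final RegionPartitionGroupReleaseStrategy strategy =
        new RegionPartitionGroupReleaseStrategy(testingSchedulingTopology);

// The intermediate vertex finishes first: its region also contains the sink, so no release yet.
assertThat(getReleasablePartitions(strategy, onlyIntermediateVertexId), is(empty()));

// The sink vertex finishes as well: the whole downstream region is done, so the source partition is releasable.
assertThat(getReleasablePartitions(strategy, onlySinkVertexId), contains(onlySourceResultPartitionId));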

Example 93 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk.

Source: class SchedulingPipelinedRegionComputeUtilTest, method testOneComponentViaCascadeOfJoins.

/**
 * Tests that a single pipelined component via a cascade of joins works
 * correctly.
 *
 * <pre>
 *     (v1)--+
 *          +--(v5)-+
 *     (v2)--+      |
 *                 +--(v7)
 *     (v3)--+      |
 *          +--(v6)-+
 *     (v4)--+
 * </pre>
 */
@Test
public void testOneComponentViaCascadeOfJoins() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();
    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();
    topology.connect(v1, v5, ResultPartitionType.PIPELINED)
            .connect(v2, v5, ResultPartitionType.PIPELINED)
            .connect(v3, v6, ResultPartitionType.PIPELINED)
            .connect(v4, v6, ResultPartitionType.PIPELINED)
            .connect(v5, v7, ResultPartitionType.PIPELINED)
            .connect(v6, v7, ResultPartitionType.PIPELINED);
    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);
    Set<SchedulingExecutionVertex> r1 = pipelinedRegionByVertex.get(v1.getId());
    Set<SchedulingExecutionVertex> r2 = pipelinedRegionByVertex.get(v2.getId());
    Set<SchedulingExecutionVertex> r3 = pipelinedRegionByVertex.get(v3.getId());
    Set<SchedulingExecutionVertex> r4 = pipelinedRegionByVertex.get(v4.getId());
    Set<SchedulingExecutionVertex> r5 = pipelinedRegionByVertex.get(v5.getId());
    Set<SchedulingExecutionVertex> r6 = pipelinedRegionByVertex.get(v6.getId());
    Set<SchedulingExecutionVertex> r7 = pipelinedRegionByVertex.get(v7.getId());
    assertSameRegion(r1, r2, r3, r4, r5, r6, r7);
}
Also used : TestingSchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) SchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex) Set(java.util.Set) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) TestingSchedulingTopology(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology) Test(org.junit.Test)
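The assertSameRegion helper used by these region-computation tests is not shown in the excerpt. A plausible reconstruction (an assumption, not taken from the project) is that it checks every per-vertex region set is equal, i.e. all vertices landed in one pipelined region; assertEquals here is assumed to be org.junit.Assert.assertEquals:

@SafeVarargs
private static void assertSameRegion(final Set<SchedulingExecutionVertex>... regions) {
    // All region sets obtained per vertex must be equal, i.e. describe the same pipelined region.
    for (int i = 1; i < regions.length; i++) {
        assertEquals(regions[0], regions[i]);
    }
}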

Example 94 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk.

Source: class SchedulingPipelinedRegionComputeUtilTest, method testOneComponentViaTwoExchanges.

/**
 * Tests that a single pipelined component via a sequence of all-to-all
 * connections works correctly.
 *
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1)
 *           X         X
 *     (a2) -+-> (b2) -+-> (c2)
 * </pre>
 */
@Test
public void testOneComponentViaTwoExchanges() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();
    TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vc2 = topology.newExecutionVertex();
    topology.connect(va1, vb1, ResultPartitionType.PIPELINED)
            .connect(va1, vb2, ResultPartitionType.PIPELINED)
            .connect(va2, vb1, ResultPartitionType.PIPELINED)
            .connect(va2, vb2, ResultPartitionType.PIPELINED)
            .connect(vb1, vc1, ResultPartitionType.PIPELINED)
            .connect(vb1, vc2, ResultPartitionType.PIPELINED)
            .connect(vb2, vc1, ResultPartitionType.PIPELINED)
            .connect(vb2, vc2, ResultPartitionType.PIPELINED);
    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);
    Set<SchedulingExecutionVertex> ra1 = pipelinedRegionByVertex.get(va1.getId());
    Set<SchedulingExecutionVertex> ra2 = pipelinedRegionByVertex.get(va2.getId());
    Set<SchedulingExecutionVertex> rb1 = pipelinedRegionByVertex.get(vb1.getId());
    Set<SchedulingExecutionVertex> rb2 = pipelinedRegionByVertex.get(vb2.getId());
    Set<SchedulingExecutionVertex> rc1 = pipelinedRegionByVertex.get(vc1.getId());
    Set<SchedulingExecutionVertex> rc2 = pipelinedRegionByVertex.get(vc2.getId());
    assertSameRegion(ra1, ra2, rb1, rb2, rc1, rc2);
}
Also used : TestingSchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) SchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex) Set(java.util.Set) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) TestingSchedulingTopology(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology) Test(org.junit.Test)
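The computePipelinedRegionByVertex helper is likewise absent from the excerpt. A sketch of what it presumably does, assuming a hypothetical computePipelinedRegions(topology) call that wraps the utility under test and returns the distinct pipelined regions of the topology (uses java.util.HashMap):

private static Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> computePipelinedRegionByVertex(
        final TestingSchedulingTopology topology) {
    final Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regionByVertex = new HashMap<>();
    // computePipelinedRegions(...) is a hypothetical wrapper around the utility under test;
    // it is assumed to return the set of pipelined regions of the given topology.
    for (Set<SchedulingExecutionVertex> region : computePipelinedRegions(topology)) {
        for (SchedulingExecutionVertex vertex : region) {
            regionByVertex.put(vertex.getId(), region);
        }
    }
    return regionByVertex;
}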

Example 95 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by splunk.

Source: class SchedulingPipelinedRegionComputeUtilTest, method testOneComponentInstanceFromOneSource.

/**
 * Tests that a single pipelined component instance from one source works
 * correctly.
 *
 * <pre>
 *                 +--(v4)
 *          +--(v2)-+
 *          |      +--(v5)
 *     (v1)--+
 *          |      +--(v6)
 *          +--(v3)-+
 *                 +--(v7)
 * </pre>
 */
@Test
public void testOneComponentInstanceFromOneSource() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();
    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();
    topology.connect(v1, v2, ResultPartitionType.PIPELINED)
            .connect(v1, v3, ResultPartitionType.PIPELINED)
            .connect(v2, v4, ResultPartitionType.PIPELINED)
            .connect(v2, v5, ResultPartitionType.PIPELINED)
            .connect(v3, v6, ResultPartitionType.PIPELINED)
            .connect(v3, v7, ResultPartitionType.PIPELINED);
    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex = computePipelinedRegionByVertex(topology);
    Set<SchedulingExecutionVertex> r1 = pipelinedRegionByVertex.get(v1.getId());
    Set<SchedulingExecutionVertex> r2 = pipelinedRegionByVertex.get(v2.getId());
    Set<SchedulingExecutionVertex> r3 = pipelinedRegionByVertex.get(v3.getId());
    Set<SchedulingExecutionVertex> r4 = pipelinedRegionByVertex.get(v4.getId());
    Set<SchedulingExecutionVertex> r5 = pipelinedRegionByVertex.get(v5.getId());
    Set<SchedulingExecutionVertex> r6 = pipelinedRegionByVertex.get(v6.getId());
    Set<SchedulingExecutionVertex> r7 = pipelinedRegionByVertex.get(v7.getId());
    assertSameRegion(r1, r2, r3, r4, r5, r6, r7);
}
Also used : TestingSchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) SchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex) Set(java.util.Set) ExecutionVertexID(org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) TestingSchedulingTopology(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology) Test(org.junit.Test)
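For contrast, a sketch that is not part of the excerpt: if the v1 -> v3 edge in the topology above were BLOCKING instead of PIPELINED, the region computation would split the graph, placing v3 and its pipelined successors v6 and v7 in a region separate from v1, v2, v4 and v5 (assertNotEquals is assumed to be org.junit.Assert.assertNotEquals).

// Hypothetical variation of the topology built in the test above.
topology.connect(v1, v2, ResultPartitionType.PIPELINED)
        .connect(v1, v3, ResultPartitionType.BLOCKING)
        .connect(v2, v4, ResultPartitionType.PIPELINED)
        .connect(v2, v5, ResultPartitionType.PIPELINED)
        .connect(v3, v6, ResultPartitionType.PIPELINED)
        .connect(v3, v7, ResultPartitionType.PIPELINED);

Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regionByVertex =
        computePipelinedRegionByVertex(topology);

// v1 and v3 now belong to different pipelined regions.
assertNotEquals(regionByVertex.get(v1.getId()), regionByVertex.get(v3.getId()));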

Aggregations

ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID) 231
Test (org.junit.Test) 165
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 63
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph) 57
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 54
SchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex) 51
Set (java.util.Set) 48
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) 45
AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest) 45
TestingSchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) 45
Collection (java.util.Collection) 33
TestingSchedulingTopology (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology) 33
HashSet (java.util.HashSet) 30
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex) 30
ArrayList (java.util.ArrayList) 27
Map (java.util.Map) 27
HashMap (java.util.HashMap) 24
List (java.util.List) 24
CompletableFuture (java.util.concurrent.CompletableFuture) 24
TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation) 24