Example usage of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology in the Apache Flink project (apache/flink).
From the class SchedulingPipelinedRegionComputeUtilTest, method testEmbarrassinglyParallelCase.
/**
 * Verifies that embarrassingly parallel chains of vertices each form their own
 * pipelined region.
 *
 * <pre>
 * (a1) --> (b1)
 *
 * (a2) --> (b2)
 *
 * (a3) --> (b3)
 * </pre>
 */
@Test
public void testEmbarrassinglyParallelCase() {
    final TestingSchedulingTopology topology = new TestingSchedulingTopology();

    // Three independent producer/consumer chains.
    final TestingSchedulingExecutionVertex producer1 = topology.newExecutionVertex();
    final TestingSchedulingExecutionVertex producer2 = topology.newExecutionVertex();
    final TestingSchedulingExecutionVertex producer3 = topology.newExecutionVertex();
    final TestingSchedulingExecutionVertex consumer1 = topology.newExecutionVertex();
    final TestingSchedulingExecutionVertex consumer2 = topology.newExecutionVertex();
    final TestingSchedulingExecutionVertex consumer3 = topology.newExecutionVertex();

    // Wire each producer to its own consumer with a pipelined edge; the chains stay disjoint.
    topology.connect(producer1, consumer1, ResultPartitionType.PIPELINED);
    topology.connect(producer2, consumer2, ResultPartitionType.PIPELINED);
    topology.connect(producer3, consumer3, ResultPartitionType.PIPELINED);

    final Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> regionsByVertex =
            computePipelinedRegionByVertex(topology);

    final Set<SchedulingExecutionVertex> producerRegion1 = regionsByVertex.get(producer1.getId());
    final Set<SchedulingExecutionVertex> producerRegion2 = regionsByVertex.get(producer2.getId());
    final Set<SchedulingExecutionVertex> producerRegion3 = regionsByVertex.get(producer3.getId());
    final Set<SchedulingExecutionVertex> consumerRegion1 = regionsByVertex.get(consumer1.getId());
    final Set<SchedulingExecutionVertex> consumerRegion2 = regionsByVertex.get(consumer2.getId());
    final Set<SchedulingExecutionVertex> consumerRegion3 = regionsByVertex.get(consumer3.getId());

    // Every producer shares a region with its consumer, and the three chains are distinct regions.
    assertSameRegion(producerRegion1, consumerRegion1);
    assertSameRegion(producerRegion2, consumerRegion2);
    assertSameRegion(producerRegion3, consumerRegion3);
    assertDistinctRegions(producerRegion1, producerRegion2, producerRegion3);
}
Example usage of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology in the Apache Flink project (apache/flink).
From the class LocalInputPreferredSlotSharingStrategyTest, method testGetExecutionSlotSharingGroupOfLateAttachedVertices.
/**
 * Verifies that a job vertex attached after the strategy was created ends up in its own
 * execution slot sharing group once the strategy is notified of the topology update.
 */
@Test
public void testGetExecutionSlotSharingGroupOfLateAttachedVertices() {
    final JobVertexID vertexId1 = new JobVertexID();
    final JobVertexID vertexId2 = new JobVertexID();
    final JobVertexID vertexId3 = new JobVertexID();

    // Vertices 1 and 2 share one slot sharing group; vertex 3 gets a group of its own.
    final SlotSharingGroup groupA = new SlotSharingGroup();
    groupA.addVertexToGroup(vertexId1);
    groupA.addVertexToGroup(vertexId2);
    final SlotSharingGroup groupB = new SlotSharingGroup();
    groupB.addVertexToGroup(vertexId3);

    final TestingSchedulingTopology topology = new TestingSchedulingTopology();
    final TestingSchedulingExecutionVertex vertex1 = topology.newExecutionVertex(vertexId1, 0);
    final TestingSchedulingExecutionVertex vertex2 = topology.newExecutionVertex(vertexId2, 0);
    topology.connect(vertex1, vertex2);

    final LocalInputPreferredSlotSharingStrategy strategy =
            new LocalInputPreferredSlotSharingStrategy(
                    topology,
                    new HashSet<>(Arrays.asList(groupA, groupB)),
                    Collections.emptySet());

    // Initially there is a single execution slot sharing group holding vertex1 and vertex2.
    assertThat(strategy.getExecutionSlotSharingGroups().size(), is(1));
    assertThat(
            strategy.getExecutionSlotSharingGroup(vertex1.getId()).getExecutionVertexIds(),
            containsInAnyOrder(vertex1.getId(), vertex2.getId()));
    assertThat(
            strategy.getExecutionSlotSharingGroup(vertex2.getId()).getExecutionVertexIds(),
            containsInAnyOrder(vertex1.getId(), vertex2.getId()));

    // Attach a new job vertex afterwards and notify the strategy about the updated topology.
    final TestingSchedulingExecutionVertex vertex3 = topology.newExecutionVertex(vertexId3, 0);
    topology.connect(vertex2, vertex3, ResultPartitionType.BLOCKING);
    strategy.notifySchedulingTopologyUpdated(
            topology, Collections.singletonList(vertex3.getId()));

    // The late-attached vertex forms a second, separate group; the first group is unchanged.
    assertThat(strategy.getExecutionSlotSharingGroups().size(), is(2));
    assertThat(
            strategy.getExecutionSlotSharingGroup(vertex1.getId()).getExecutionVertexIds(),
            containsInAnyOrder(vertex1.getId(), vertex2.getId()));
    assertThat(
            strategy.getExecutionSlotSharingGroup(vertex2.getId()).getExecutionVertexIds(),
            containsInAnyOrder(vertex1.getId(), vertex2.getId()));
    assertThat(
            strategy.getExecutionSlotSharingGroup(vertex3.getId()).getExecutionVertexIds(),
            containsInAnyOrder(vertex3.getId()));
}
Example usage of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology in the Apache Flink project (apache/flink).
From the class RestartPipelinedRegionFailoverStrategyTest, method testRegionFailoverForDataConsumptionErrors.
/**
 * Tests the scenario where a task fails due to a data consumption error. In that case the
 * region containing the failed task, the region containing the unavailable result partition,
 * and all of their consumer regions should be restarted.
 *
 * <pre>
 * (v1) -+-> (v4)
 * x
 * (v2) -+-> (v5)
 *
 * (v3) -+-> (v6)
 *
 * ^
 * |
 * (blocking)
 * </pre>
 *
 * Each vertex is in an individual region.
 */
@Test
public void testRegionFailoverForDataConsumptionErrors() throws Exception {
    final TestingSchedulingTopology topology = new TestingSchedulingTopology();

    // Producers already finished; consumers are running.
    final TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex(ExecutionState.RUNNING);
    final TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex(ExecutionState.RUNNING);
    final TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex(ExecutionState.RUNNING);

    // NOTE: connect order matters below — the consumed-results iterators are expected to
    // yield the partition from v1 before the one from v2.
    topology.connect(v1, v4, ResultPartitionType.BLOCKING);
    topology.connect(v1, v5, ResultPartitionType.BLOCKING);
    topology.connect(v2, v4, ResultPartitionType.BLOCKING);
    topology.connect(v2, v5, ResultPartitionType.BLOCKING);
    topology.connect(v3, v6, ResultPartitionType.BLOCKING);

    final RestartPipelinedRegionFailoverStrategy failoverStrategy =
            new RestartPipelinedRegionFailoverStrategy(topology);

    // v4 failing on either of its input partitions restarts the producer plus both consumers.
    final Iterator<TestingSchedulingResultPartition> v4Inputs = v4.getConsumedResults().iterator();
    final TestingSchedulingResultPartition partitionFromV1 = v4Inputs.next();
    verifyThatFailedExecution(failoverStrategy, v4)
            .partitionConnectionCause(partitionFromV1)
            .restarts(v1, v4, v5);
    final TestingSchedulingResultPartition partitionFromV2 = v4Inputs.next();
    verifyThatFailedExecution(failoverStrategy, v4)
            .partitionConnectionCause(partitionFromV2)
            .restarts(v2, v4, v5);

    // The same holds when v5 is the failing consumer.
    final Iterator<TestingSchedulingResultPartition> v5Inputs = v5.getConsumedResults().iterator();
    verifyThatFailedExecution(failoverStrategy, v5)
            .partitionConnectionCause(v5Inputs.next())
            .restarts(v1, v4, v5);
    verifyThatFailedExecution(failoverStrategy, v5)
            .partitionConnectionCause(v5Inputs.next())
            .restarts(v2, v4, v5);

    // The isolated v3 -> v6 chain only restarts its own two regions.
    final TestingSchedulingResultPartition partitionFromV3 = v6.getConsumedResults().iterator().next();
    verifyThatFailedExecution(failoverStrategy, v6)
            .partitionConnectionCause(partitionFromV3)
            .restarts(v3, v6);
}
Example usage of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology in the Apache Flink project (apache/flink).
From the class RestartPipelinedRegionFailoverStrategyTest, method testRegionFailoverForRegionInternalErrors.
/**
 * Tests the scenario where a task fails due to its own (region-internal) error. In that case
 * only the region containing the failed task and its consumer regions should be restarted.
 *
 * <pre>
 * (v1) -+-> (v4)
 * x
 * (v2) -+-> (v5)
 *
 * (v3) -+-> (v6)
 *
 * ^
 * |
 * (blocking)
 * </pre>
 *
 * Each vertex is in an individual region.
 */
@Test
public void testRegionFailoverForRegionInternalErrors() {
    final TestingSchedulingTopology topology = new TestingSchedulingTopology();

    final TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex(ExecutionState.SCHEDULED);
    final TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex(ExecutionState.RUNNING);

    // All edges are blocking, so each vertex sits in its own pipelined region.
    topology.connect(v1, v4, ResultPartitionType.BLOCKING);
    topology.connect(v1, v5, ResultPartitionType.BLOCKING);
    topology.connect(v2, v4, ResultPartitionType.BLOCKING);
    topology.connect(v2, v5, ResultPartitionType.BLOCKING);
    topology.connect(v3, v6, ResultPartitionType.BLOCKING);

    final RestartPipelinedRegionFailoverStrategy failoverStrategy =
            new RestartPipelinedRegionFailoverStrategy(topology);

    // A failing producer drags its downstream consumers into the restart set ...
    verifyThatFailedExecution(failoverStrategy, v1).restarts(v1, v4, v5);
    verifyThatFailedExecution(failoverStrategy, v2).restarts(v2, v4, v5);
    verifyThatFailedExecution(failoverStrategy, v3).restarts(v3, v6);
    // ... while a failing sink with no consumers restarts only itself.
    verifyThatFailedExecution(failoverStrategy, v4).restarts(v4);
    verifyThatFailedExecution(failoverStrategy, v5).restarts(v5);
    verifyThatFailedExecution(failoverStrategy, v6).restarts(v6);
}
Example usage of org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology in the Apache Flink project (apache/flink).
From the class RestartPipelinedRegionFailoverStrategyTest, method testRegionFailoverForMultipleVerticesRegions.
/**
 * Tests region failover scenarios for a topology whose regions each contain multiple vertices.
 *
 * <pre>
 * (v1) ---> (v2) --|--> (v3) ---> (v4) --|--> (v5) ---> (v6)
 *
 * ^ ^ ^ ^ ^
 * | | | | |
 * (pipelined) (blocking) (pipelined) (blocking) (pipelined)
 * </pre>
 *
 * Component 1: 1,2; component 2: 3,4; component 3: 5,6
 */
@Test
public void testRegionFailoverForMultipleVerticesRegions() throws Exception {
    final TestingSchedulingTopology topology = new TestingSchedulingTopology();

    final TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex(ExecutionState.FINISHED);
    final TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex(ExecutionState.RUNNING);
    final TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex(ExecutionState.RUNNING);
    final TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex(ExecutionState.FAILED);
    final TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex(ExecutionState.CANCELED);

    // Pipelined edges merge vertices into one region; blocking edges separate regions.
    topology.connect(v1, v2, ResultPartitionType.PIPELINED);
    topology.connect(v2, v3, ResultPartitionType.BLOCKING);
    topology.connect(v3, v4, ResultPartitionType.PIPELINED);
    topology.connect(v4, v5, ResultPartitionType.BLOCKING);
    topology.connect(v5, v6, ResultPartitionType.PIPELINED);

    final RestartPipelinedRegionFailoverStrategy failoverStrategy =
            new RestartPipelinedRegionFailoverStrategy(topology);

    // An internal failure of v3 restarts its region {v3, v4} plus the downstream region {v5, v6}.
    verifyThatFailedExecution(failoverStrategy, v3).restarts(v3, v4, v5, v6);

    // A data-consumption failure on v3's input also pulls in the producer region {v1, v2}.
    final TestingSchedulingResultPartition partitionFromV2 = v3.getConsumedResults().iterator().next();
    verifyThatFailedExecution(failoverStrategy, v3)
            .partitionConnectionCause(partitionFromV2)
            .restarts(v1, v2, v3, v4, v5, v6);
}
Aggregations