use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
the class SchedulingPipelinedRegionComputeUtilTest method testMultipleComponentsViaCascadeOfJoins.
/**
 * Cascades of joins with partially blocking, partially pipelined exchanges.
 *
 * <pre>
 *     (1)--+
 *          +--(5)-+
 *     (2)--+      |
 *              (blocking)
 *                 |
 *                 +--(7)
 *                 |
 *              (blocking)
 *     (3)--+      |
 *          +--(6)-+
 *     (4)--+
 * </pre>
 *
 * <p>Component 1: 1, 2, 5; component 2: 3, 4, 6; component 3: 7.
 */
@Test
public void testMultipleComponentsViaCascadeOfJoins() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex v1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v4 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v5 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v6 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex v7 = topology.newExecutionVertex();

    topology.connect(v1, v5, ResultPartitionType.PIPELINED)
            .connect(v2, v5, ResultPartitionType.PIPELINED)
            .connect(v3, v6, ResultPartitionType.PIPELINED)
            .connect(v4, v6, ResultPartitionType.PIPELINED)
            .connect(v5, v7, ResultPartitionType.BLOCKING)
            .connect(v6, v7, ResultPartitionType.BLOCKING);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex =
            computePipelinedRegionByVertex(topology);

    Set<SchedulingExecutionVertex> r1 = pipelinedRegionByVertex.get(v1.getId());
    Set<SchedulingExecutionVertex> r2 = pipelinedRegionByVertex.get(v2.getId());
    Set<SchedulingExecutionVertex> r3 = pipelinedRegionByVertex.get(v3.getId());
    Set<SchedulingExecutionVertex> r4 = pipelinedRegionByVertex.get(v4.getId());
    Set<SchedulingExecutionVertex> r5 = pipelinedRegionByVertex.get(v5.getId());
    Set<SchedulingExecutionVertex> r6 = pipelinedRegionByVertex.get(v6.getId());
    Set<SchedulingExecutionVertex> r7 = pipelinedRegionByVertex.get(v7.getId());

    assertSameRegion(r1, r2, r5);
    assertSameRegion(r3, r4, r6);
    assertDistinctRegions(r1, r3, r7);
}
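What the assertions above encode is connected components over the pipelined edges: blocking exchanges (5 to 7, 6 to 7) act as region boundaries, which is why v7 ends up in a region of its own. A minimal, self-contained union-find sketch of that grouping idea, with plain int vertex ids standing in for ExecutionVertexID (illustrative only, not Flink's actual implementation):

import java.util.*;

// Illustrative only: computes pipelined regions as connected components over
// pipelined edges, using a plain union-find with path halving.
final class PipelinedRegionSketch {
    private final int[] parent;

    PipelinedRegionSketch(int numVertices) {
        parent = new int[numVertices];
        for (int i = 0; i < numVertices; i++) {
            parent[i] = i;
        }
    }

    int find(int v) {
        while (parent[v] != v) {
            parent[v] = parent[parent[v]]; // path halving
            v = parent[v];
        }
        return v;
    }

    // Only pipelined edges merge regions; blocking edges are region boundaries.
    void connectPipelined(int a, int b) {
        parent[find(a)] = find(b);
    }

    Map<Integer, Set<Integer>> regions() {
        Map<Integer, Set<Integer>> byRoot = new HashMap<>();
        for (int v = 0; v < parent.length; v++) {
            byRoot.computeIfAbsent(find(v), r -> new HashSet<>()).add(v);
        }
        return byRoot;
    }

    public static void main(String[] args) {
        // Topology from the test above: vertices 1..7 (index 0 unused).
        PipelinedRegionSketch u = new PipelinedRegionSketch(8);
        u.connectPipelined(1, 5);
        u.connectPipelined(2, 5);
        u.connectPipelined(3, 6);
        u.connectPipelined(4, 6);
        // 5->7 and 6->7 are BLOCKING, so they do not merge regions.
        System.out.println(u.regions().values());
        // Prints the components {1, 2, 5}, {3, 4, 6} and {7},
        // plus the singleton {0} for the unused index.
    }
}

The production utility operates on the real topology types; this sketch only mirrors the grouping logic that the test asserts.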
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
the class SchedulingPipelinedRegionComputeUtilTest method testEmbarrassinglyParallelCase.
/**
 * Tests that embarrassingly parallel chains of vertices are handled correctly,
 * i.e. each chain forms its own pipelined region.
 *
 * <pre>
 *     (a1) --> (b1)
 *
 *     (a2) --> (b2)
 *
 *     (a3) --> (b3)
 * </pre>
 */
@Test
public void testEmbarrassinglyParallelCase() {
    TestingSchedulingTopology topology = new TestingSchedulingTopology();

    TestingSchedulingExecutionVertex va1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex va3 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb1 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb2 = topology.newExecutionVertex();
    TestingSchedulingExecutionVertex vb3 = topology.newExecutionVertex();

    topology.connect(va1, vb1, ResultPartitionType.PIPELINED)
            .connect(va2, vb2, ResultPartitionType.PIPELINED)
            .connect(va3, vb3, ResultPartitionType.PIPELINED);

    Map<ExecutionVertexID, Set<SchedulingExecutionVertex>> pipelinedRegionByVertex =
            computePipelinedRegionByVertex(topology);

    Set<SchedulingExecutionVertex> ra1 = pipelinedRegionByVertex.get(va1.getId());
    Set<SchedulingExecutionVertex> ra2 = pipelinedRegionByVertex.get(va2.getId());
    Set<SchedulingExecutionVertex> ra3 = pipelinedRegionByVertex.get(va3.getId());
    Set<SchedulingExecutionVertex> rb1 = pipelinedRegionByVertex.get(vb1.getId());
    Set<SchedulingExecutionVertex> rb2 = pipelinedRegionByVertex.get(vb2.getId());
    Set<SchedulingExecutionVertex> rb3 = pipelinedRegionByVertex.get(vb3.getId());

    assertSameRegion(ra1, rb1);
    assertSameRegion(ra2, rb2);
    assertSameRegion(ra3, rb3);
    assertDistinctRegions(ra1, ra2, ra3);
}
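The assertSameRegion and assertDistinctRegions helpers are not included in these snippets. Judging purely from how they are called, they plausibly compare the looked-up region sets by identity, along these lines (a hedged reconstruction based only on the call sites, not copied from the Flink sources):

// Hypothetical reconstruction of the test helpers used above.
@SafeVarargs
private static void assertSameRegion(Set<SchedulingExecutionVertex>... regions) {
    for (int i = 1; i < regions.length; i++) {
        // All lookups must have resolved to the very same region instance.
        org.junit.Assert.assertSame(regions[0], regions[i]);
    }
}

@SafeVarargs
private static void assertDistinctRegions(Set<SchedulingExecutionVertex>... regions) {
    for (int i = 0; i < regions.length; i++) {
        for (int j = i + 1; j < regions.length; j++) {
            org.junit.Assert.assertNotSame(regions[i], regions[j]);
        }
    }
}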
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
the class DefaultSchedulerTest method restartAfterDeploymentFails.
@Test
public void restartAfterDeploymentFails() {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);

    // Let the initial deployment fail, then allow the restarted attempt to succeed.
    testExecutionVertexOperations.enableFailDeploy();
    createSchedulerAndStartScheduling(jobGraph);
    testExecutionVertexOperations.disableFailDeploy();

    taskRestartExecutor.triggerScheduledTasks();

    // The vertex is deployed twice: the failed initial attempt plus the restart.
    final List<ExecutionVertexID> deployedExecutionVertices =
            testExecutionVertexOperations.getDeployedVertices();
    final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    assertThat(deployedExecutionVertices, contains(executionVertexId, executionVertexId));
}
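The final contains(...) assertion works because ExecutionVertexID has value semantics: an id freshly constructed from the JobVertexID and subtask index compares equal to the ids recorded during deployment. A small illustration (assuming the standard equals/hashCode contract on the class, which its use as a map key elsewhere implies):

JobVertexID jobVertexId = onlyJobVertex.getID();
ExecutionVertexID recorded = new ExecutionVertexID(jobVertexId, 0);
ExecutionVertexID probe = new ExecutionVertexID(jobVertexId, 0);
// Distinct instances that are equal by value, which is what the Hamcrest matcher needs.
assert recorded != probe;
assert recorded.equals(probe) && recorded.hashCode() == probe.hashCode();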
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
the class DefaultSchedulerTest method handleGlobalFailureWithLocalFailure.
/**
 * Covers the use case where a global fail-over is followed by a local task failure.
 * Besides checking the expected deployments, the test verifies that the assert in the
 * global recovery handling of {@link SchedulerBase#restoreState} is not triggered due
 * to version updates.
 */
@Test
public void handleGlobalFailureWithLocalFailure() {
    final JobGraph jobGraph = singleJobVertexJobGraph(2);
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    enableCheckpointing(jobGraph);
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

    final List<ExecutionAttemptID> attemptIds =
            StreamSupport.stream(
                            scheduler
                                    .requestJob()
                                    .getArchivedExecutionGraph()
                                    .getAllExecutionVertices()
                                    .spliterator(),
                            false)
                    .map(ArchivedExecutionVertex::getCurrentExecutionAttempt)
                    .map(ArchivedExecution::getAttemptId)
                    .collect(Collectors.toList());
    final ExecutionAttemptID localFailureAttemptId = attemptIds.get(0);

    scheduler.handleGlobalFailure(new Exception("global failure"));
    // the local failure shouldn't affect the global fail-over
    scheduler.updateTaskExecutionState(
            new TaskExecutionState(
                    localFailureAttemptId, ExecutionState.FAILED, new Exception("local failure")));
    for (ExecutionAttemptID attemptId : attemptIds) {
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(attemptId, ExecutionState.CANCELED));
    }
    taskRestartExecutor.triggerScheduledTasks();

    final ExecutionVertexID executionVertexId0 = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    final ExecutionVertexID executionVertexId1 = new ExecutionVertexID(onlyJobVertex.getID(), 1);
    assertThat(
            "The execution vertices should be deployed in a specific order reflecting the "
                    + "scheduling start and the global fail-over afterwards.",
            testExecutionVertexOperations.getDeployedVertices(),
            contains(
                    executionVertexId0,
                    executionVertexId1,
                    executionVertexId0,
                    executionVertexId1));
}
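The version updates mentioned in the javadoc are Flink's guard against acting on stale state: every modification of an execution vertex bumps a per-vertex version, and asynchronous callbacks that still hold an old version are dropped. A minimal, self-contained sketch of that pattern (illustrative; Flink's real ExecutionVertexVersioner differs in its exact API):

import java.util.HashMap;
import java.util.Map;

// Illustrative versioning guard: actions triggered before a failover
// observe a stale version and are dropped instead of being executed.
final class VertexVersionerSketch {
    private final Map<String, Long> versions = new HashMap<>();

    long recordModification(String vertexId) {
        return versions.merge(vertexId, 1L, Long::sum);
    }

    boolean isStale(String vertexId, long observedVersion) {
        return versions.getOrDefault(vertexId, 0L) != observedVersion;
    }

    public static void main(String[] args) {
        VertexVersionerSketch versioner = new VertexVersionerSketch();
        long v = versioner.recordModification("vertex-0"); // scheduling starts
        versioner.recordModification("vertex-0");          // global fail-over bumps the version
        // A callback still holding the old version must be ignored:
        System.out.println(versioner.isStale("vertex-0", v)); // true
    }
}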
use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
the class DefaultSchedulerTest method handleGlobalFailure.
@Test
public void handleGlobalFailure() {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

    scheduler.handleGlobalFailure(new Exception("forced failure"));

    final ArchivedExecutionVertex onlyExecutionVertex =
            Iterables.getOnlyElement(
                    scheduler.requestJob().getArchivedExecutionGraph().getAllExecutionVertices());
    final ExecutionAttemptID attemptId =
            onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
    scheduler.updateTaskExecutionState(
            new TaskExecutionState(attemptId, ExecutionState.CANCELED));
    taskRestartExecutor.triggerScheduledTasks();

    final List<ExecutionVertexID> deployedExecutionVertices =
            testExecutionVertexOperations.getDeployedVertices();
    final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    assertThat(deployedExecutionVertices, contains(executionVertexId, executionVertexId));
}