Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
The class TaskDeploymentDescriptorFactoryTest, method setupExecutionGraphAndGetVertex.
private ExecutionJobVertex setupExecutionGraphAndGetVertex(JobID jobId, BlobWriter blobWriter)
        throws JobException, JobExecutionException {
    final JobVertex v1 = createJobVertex("v1", PARALLELISM);
    final JobVertex v2 = createJobVertex("v2", PARALLELISM);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
    final ExecutionGraph executionGraph = createExecutionGraph(jobId, ordered, blobWriter);
    return executionGraph.getJobVertex(v2.getID());
}
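A call site is not included above; a minimal sketch of how the helper might be used, assuming the test's PARALLELISM constant and a blobWriter fixture are in scope (the assertion is illustrative, not taken from the Flink test):

// Hypothetical usage of the helper above; everything except the Flink API is an assumption.
final ExecutionJobVertex consumerVertex = setupExecutionGraphAndGetVertex(new JobID(), blobWriter);
// The returned vertex is v2, so it exposes one ExecutionVertex per parallel subtask,
// each consuming the blocking ALL_TO_ALL result produced by v1.
assertThat(consumerVertex.getTaskVertices().length, is(PARALLELISM));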
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
The class DefaultSchedulerCheckpointCoordinatorTest, method testClosingSchedulerShutsDownCheckpointCoordinatorOnFinishedExecutionGraph.
/**
* Tests that the checkpoint coordinator is shut down if the execution graph is finished.
*/
@Test
public void testClosingSchedulerShutsDownCheckpointCoordinatorOnFinishedExecutionGraph()
        throws Exception {
    final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
    CheckpointIDCounter counter =
            TestingCheckpointIDCounter.createStoreWithShutdownCheckAndNoStartAction(
                    counterShutdownFuture);
    final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
    CompletedCheckpointStore store =
            TestingCompletedCheckpointStore.createStoreWithShutdownCheckAndNoCompletedCheckpoints(
                    storeShutdownFuture);
    final SchedulerBase scheduler = createSchedulerAndEnableCheckpointing(counter, store);
    final ExecutionGraph graph = scheduler.getExecutionGraph();
    final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();
    assertThat(checkpointCoordinator, Matchers.notNullValue());
    assertThat(checkpointCoordinator.isShutdown(), is(false));
    scheduler.startScheduling();
    for (ExecutionVertex executionVertex : graph.getAllExecutionVertices()) {
        final Execution currentExecutionAttempt = executionVertex.getCurrentExecutionAttempt();
        scheduler.updateTaskExecutionState(
                new TaskExecutionState(
                        currentExecutionAttempt.getAttemptId(), ExecutionState.FINISHED));
    }
    assertThat(graph.getTerminationFuture().get(), is(JobStatus.FINISHED));
    scheduler.closeAsync().get();
    assertThat(checkpointCoordinator.isShutdown(), is(true));
    assertThat(counterShutdownFuture.get(), is(JobStatus.FINISHED));
    assertThat(storeShutdownFuture.get(), is(JobStatus.FINISHED));
}
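The loop that reports every execution attempt as FINISHED could be pulled into a small helper. A minimal sketch, using only the types already shown in this test; the helper itself is not part of the Flink code:

// Hypothetical helper, not part of the Flink test: reports the given state for the
// current execution attempt of every vertex in the graph through the scheduler.
private static void transitionAllExecutions(
        SchedulerBase scheduler, ExecutionGraph graph, ExecutionState state) {
    for (ExecutionVertex executionVertex : graph.getAllExecutionVertices()) {
        final Execution attempt = executionVertex.getCurrentExecutionAttempt();
        scheduler.updateTaskExecutionState(new TaskExecutionState(attempt.getAttemptId(), state));
    }
}

With such a helper, the loop in the test above would reduce to transitionAllExecutions(scheduler, graph, ExecutionState.FINISHED).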
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
The class DefaultSchedulerCheckpointCoordinatorTest, method testClosingSchedulerShutsDownCheckpointCoordinatorOnSuspendedExecutionGraph.
/**
* Tests that the checkpoint coordinator is shut down if the execution graph is suspended.
*/
@Test
public void testClosingSchedulerShutsDownCheckpointCoordinatorOnSuspendedExecutionGraph()
        throws Exception {
    final CompletableFuture<JobStatus> counterShutdownFuture = new CompletableFuture<>();
    CheckpointIDCounter counter =
            TestingCheckpointIDCounter.createStoreWithShutdownCheckAndNoStartAction(
                    counterShutdownFuture);
    final CompletableFuture<JobStatus> storeShutdownFuture = new CompletableFuture<>();
    CompletedCheckpointStore store =
            TestingCompletedCheckpointStore.createStoreWithShutdownCheckAndNoCompletedCheckpoints(
                    storeShutdownFuture);
    final SchedulerBase scheduler = createSchedulerAndEnableCheckpointing(counter, store);
    final ExecutionGraph graph = scheduler.getExecutionGraph();
    final CheckpointCoordinator checkpointCoordinator = graph.getCheckpointCoordinator();
    assertThat(checkpointCoordinator, Matchers.notNullValue());
    assertThat(checkpointCoordinator.isShutdown(), is(false));
    graph.suspend(new Exception("Test Exception"));
    scheduler.closeAsync().get();
    assertThat(checkpointCoordinator.isShutdown(), is(true));
    assertThat(counterShutdownFuture.get(), is(JobStatus.SUSPENDED));
    assertThat(storeShutdownFuture.get(), is(JobStatus.SUSPENDED));
}
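Unlike the FINISHED variant, this test terminates the job by suspending the execution graph directly. An extra assertion that could be placed before closing the scheduler, sketched as an assumption rather than part of the original test:

// Hypothetical additional check; assumes suspend() drives the graph to the terminal
// SUSPENDED status, mirroring the termination-future assertion in the FINISHED test.
assertThat(graph.getTerminationFuture().get(), is(JobStatus.SUSPENDED));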
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
The class VertexFinishedStateCheckerTest, method testRestoringPartiallyFinishedChainsFails.
private void testRestoringPartiallyFinishedChainsFails(boolean useUidHash) throws Exception {
    final JobVertexID jobVertexID1 = new JobVertexID();
    final JobVertexID jobVertexID2 = new JobVertexID();
    // The op1 has uidHash set.
    OperatorIDPair op1 = OperatorIDPair.of(new OperatorID(), new OperatorID());
    OperatorIDPair op2 = OperatorIDPair.generatedIDOnly(new OperatorID());
    OperatorIDPair op3 = OperatorIDPair.generatedIDOnly(new OperatorID());
    final ExecutionGraph graph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(jobVertexID2, 1, 1, singletonList(op3), true)
                    .addJobVertex(jobVertexID1, 1, 1, Arrays.asList(op1, op2), true)
                    .build();
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    operatorStates.put(
            useUidHash ? op1.getUserDefinedOperatorID().get() : op1.getGeneratedOperatorID(),
            new FullyFinishedOperatorState(op1.getGeneratedOperatorID(), 1, 1));
    operatorStates.put(
            op2.getGeneratedOperatorID(), new OperatorState(op2.getGeneratedOperatorID(), 1, 1));
    Set<ExecutionJobVertex> vertices = new HashSet<>();
    vertices.add(graph.getJobVertex(jobVertexID1));
    VertexFinishedStateChecker finishedStateChecker =
            new VertexFinishedStateChecker(vertices, operatorStates);
    FlinkRuntimeException exception =
            assertThrows(
                    FlinkRuntimeException.class,
                    finishedStateChecker::validateOperatorsFinishedState);
    assertThat(
            exception.getMessage(),
            is(equalTo(
                    "Can not restore vertex "
                            + "anon("
                            + jobVertexID1
                            + ")"
                            + " which contain mixed operator finished state: [ALL_RUNNING, FULLY_FINISHED]")));
}
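This is a private parameterized helper, so its @Test call sites are not shown. A plausible pair of driver tests, with hypothetical method names, would exercise both branches of the useUidHash flag:

// Hypothetical driver tests for the helper above; the method names are assumptions.
@Test
public void testRestoringPartiallyFinishedChainsFailsWithGeneratedIds() throws Exception {
    testRestoringPartiallyFinishedChainsFails(false);
}

@Test
public void testRestoringPartiallyFinishedChainsFailsWithUidHashes() throws Exception {
    testRestoringPartiallyFinishedChainsFails(true);
}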
Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
The class VertexFinishedStateCheckerTest, method testAddingOperatorsBeforePartiallyOrFullyFinishedOne.
private void testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
        JobVertexID firstVertexId,
        String firstVertexName,
        VertexFinishedStateChecker.VertexFinishedState firstOperatorFinishedState,
        JobVertexID secondVertexId,
        String secondVertexName,
        VertexFinishedStateChecker.VertexFinishedState secondOperatorFinishedState,
        DistributionPattern[] distributionPatterns,
        Class<? extends Throwable> expectedExceptionalClass,
        String expectedMessage)
        throws Exception {
    OperatorIDPair op1 = OperatorIDPair.generatedIDOnly(new OperatorID());
    OperatorIDPair op2 = OperatorIDPair.generatedIDOnly(new OperatorID());
    JobVertex vertex1 = new JobVertex(firstVertexName, firstVertexId, singletonList(op1));
    JobVertex vertex2 = new JobVertex(secondVertexName, secondVertexId, singletonList(op2));
    vertex1.setInvokableClass(NoOpInvokable.class);
    vertex2.setInvokableClass(NoOpInvokable.class);
    final ExecutionGraph graph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(vertex1, true)
                    .addJobVertex(vertex2, false)
                    .setDistributionPattern(distributionPatterns[0])
                    .build();
    // Adds the additional edges
    for (int i = 1; i < distributionPatterns.length; ++i) {
        vertex2.connectNewDataSetAsInput(
                vertex1, distributionPatterns[i], ResultPartitionType.PIPELINED);
    }
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    operatorStates.put(
            op1.getGeneratedOperatorID(),
            createOperatorState(op1.getGeneratedOperatorID(), firstOperatorFinishedState));
    operatorStates.put(
            op2.getGeneratedOperatorID(),
            createOperatorState(op2.getGeneratedOperatorID(), secondOperatorFinishedState));
    Set<ExecutionJobVertex> vertices = new HashSet<>();
    vertices.add(graph.getJobVertex(vertex1.getID()));
    vertices.add(graph.getJobVertex(vertex2.getID()));
    VertexFinishedStateChecker finishedStateChecker =
            new VertexFinishedStateChecker(vertices, operatorStates);
    Throwable exception =
            assertThrows(
                    expectedExceptionalClass, finishedStateChecker::validateOperatorsFinishedState);
    assertThat(exception.getMessage(), is(equalTo(expectedMessage)));
}
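This helper is likewise parameterized and its call sites are not shown. One way it might be driven, with hypothetical vertex names and a placeholder expected message (the concrete exception type and message text depend on the checker's wording, so both are assumptions here):

// Hypothetical invocation of the helper above; the vertex names, the expected exception
// type, and the message are assumptions for illustration only.
final JobVertexID firstId = new JobVertexID();
final JobVertexID secondId = new JobVertexID();
// Placeholder: a real test would pass the exact message produced by the checker.
final String expectedMessage = "...";
testAddingOperatorsBeforePartiallyOrFullyFinishedOne(
        firstId,
        "vertex1",
        VertexFinishedStateChecker.VertexFinishedState.ALL_RUNNING,
        secondId,
        "vertex2",
        VertexFinishedStateChecker.VertexFinishedState.FULLY_FINISHED,
        new DistributionPattern[] {DistributionPattern.ALL_TO_ALL},
        FlinkRuntimeException.class,
        expectedMessage);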