
Example 11 with ExecutionGraph

Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.

The class CheckpointCoordinatorRestoringTest, method testRestoreLatestCheckpointFailureWhenMaxParallelismChanges.

/**
 * Tests that the checkpoint restoration fails if the max parallelism of the job vertices has
 * changed.
 *
 * @throws Exception
 */
@Test(expected = IllegalStateException.class)
public void testRestoreLatestCheckpointFailureWhenMaxParallelismChanges() throws Exception {
    final JobVertexID jobVertexID1 = new JobVertexID();
    final JobVertexID jobVertexID2 = new JobVertexID();
    int parallelism1 = 3;
    int parallelism2 = 2;
    int maxParallelism1 = 42;
    int maxParallelism2 = 13;
    CompletedCheckpointStore completedCheckpointStore = new EmbeddedCompletedCheckpointStore();
    ExecutionGraph graph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(jobVertexID1, parallelism1, maxParallelism1)
                    .addJobVertex(jobVertexID2, parallelism2, maxParallelism2)
                    .build();
    ExecutionJobVertex jobVertex1 = graph.getJobVertex(jobVertexID1);
    ExecutionJobVertex jobVertex2 = graph.getJobVertex(jobVertexID2);
    // set up the coordinator and validate the initial state
    CheckpointCoordinator coord =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(graph)
                    .setCompletedCheckpointStore(completedCheckpointStore)
                    .setTimer(manuallyTriggeredScheduledExecutor)
                    .build();
    // trigger the checkpoint
    coord.triggerCheckpoint(false);
    manuallyTriggeredScheduledExecutor.triggerAll();
    assertEquals(1, coord.getPendingCheckpoints().size());
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());
    List<KeyGroupRange> keyGroupPartitions1 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism1, parallelism1);
    List<KeyGroupRange> keyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, parallelism2);
    for (int index = 0; index < jobVertex1.getParallelism(); index++) {
        KeyGroupsStateHandle keyGroupState = generateKeyGroupState(jobVertexID1, keyGroupPartitions1.get(index), false);
        OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder().setManagedKeyedState(keyGroupState).build();
        TaskStateSnapshot taskOperatorSubtaskStates = new TaskStateSnapshot();
        taskOperatorSubtaskStates.putSubtaskStateByOperatorID(OperatorID.fromJobVertexID(jobVertexID1), operatorSubtaskState);
        AcknowledgeCheckpoint acknowledgeCheckpoint =
                new AcknowledgeCheckpoint(
                        graph.getJobID(),
                        jobVertex1.getTaskVertices()[index]
                                .getCurrentExecutionAttempt()
                                .getAttemptId(),
                        checkpointId,
                        new CheckpointMetrics(),
                        taskOperatorSubtaskStates);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint, TASK_MANAGER_LOCATION_INFO);
    }
    for (int index = 0; index < jobVertex2.getParallelism(); index++) {
        KeyGroupsStateHandle keyGroupState = generateKeyGroupState(jobVertexID2, keyGroupPartitions2.get(index), false);
        OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder().setManagedKeyedState(keyGroupState).build();
        TaskStateSnapshot taskOperatorSubtaskStates = new TaskStateSnapshot();
        taskOperatorSubtaskStates.putSubtaskStateByOperatorID(OperatorID.fromJobVertexID(jobVertexID2), operatorSubtaskState);
        AcknowledgeCheckpoint acknowledgeCheckpoint =
                new AcknowledgeCheckpoint(
                        graph.getJobID(),
                        jobVertex2.getTaskVertices()[index]
                                .getCurrentExecutionAttempt()
                                .getAttemptId(),
                        checkpointId,
                        new CheckpointMetrics(),
                        taskOperatorSubtaskStates);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint, TASK_MANAGER_LOCATION_INFO);
    }
    List<CompletedCheckpoint> completedCheckpoints = coord.getSuccessfulCheckpoints();
    assertEquals(1, completedCheckpoints.size());
    int newMaxParallelism1 = 20;
    int newMaxParallelism2 = 42;
    ExecutionGraph newGraph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(jobVertexID1, parallelism1, newMaxParallelism1)
                    .addJobVertex(jobVertexID2, parallelism2, newMaxParallelism2)
                    .build();
    ExecutionJobVertex newJobVertex1 = newGraph.getJobVertex(jobVertexID1);
    ExecutionJobVertex newJobVertex2 = newGraph.getJobVertex(jobVertexID2);
    // set up the coordinator and validate the initial state
    CheckpointCoordinator newCoord =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(newGraph)
                    .setCompletedCheckpointStore(completedCheckpointStore)
                    .setTimer(manuallyTriggeredScheduledExecutor)
                    .build();
    Set<ExecutionJobVertex> tasks = new HashSet<>();
    tasks.add(newJobVertex1);
    tasks.add(newJobVertex2);
    assertTrue(newCoord.restoreLatestCheckpointedStateToAll(tasks, false));
    fail("The restoration should have failed because the max parallelism changed.");
}
Also used : JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) AcknowledgeCheckpoint(org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint) CheckpointCoordinatorBuilder(org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) HashSet(java.util.HashSet) Test(org.junit.Test)
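
Note: the expected IllegalStateException is declared via @Test(expected = IllegalStateException.class), so the trailing fail(...) only runs if the restore call returns normally. As a minimal sketch (assuming JUnit 4.13+), the same expectation could be pinned to the restore call itself with Assert.assertThrows; this is an alternative formulation, not what the Flink test does:

// Sketch only: newCoord and tasks are the names from the test above; assertThrows is
// org.junit.Assert.assertThrows, available since JUnit 4.13.
assertThrows(
        IllegalStateException.class,
        () -> newCoord.restoreLatestCheckpointedStateToAll(tasks, false));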

Example 12 with ExecutionGraph

Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.

The class CheckpointCoordinatorRestoringTest, method testStateRecoveryWithTopologyChange.

/**
 * Old topology: [operator1, operator2] * parallelism1 -> [operator3, operator4] * parallelism2
 *
 * <p>New topology:
 *
 * <p>[operator5, operator1, operator2] * newParallelism1 -> [operator3, operator6] *
 * newParallelism2
 */
public void testStateRecoveryWithTopologyChange(TestScaleType scaleType) throws Exception {
    /*
         * Old topology
         * CHAIN(op1 -> op2) * parallelism1 -> CHAIN(op3 -> op4) * parallelism2
         */
    Tuple2<JobVertexID, OperatorID> id1 = generateIDPair();
    Tuple2<JobVertexID, OperatorID> id2 = generateIDPair();
    int parallelism1 = 10;
    int maxParallelism1 = 64;
    Tuple2<JobVertexID, OperatorID> id3 = generateIDPair();
    Tuple2<JobVertexID, OperatorID> id4 = generateIDPair();
    int parallelism2 = 10;
    int maxParallelism2 = 64;
    List<KeyGroupRange> keyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, parallelism2);
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    // prepare vertex1 state
    for (Tuple2<JobVertexID, OperatorID> id : Arrays.asList(id1, id2)) {
        OperatorState taskState = new OperatorState(id.f1, parallelism1, maxParallelism1);
        operatorStates.put(id.f1, taskState);
        for (int index = 0; index < taskState.getParallelism(); index++) {
            OperatorSubtaskState subtaskState =
                    OperatorSubtaskState.builder()
                            .setManagedOperatorState(
                                    generatePartitionableStateHandle(id.f0, index, 2, 8, false))
                            .setRawOperatorState(
                                    generatePartitionableStateHandle(id.f0, index, 2, 8, true))
                            .build();
            taskState.putState(index, subtaskState);
        }
    }
    List<List<ChainedStateHandle<OperatorStateHandle>>> expectedManagedOperatorStates = new ArrayList<>();
    List<List<ChainedStateHandle<OperatorStateHandle>>> expectedRawOperatorStates = new ArrayList<>();
    // prepare vertex2 state
    for (Tuple2<JobVertexID, OperatorID> id : Arrays.asList(id3, id4)) {
        OperatorState operatorState = new OperatorState(id.f1, parallelism2, maxParallelism2);
        operatorStates.put(id.f1, operatorState);
        List<ChainedStateHandle<OperatorStateHandle>> expectedManagedOperatorState = new ArrayList<>();
        List<ChainedStateHandle<OperatorStateHandle>> expectedRawOperatorState = new ArrayList<>();
        expectedManagedOperatorStates.add(expectedManagedOperatorState);
        expectedRawOperatorStates.add(expectedRawOperatorState);
        for (int index = 0; index < operatorState.getParallelism(); index++) {
            final OperatorSubtaskState.Builder stateBuilder = OperatorSubtaskState.builder();
            OperatorStateHandle subManagedOperatorState = generateChainedPartitionableStateHandle(id.f0, index, 2, 8, false).get(0);
            OperatorStateHandle subRawOperatorState = generateChainedPartitionableStateHandle(id.f0, index, 2, 8, true).get(0);
            if (id.f0.equals(id3.f0)) {
                stateBuilder.setManagedKeyedState(generateKeyGroupState(id.f0, keyGroupPartitions2.get(index), false));
            }
            if (id.f0.equals(id3.f0)) {
                stateBuilder.setRawKeyedState(generateKeyGroupState(id.f0, keyGroupPartitions2.get(index), true));
            }
            expectedManagedOperatorState.add(ChainedStateHandle.wrapSingleHandle(subManagedOperatorState));
            expectedRawOperatorState.add(ChainedStateHandle.wrapSingleHandle(subRawOperatorState));
            OperatorSubtaskState subtaskState = stateBuilder.setManagedOperatorState(subManagedOperatorState).setRawOperatorState(subRawOperatorState).build();
            operatorState.putState(index, subtaskState);
        }
    }
    /*
         * New topology
         * CHAIN(op5 -> op1 -> op2) * newParallelism1 -> CHAIN(op3 -> op6) * newParallelism2
         */
    Tuple2<JobVertexID, OperatorID> id5 = generateIDPair();
    int newParallelism1 = 10;
    Tuple2<JobVertexID, OperatorID> id6 = generateIDPair();
    int newParallelism2 = parallelism2;
    if (scaleType == TestScaleType.INCREASE_PARALLELISM) {
        newParallelism2 = 20;
    } else if (scaleType == TestScaleType.DECREASE_PARALLELISM) {
        newParallelism2 = 8;
    }
    List<KeyGroupRange> newKeyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, newParallelism2);
    ExecutionGraph newGraph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(
                            id5.f0,
                            newParallelism1,
                            maxParallelism1,
                            Stream.of(id2.f1, id1.f1, id5.f1)
                                    .map(OperatorIDPair::generatedIDOnly)
                                    .collect(Collectors.toList()),
                            true)
                    .addJobVertex(
                            id3.f0,
                            newParallelism2,
                            maxParallelism2,
                            Stream.of(id6.f1, id3.f1)
                                    .map(OperatorIDPair::generatedIDOnly)
                                    .collect(Collectors.toList()),
                            true)
                    .build();
    ExecutionJobVertex newJobVertex1 = newGraph.getJobVertex(id5.f0);
    ExecutionJobVertex newJobVertex2 = newGraph.getJobVertex(id3.f0);
    Set<ExecutionJobVertex> tasks = new HashSet<>();
    tasks.add(newJobVertex1);
    tasks.add(newJobVertex2);
    CompletedCheckpoint completedCheckpoint =
            new CompletedCheckpoint(
                    newGraph.getJobID(),
                    2,
                    System.currentTimeMillis(),
                    System.currentTimeMillis() + 3000,
                    operatorStates,
                    Collections.<MasterState>emptyList(),
                    CheckpointProperties.forCheckpoint(
                            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION),
                    new TestCompletedCheckpointStorageLocation());
    // set up the coordinator and validate the initial state
    SharedStateRegistry sharedStateRegistry = SharedStateRegistry.DEFAULT_FACTORY.create(Executors.directExecutor(), emptyList());
    CheckpointCoordinator coord =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(newGraph)
                    .setCompletedCheckpointStore(
                            storeFor(sharedStateRegistry, () -> {}, completedCheckpoint))
                    .setTimer(manuallyTriggeredScheduledExecutor)
                    .build();
    coord.restoreLatestCheckpointedStateToAll(tasks, true);
    for (int i = 0; i < newJobVertex1.getParallelism(); i++) {
        final List<OperatorIDPair> operatorIDs = newJobVertex1.getOperatorIDs();
        JobManagerTaskRestore taskRestore = newJobVertex1.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskRestore();
        Assert.assertEquals(2L, taskRestore.getRestoreCheckpointId());
        TaskStateSnapshot stateSnapshot = taskRestore.getTaskStateSnapshot();
        OperatorSubtaskState headOpState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIDs.size() - 1).getGeneratedOperatorID());
        assertTrue(headOpState.getManagedKeyedState().isEmpty());
        assertTrue(headOpState.getRawKeyedState().isEmpty());
        // operator5
        {
            int operatorIndexInChain = 2;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            assertTrue(opState.getManagedOperatorState().isEmpty());
            assertTrue(opState.getRawOperatorState().isEmpty());
        }
        // operator1
        {
            int operatorIndexInChain = 1;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            OperatorStateHandle expectedManagedOpState = generatePartitionableStateHandle(id1.f0, i, 2, 8, false);
            OperatorStateHandle expectedRawOpState = generatePartitionableStateHandle(id1.f0, i, 2, 8, true);
            Collection<OperatorStateHandle> managedOperatorState = opState.getManagedOperatorState();
            assertEquals(1, managedOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedManagedOpState.openInputStream(), managedOperatorState.iterator().next().openInputStream()));
            Collection<OperatorStateHandle> rawOperatorState = opState.getRawOperatorState();
            assertEquals(1, rawOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedRawOpState.openInputStream(), rawOperatorState.iterator().next().openInputStream()));
        }
        // operator2
        {
            int operatorIndexInChain = 0;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            OperatorStateHandle expectedManagedOpState = generatePartitionableStateHandle(id2.f0, i, 2, 8, false);
            OperatorStateHandle expectedRawOpState = generatePartitionableStateHandle(id2.f0, i, 2, 8, true);
            Collection<OperatorStateHandle> managedOperatorState = opState.getManagedOperatorState();
            assertEquals(1, managedOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedManagedOpState.openInputStream(), managedOperatorState.iterator().next().openInputStream()));
            Collection<OperatorStateHandle> rawOperatorState = opState.getRawOperatorState();
            assertEquals(1, rawOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedRawOpState.openInputStream(), rawOperatorState.iterator().next().openInputStream()));
        }
    }
    List<List<Collection<OperatorStateHandle>>> actualManagedOperatorStates = new ArrayList<>(newJobVertex2.getParallelism());
    List<List<Collection<OperatorStateHandle>>> actualRawOperatorStates = new ArrayList<>(newJobVertex2.getParallelism());
    for (int i = 0; i < newJobVertex2.getParallelism(); i++) {
        final List<OperatorIDPair> operatorIDs = newJobVertex2.getOperatorIDs();
        JobManagerTaskRestore taskRestore = newJobVertex2.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskRestore();
        Assert.assertEquals(2L, taskRestore.getRestoreCheckpointId());
        TaskStateSnapshot stateSnapshot = taskRestore.getTaskStateSnapshot();
        // operator 3
        {
            int operatorIndexInChain = 1;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            List<Collection<OperatorStateHandle>> actualSubManagedOperatorState = new ArrayList<>(1);
            actualSubManagedOperatorState.add(opState.getManagedOperatorState());
            List<Collection<OperatorStateHandle>> actualSubRawOperatorState = new ArrayList<>(1);
            actualSubRawOperatorState.add(opState.getRawOperatorState());
            actualManagedOperatorStates.add(actualSubManagedOperatorState);
            actualRawOperatorStates.add(actualSubRawOperatorState);
        }
        // operator 6
        {
            int operatorIndexInChain = 0;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            assertTrue(opState.getManagedOperatorState().isEmpty());
            assertTrue(opState.getRawOperatorState().isEmpty());
        }
        KeyGroupsStateHandle originalKeyedStateBackend = generateKeyGroupState(id3.f0, newKeyGroupPartitions2.get(i), false);
        KeyGroupsStateHandle originalKeyedStateRaw = generateKeyGroupState(id3.f0, newKeyGroupPartitions2.get(i), true);
        OperatorSubtaskState headOpState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIDs.size() - 1).getGeneratedOperatorID());
        Collection<KeyedStateHandle> keyedStateBackend = headOpState.getManagedKeyedState();
        Collection<KeyedStateHandle> keyGroupStateRaw = headOpState.getRawKeyedState();
        compareKeyedState(singletonList(originalKeyedStateBackend), keyedStateBackend);
        compareKeyedState(singletonList(originalKeyedStateRaw), keyGroupStateRaw);
    }
    comparePartitionableState(expectedManagedOperatorStates.get(0), actualManagedOperatorStates);
    comparePartitionableState(expectedRawOperatorStates.get(0), actualRawOperatorStates);
}
Also used : HashMap(java.util.HashMap) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) ArrayList(java.util.ArrayList) TestCompletedCheckpointStorageLocation(org.apache.flink.runtime.state.testutils.TestCompletedCheckpointStorageLocation) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) KeyedStateHandle(org.apache.flink.runtime.state.KeyedStateHandle) ChainedStateHandle(org.apache.flink.runtime.state.ChainedStateHandle) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) SharedStateRegistry(org.apache.flink.runtime.state.SharedStateRegistry) ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) Collections.singletonList(java.util.Collections.singletonList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) HashSet(java.util.HashSet) AcknowledgeCheckpoint(org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint) CheckpointCoordinatorBuilder(org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) Collection(java.util.Collection) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) OperatorIDPair(org.apache.flink.runtime.OperatorIDPair)
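
The test calls a generateIDPair() helper that is not included in this excerpt. A plausible sketch of such a helper, assuming it pairs a fresh JobVertexID with the OperatorID derived from it (the actual implementation in CheckpointCoordinatorRestoringTest may differ):

// Hypothetical reconstruction of generateIDPair(): returns a (JobVertexID, OperatorID) pair
// whose operator id is derived from the vertex id, mirroring how OperatorID.fromJobVertexID
// is used elsewhere in these tests.
private static Tuple2<JobVertexID, OperatorID> generateIDPair() {
    JobVertexID jobVertexID = new JobVertexID();
    OperatorID operatorID = OperatorID.fromJobVertexID(jobVertexID);
    return new Tuple2<>(jobVertexID, operatorID);
}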

Example 13 with ExecutionGraph

Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.

The class CheckpointCoordinatorRestoringTest, method testRestoreFinishedStateWithoutInFlightData.

@Test
public void testRestoreFinishedStateWithoutInFlightData() throws Exception {
    // given: Operator with not empty states.
    OperatorIDPair op1 = OperatorIDPair.generatedIDOnly(new OperatorID());
    final JobVertexID jobVertexID = new JobVertexID();
    ExecutionGraph graph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(jobVertexID, 1, 1, singletonList(op1), true)
                    .build();
    CompletedCheckpointStore completedCheckpointStore = new EmbeddedCompletedCheckpointStore();
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    operatorStates.put(op1.getGeneratedOperatorID(), new FullyFinishedOperatorState(op1.getGeneratedOperatorID(), 1, 1));
    CompletedCheckpoint completedCheckpoint =
            new CompletedCheckpoint(
                    graph.getJobID(),
                    2,
                    System.currentTimeMillis(),
                    System.currentTimeMillis() + 3000,
                    operatorStates,
                    Collections.emptyList(),
                    CheckpointProperties.forCheckpoint(
                            CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION),
                    new TestCompletedCheckpointStorageLocation());
    completedCheckpointStore.addCheckpointAndSubsumeOldestOne(
            completedCheckpoint, new CheckpointsCleaner(), () -> {});
    CheckpointCoordinator coord =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(graph)
                    .setCheckpointCoordinatorConfiguration(
                            new CheckpointCoordinatorConfigurationBuilder()
                                    .setCheckpointIdOfIgnoredInFlightData(2)
                                    .build())
                    .setCompletedCheckpointStore(completedCheckpointStore)
                    .build();
    ExecutionJobVertex vertex = graph.getJobVertex(jobVertexID);
    coord.restoreInitialCheckpointIfPresent(Collections.singleton(vertex));
    TaskStateSnapshot restoredState = vertex.getTaskVertices()[0].getCurrentExecutionAttempt().getTaskRestore().getTaskStateSnapshot();
    assertTrue(restoredState.isTaskDeployedAsFinished());
}
Also used : HashMap(java.util.HashMap) CheckpointCoordinatorConfigurationBuilder(org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration.CheckpointCoordinatorConfigurationBuilder) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TestCompletedCheckpointStorageLocation(org.apache.flink.runtime.state.testutils.TestCompletedCheckpointStorageLocation) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) CheckpointCoordinatorBuilder(org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder) ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) OperatorIDPair(org.apache.flink.runtime.OperatorIDPair) Test(org.junit.Test)
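
The completed-checkpoint-store setup here is the same pattern the surrounding restore tests repeat inline. A hedged sketch of how it could be factored out; storeWith is a hypothetical name, and every call inside it appears verbatim in the test above:

// Illustration only: build an EmbeddedCompletedCheckpointStore that already contains the
// given completed checkpoint, exactly as the test does inline.
private static CompletedCheckpointStore storeWith(CompletedCheckpoint checkpoint) throws Exception {
    CompletedCheckpointStore store = new EmbeddedCompletedCheckpointStore();
    store.addCheckpointAndSubsumeOldestOne(checkpoint, new CheckpointsCleaner(), () -> {});
    return store;
}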

Example 14 with ExecutionGraph

Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.

The class CheckpointCoordinatorRestoringTest, method testSuccessfulCheckpointsArePersistedToCompletedCheckpointStore.

private Collection<CompletedCheckpoint> testSuccessfulCheckpointsArePersistedToCompletedCheckpointStore(List<TestingVertex> vertices) throws Exception {
    final ExecutionGraph executionGraph = createExecutionGraph(vertices);
    final EmbeddedCompletedCheckpointStore store = new EmbeddedCompletedCheckpointStore();
    // set up the coordinator and validate the initial state
    final CheckpointCoordinator coordinator =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(executionGraph)
                    .setTimer(manuallyTriggeredScheduledExecutor)
                    .setCompletedCheckpointStore(store)
                    .build();
    // trigger the checkpoint
    coordinator.triggerCheckpoint(false);
    manuallyTriggeredScheduledExecutor.triggerAll();
    // we should have a single pending checkpoint
    assertEquals(1, coordinator.getPendingCheckpoints().size());
    final long checkpointId = Iterables.getOnlyElement(coordinator.getPendingCheckpoints().keySet());
    // acknowledge checkpoints from all vertex partitions
    for (TestingVertex vertex : vertices) {
        final ExecutionJobVertex executionVertex = Objects.requireNonNull(executionGraph.getJobVertex(vertex.getId()));
        acknowledgeCheckpoint(coordinator, executionGraph, executionVertex, checkpointId);
    }
    final List<CompletedCheckpoint> completedCheckpoints = coordinator.getSuccessfulCheckpoints();
    assertEquals(1, completedCheckpoints.size());
    // shutdown the store
    store.shutdown(JobStatus.SUSPENDED, new CheckpointsCleaner());
    return store.getAllCheckpoints();
}
Also used : ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) CheckpointCoordinatorBuilder(org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder)
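
This is a private helper rather than a test; the @Test methods that drive it are not part of this excerpt. A hypothetical caller might look like the sketch below, where the TestingVertex constructor signature (vertex id, parallelism, max parallelism) is an assumption:

// Hypothetical usage sketch: run the helper with a single vertex of parallelism 1 and
// check that exactly one completed checkpoint survived the SUSPENDED shutdown of the store.
@Test
public void testSuccessfulCheckpointIsPersistedForSingleVertex() throws Exception {
    List<TestingVertex> vertices =
            Collections.singletonList(new TestingVertex(new JobVertexID(), 1, 1));
    Collection<CompletedCheckpoint> checkpoints =
            testSuccessfulCheckpointsArePersistedToCompletedCheckpointStore(vertices);
    assertEquals(1, checkpoints.size());
}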

Example 15 with ExecutionGraph

Use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.

The class CheckpointCoordinatorRestoringTest, method testRestoreLatestCheckpointedState.

private void testRestoreLatestCheckpointedState(List<TestingVertex> vertices, Collection<CompletedCheckpoint> completedCheckpoints) throws Exception {
    final ExecutionGraph executionGraph = createExecutionGraph(vertices);
    final EmbeddedCompletedCheckpointStore store = new EmbeddedCompletedCheckpointStore(completedCheckpoints.size(), completedCheckpoints);
    // set up the coordinator and validate the initial state
    final CheckpointCoordinator coordinator =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(executionGraph)
                    .setTimer(manuallyTriggeredScheduledExecutor)
                    .setCompletedCheckpointStore(store)
                    .build();
    final Set<ExecutionJobVertex> executionVertices =
            vertices.stream()
                    .map(TestingVertex::getId)
                    .map(executionGraph::getJobVertex)
                    .collect(Collectors.toSet());
    assertTrue(coordinator.restoreLatestCheckpointedStateToAll(executionVertices, false));
    // validate that all shared states are registered again after the recovery.
    for (CompletedCheckpoint completedCheckpoint : completedCheckpoints) {
        for (OperatorState taskState : completedCheckpoint.getOperatorStates().values()) {
            for (OperatorSubtaskState subtaskState : taskState.getStates()) {
                verify(subtaskState, times(2)).registerSharedStates(any(SharedStateRegistry.class), eq(completedCheckpoint.getCheckpointID()));
            }
        }
    }
    // verify the restored state
    for (ExecutionJobVertex executionVertex : executionVertices) {
        verifyStateRestore(executionVertex);
    }
}
Also used : ExecutionJobVertex(org.apache.flink.runtime.executiongraph.ExecutionJobVertex) ExecutionGraph(org.apache.flink.runtime.executiongraph.ExecutionGraph) CheckpointCoordinatorBuilder(org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder) SharedStateRegistry(org.apache.flink.runtime.state.SharedStateRegistry)
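
The verify(subtaskState, times(2)).registerSharedStates(...) check only works if the OperatorSubtaskState instances inside the supplied checkpoints are Mockito spies. A minimal sketch of how a caller might prepare such state (the original setup code is not part of this excerpt; spy is org.mockito.Mockito.spy):

// Sketch, not the original setup: wrap each subtask state in a Mockito spy so that
// registerSharedStates invocations during recovery can be counted with verify(..., times(2)).
OperatorID operatorID = new OperatorID();
OperatorState operatorState = new OperatorState(operatorID, 1, 1);
operatorState.putState(0, spy(OperatorSubtaskState.builder().build()));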

Aggregations

ExecutionGraph (org.apache.flink.runtime.executiongraph.ExecutionGraph)120 Test (org.junit.Test)96 JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID)77 ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex)53 CheckpointCoordinatorBuilder (org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder)40 ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID)36 AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint)35 ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex)31 JobVertex (org.apache.flink.runtime.jobgraph.JobVertex)24 OperatorID (org.apache.flink.runtime.jobgraph.OperatorID)24 HashMap (java.util.HashMap)20 CompletableFuture (java.util.concurrent.CompletableFuture)19 JobID (org.apache.flink.api.common.JobID)19 ArrayList (java.util.ArrayList)17 HashSet (java.util.HashSet)17 JobGraph (org.apache.flink.runtime.jobgraph.JobGraph)17 DeclineCheckpoint (org.apache.flink.runtime.messages.checkpoint.DeclineCheckpoint)17 ExecutionException (java.util.concurrent.ExecutionException)13 Executor (java.util.concurrent.Executor)13 IOException (java.io.IOException)12