
Example 1 with TaskStateManager

Use of org.apache.flink.runtime.state.TaskStateManager in the project flink by apache.

From the class MemoryStateBackend, method createKeyedStateBackend:

@Override
public <K> AbstractKeyedStateBackend<K> createKeyedStateBackend(
        Environment env,
        JobID jobID,
        String operatorIdentifier,
        TypeSerializer<K> keySerializer,
        int numberOfKeyGroups,
        KeyGroupRange keyGroupRange,
        TaskKvStateRegistry kvStateRegistry,
        TtlTimeProvider ttlTimeProvider,
        MetricGroup metricGroup,
        @Nonnull Collection<KeyedStateHandle> stateHandles,
        CloseableRegistry cancelStreamRegistry) throws BackendBuildingException {
    TaskStateManager taskStateManager = env.getTaskStateManager();
    HeapPriorityQueueSetFactory priorityQueueSetFactory = new HeapPriorityQueueSetFactory(keyGroupRange, numberOfKeyGroups, 128);
    LatencyTrackingStateConfig latencyTrackingStateConfig = latencyTrackingConfigBuilder.setMetricGroup(metricGroup).build();
    return new HeapKeyedStateBackendBuilder<>(
            kvStateRegistry,
            keySerializer,
            env.getUserCodeClassLoader().asClassLoader(),
            numberOfKeyGroups,
            keyGroupRange,
            env.getExecutionConfig(),
            ttlTimeProvider,
            latencyTrackingStateConfig,
            stateHandles,
            AbstractStateBackend.getCompressionDecorator(env.getExecutionConfig()),
            taskStateManager.createLocalRecoveryConfig(),
            priorityQueueSetFactory,
            isUsingAsynchronousSnapshots(),
            cancelStreamRegistry)
            .build();
}
Also used : HeapPriorityQueueSetFactory(org.apache.flink.runtime.state.heap.HeapPriorityQueueSetFactory) LatencyTrackingStateConfig(org.apache.flink.runtime.state.metrics.LatencyTrackingStateConfig) TaskStateManager(org.apache.flink.runtime.state.TaskStateManager)
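
For orientation, here is a minimal sketch of how a test can wire a TaskStateManager into the Environment that createKeyedStateBackend reads it from. It reuses only constructors and setters that appear in the other examples on this page; the variable names and the standalone fragment are illustrative and not part of MemoryStateBackend.

// A minimal sketch, assuming a test-scoped setup: wire a TaskStateManagerImpl into a
// DummyEnvironment so that MemoryStateBackend#createKeyedStateBackend can obtain it via
// env.getTaskStateManager() and derive the local recovery configuration from it.
TaskStateManager taskStateManager =
        new TaskStateManagerImpl(
                new JobID(),
                new ExecutionAttemptID(),
                new TestTaskLocalStateStore(),
                new InMemoryStateChangelogStorage(),
                // no JobManagerTaskRestore: a fresh start with nothing to recover
                null,
                new TestCheckpointResponder());
DummyEnvironment env = new DummyEnvironment("test", 1, 0);
env.setTaskStateManager(taskStateManager);
// Inside createKeyedStateBackend the backend reads it back and derives the recovery config:
LocalRecoveryConfig localRecoveryConfig = env.getTaskStateManager().createLocalRecoveryConfig();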

Example 2 with TaskStateManager

Use of org.apache.flink.runtime.state.TaskStateManager in the project flink by apache.

From the class TaskExecutor, method submitTask:

// ----------------------------------------------------------------------
// Task lifecycle RPCs
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> submitTask(TaskDeploymentDescriptor tdd, JobMasterId jobMasterId, Time timeout) {
    try {
        final JobID jobId = tdd.getJobId();
        final ExecutionAttemptID executionAttemptID = tdd.getExecutionAttemptId();
        final JobTable.Connection jobManagerConnection = jobTable.getConnection(jobId).orElseThrow(() -> {
            final String message = "Could not submit task because there is no JobManager " + "associated for the job " + jobId + '.';
            log.debug(message);
            return new TaskSubmissionException(message);
        });
        if (!Objects.equals(jobManagerConnection.getJobMasterId(), jobMasterId)) {
            final String message = "Rejecting the task submission because the job manager leader id " + jobMasterId + " does not match the expected job manager leader id " + jobManagerConnection.getJobMasterId() + '.';
            log.debug(message);
            throw new TaskSubmissionException(message);
        }
        if (!taskSlotTable.tryMarkSlotActive(jobId, tdd.getAllocationId())) {
            final String message = "No task slot allocated for job ID " + jobId + " and allocation ID " + tdd.getAllocationId() + '.';
            log.debug(message);
            throw new TaskSubmissionException(message);
        }
        // re-integrate offloaded data:
        try {
            tdd.loadBigData(taskExecutorBlobService.getPermanentBlobService());
        } catch (IOException | ClassNotFoundException e) {
            throw new TaskSubmissionException("Could not re-integrate offloaded TaskDeploymentDescriptor data.", e);
        }
        // deserialize the pre-serialized information
        final JobInformation jobInformation;
        final TaskInformation taskInformation;
        try {
            jobInformation = tdd.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
            taskInformation = tdd.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());
        } catch (IOException | ClassNotFoundException e) {
            throw new TaskSubmissionException("Could not deserialize the job or task information.", e);
        }
        if (!jobId.equals(jobInformation.getJobId())) {
            throw new TaskSubmissionException("Inconsistent job ID information inside TaskDeploymentDescriptor (" + tdd.getJobId() + " vs. " + jobInformation.getJobId() + ")");
        }
        TaskManagerJobMetricGroup jobGroup = taskManagerMetricGroup.addJob(jobInformation.getJobId(), jobInformation.getJobName());
        // note that a pre-existing job group can NOT be closed concurrently - this is done by
        // the same TM thread in removeJobMetricsGroup
        TaskMetricGroup taskMetricGroup = jobGroup.addTask(taskInformation.getJobVertexId(), tdd.getExecutionAttemptId(), taskInformation.getTaskName(), tdd.getSubtaskIndex(), tdd.getAttemptNumber());
        InputSplitProvider inputSplitProvider = new RpcInputSplitProvider(jobManagerConnection.getJobManagerGateway(), taskInformation.getJobVertexId(), tdd.getExecutionAttemptId(), taskManagerConfiguration.getRpcTimeout());
        final TaskOperatorEventGateway taskOperatorEventGateway = new RpcTaskOperatorEventGateway(jobManagerConnection.getJobManagerGateway(), executionAttemptID, (t) -> runAsync(() -> failTask(executionAttemptID, t)));
        TaskManagerActions taskManagerActions = jobManagerConnection.getTaskManagerActions();
        CheckpointResponder checkpointResponder = jobManagerConnection.getCheckpointResponder();
        GlobalAggregateManager aggregateManager = jobManagerConnection.getGlobalAggregateManager();
        LibraryCacheManager.ClassLoaderHandle classLoaderHandle = jobManagerConnection.getClassLoaderHandle();
        ResultPartitionConsumableNotifier resultPartitionConsumableNotifier = jobManagerConnection.getResultPartitionConsumableNotifier();
        PartitionProducerStateChecker partitionStateChecker = jobManagerConnection.getPartitionStateChecker();
        final TaskLocalStateStore localStateStore = localStateStoresManager.localStateStoreForSubtask(jobId, tdd.getAllocationId(), taskInformation.getJobVertexId(), tdd.getSubtaskIndex());
        // TODO: Pass config value from user program and do overriding here.
        final StateChangelogStorage<?> changelogStorage;
        try {
            changelogStorage = changelogStoragesManager.stateChangelogStorageForJob(jobId, taskManagerConfiguration.getConfiguration(), jobGroup);
        } catch (IOException e) {
            throw new TaskSubmissionException(e);
        }
        final JobManagerTaskRestore taskRestore = tdd.getTaskRestore();
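        // The TaskStateManager ties together everything state-related for this execution attempt:
        // the subtask-scoped local state store (task-local recovery), the per-job changelog
        // storage, the restore information from the JobManager (null on a fresh start), and the
        // checkpoint responder used to report checkpoints back to the JobManager.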
        final TaskStateManager taskStateManager = new TaskStateManagerImpl(jobId, tdd.getExecutionAttemptId(), localStateStore, changelogStorage, taskRestore, checkpointResponder);
        MemoryManager memoryManager;
        try {
            memoryManager = taskSlotTable.getTaskMemoryManager(tdd.getAllocationId());
        } catch (SlotNotFoundException e) {
            throw new TaskSubmissionException("Could not submit task.", e);
        }
        Task task =
                new Task(
                        jobInformation,
                        taskInformation,
                        tdd.getExecutionAttemptId(),
                        tdd.getAllocationId(),
                        tdd.getSubtaskIndex(),
                        tdd.getAttemptNumber(),
                        tdd.getProducedPartitions(),
                        tdd.getInputGates(),
                        memoryManager,
                        taskExecutorServices.getIOManager(),
                        taskExecutorServices.getShuffleEnvironment(),
                        taskExecutorServices.getKvStateService(),
                        taskExecutorServices.getBroadcastVariableManager(),
                        taskExecutorServices.getTaskEventDispatcher(),
                        externalResourceInfoProvider,
                        taskStateManager,
                        taskManagerActions,
                        inputSplitProvider,
                        checkpointResponder,
                        taskOperatorEventGateway,
                        aggregateManager,
                        classLoaderHandle,
                        fileCache,
                        taskManagerConfiguration,
                        taskMetricGroup,
                        resultPartitionConsumableNotifier,
                        partitionStateChecker,
                        getRpcService().getScheduledExecutor());
        taskMetricGroup.gauge(MetricNames.IS_BACK_PRESSURED, task::isBackPressured);
        log.info("Received task {} ({}), deploy into slot with allocation id {}.", task.getTaskInfo().getTaskNameWithSubtasks(), tdd.getExecutionAttemptId(), tdd.getAllocationId());
        boolean taskAdded;
        try {
            taskAdded = taskSlotTable.addTask(task);
        } catch (SlotNotFoundException | SlotNotActiveException e) {
            throw new TaskSubmissionException("Could not submit task.", e);
        }
        if (taskAdded) {
            task.startTaskThread();
            setupResultPartitionBookkeeping(tdd.getJobId(), tdd.getProducedPartitions(), task.getTerminationFuture());
            return CompletableFuture.completedFuture(Acknowledge.get());
        } else {
            final String message = "TaskManager already contains a task for id " + task.getExecutionId() + '.';
            log.debug(message);
            throw new TaskSubmissionException(message);
        }
    } catch (TaskSubmissionException e) {
        return FutureUtils.completedExceptionally(e);
    }
}
Also used : SlotNotFoundException(org.apache.flink.runtime.taskexecutor.slot.SlotNotFoundException) TaskStateManagerImpl(org.apache.flink.runtime.state.TaskStateManagerImpl) Task(org.apache.flink.runtime.taskmanager.Task) SlotNotActiveException(org.apache.flink.runtime.taskexecutor.slot.SlotNotActiveException) RpcInputSplitProvider(org.apache.flink.runtime.taskexecutor.rpc.RpcInputSplitProvider) RpcTaskOperatorEventGateway(org.apache.flink.runtime.taskexecutor.rpc.RpcTaskOperatorEventGateway) TaskOperatorEventGateway(org.apache.flink.runtime.jobgraph.tasks.TaskOperatorEventGateway) JobManagerTaskRestore(org.apache.flink.runtime.checkpoint.JobManagerTaskRestore) RpcTaskOperatorEventGateway(org.apache.flink.runtime.taskexecutor.rpc.RpcTaskOperatorEventGateway) TaskManagerActions(org.apache.flink.runtime.taskmanager.TaskManagerActions) TaskSubmissionException(org.apache.flink.runtime.taskexecutor.exceptions.TaskSubmissionException) InputSplitProvider(org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider) RpcInputSplitProvider(org.apache.flink.runtime.taskexecutor.rpc.RpcInputSplitProvider) ResultPartitionConsumableNotifier(org.apache.flink.runtime.io.network.partition.ResultPartitionConsumableNotifier) RpcResultPartitionConsumableNotifier(org.apache.flink.runtime.taskexecutor.rpc.RpcResultPartitionConsumableNotifier) JobInformation(org.apache.flink.runtime.executiongraph.JobInformation) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) TaskInformation(org.apache.flink.runtime.executiongraph.TaskInformation) TaskLocalStateStore(org.apache.flink.runtime.state.TaskLocalStateStore) TaskMetricGroup(org.apache.flink.runtime.metrics.groups.TaskMetricGroup) RpcCheckpointResponder(org.apache.flink.runtime.taskexecutor.rpc.RpcCheckpointResponder) CheckpointResponder(org.apache.flink.runtime.taskmanager.CheckpointResponder) TaskManagerJobMetricGroup(org.apache.flink.runtime.metrics.groups.TaskManagerJobMetricGroup) IOException(java.io.IOException) LibraryCacheManager(org.apache.flink.runtime.execution.librarycache.LibraryCacheManager) TaskStateManager(org.apache.flink.runtime.state.TaskStateManager) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) RpcGlobalAggregateManager(org.apache.flink.runtime.taskexecutor.rpc.RpcGlobalAggregateManager) JobID(org.apache.flink.api.common.JobID)

Example 3 with TaskStateManager

Use of org.apache.flink.runtime.state.TaskStateManager in the project flink by apache.

From the class StateInitializationContextImplTest, method setUp:

@Before
public void setUp() throws Exception {
    this.writtenKeyGroups = 0;
    this.writtenOperatorStates = new HashSet<>();
    this.closableRegistry = new CloseableRegistry();
    ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos(64);
    List<KeyedStateHandle> keyedStateHandles = new ArrayList<>(NUM_HANDLES);
    int prev = 0;
    for (int i = 0; i < NUM_HANDLES; ++i) {
        out.reset();
        int size = i % 4;
        int end = prev + size;
        DataOutputView dov = new DataOutputViewStreamWrapper(out);
        KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(i == 9 ? KeyGroupRange.EMPTY_KEY_GROUP_RANGE : new KeyGroupRange(prev, end));
        prev = end + 1;
        for (int kg : offsets.getKeyGroupRange()) {
            offsets.setKeyGroupOffset(kg, out.getPosition());
            dov.writeInt(kg);
            ++writtenKeyGroups;
        }
        KeyedStateHandle handle = new KeyGroupsStateHandle(offsets, new ByteStateHandleCloseChecking("kg-" + i, out.toByteArray()));
        keyedStateHandles.add(handle);
    }
    List<OperatorStateHandle> operatorStateHandles = new ArrayList<>(NUM_HANDLES);
    for (int i = 0; i < NUM_HANDLES; ++i) {
        int size = i % 4;
        out.reset();
        DataOutputView dov = new DataOutputViewStreamWrapper(out);
        LongArrayList offsets = new LongArrayList(size);
        for (int s = 0; s < size; ++s) {
            offsets.add(out.getPosition());
            int val = i * NUM_HANDLES + s;
            dov.writeInt(val);
            writtenOperatorStates.add(val);
        }
        Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
        offsetsMap.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, new OperatorStateHandle.StateMetaInfo(offsets.toArray(), OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
        OperatorStateHandle operatorStateHandle = new OperatorStreamStateHandle(offsetsMap, new ByteStateHandleCloseChecking("os-" + i, out.toByteArray()));
        operatorStateHandles.add(operatorStateHandle);
    }
    OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder().setRawOperatorState(new StateObjectCollection<>(operatorStateHandles)).setRawKeyedState(new StateObjectCollection<>(keyedStateHandles)).build();
    OperatorID operatorID = new OperatorID();
    TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
    taskStateSnapshot.putSubtaskStateByOperatorID(operatorID, operatorSubtaskState);
    JobManagerTaskRestore jobManagerTaskRestore = new JobManagerTaskRestore(0L, taskStateSnapshot);
    TaskStateManager manager = new TaskStateManagerImpl(new JobID(), new ExecutionAttemptID(), new TestTaskLocalStateStore(), new InMemoryStateChangelogStorage(), jobManagerTaskRestore, mock(CheckpointResponder.class));
    DummyEnvironment environment = new DummyEnvironment("test", 1, 0, prev);
    environment.setTaskStateManager(manager);
    StateBackend stateBackend = new MemoryStateBackend(1024);
    StreamTaskStateInitializer streamTaskStateManager = new StreamTaskStateInitializerImpl(environment, stateBackend, TtlTimeProvider.DEFAULT, new InternalTimeServiceManager.Provider() {

        @Override
        public <K> InternalTimeServiceManager<K> create(CheckpointableKeyedStateBackend<K> keyedStatedBackend, ClassLoader userClassloader, KeyContext keyContext, ProcessingTimeService processingTimeService, Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates) throws Exception {
            // Return null: this test does not create an InternalTimeServiceManager, so the raw
            // keyed state stream is left for the test itself to consume.
            return null;
        }
    });
    AbstractStreamOperator<?> mockOperator = mock(AbstractStreamOperator.class);
    when(mockOperator.getOperatorID()).thenReturn(operatorID);
    StreamOperatorStateContext stateContext =
            streamTaskStateManager.streamOperatorStateContext(
                    operatorID,
                    "TestOperatorClass",
                    mock(ProcessingTimeService.class),
                    mockOperator,
                    // key serializer; it would be consumed by the timer service
                    IntSerializer.INSTANCE,
                    closableRegistry,
                    new UnregisteredMetricsGroup(),
                    1.0,
                    false);
    OptionalLong restoredCheckpointId = stateContext.getRestoredCheckpointId();
    this.initializationContext = new StateInitializationContextImpl(restoredCheckpointId.isPresent() ? restoredCheckpointId.getAsLong() : null, stateContext.operatorStateBackend(), mock(KeyedStateStore.class), stateContext.rawKeyedStateInputs(), stateContext.rawOperatorStateInputs());
}
Also used : HashMap(java.util.HashMap) KeyGroupRangeOffsets(org.apache.flink.runtime.state.KeyGroupRangeOffsets) LongArrayList(org.apache.flink.runtime.util.LongArrayList) ArrayList(java.util.ArrayList) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) StateBackend(org.apache.flink.runtime.state.StateBackend) DefaultOperatorStateBackend(org.apache.flink.runtime.state.DefaultOperatorStateBackend) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) CheckpointableKeyedStateBackend(org.apache.flink.runtime.state.CheckpointableKeyedStateBackend) KeyGroupStatePartitionStreamProvider(org.apache.flink.runtime.state.KeyGroupStatePartitionStreamProvider) InMemoryStateChangelogStorage(org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage) ByteArrayOutputStreamWithPos(org.apache.flink.core.memory.ByteArrayOutputStreamWithPos) TestTaskLocalStateStore(org.apache.flink.runtime.state.TestTaskLocalStateStore) DataOutputView(org.apache.flink.core.memory.DataOutputView) StateObjectCollection(org.apache.flink.runtime.checkpoint.StateObjectCollection) OperatorStreamStateHandle(org.apache.flink.runtime.state.OperatorStreamStateHandle) StateInitializationContextImpl(org.apache.flink.runtime.state.StateInitializationContextImpl) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) JobID(org.apache.flink.api.common.JobID) TaskStateManagerImpl(org.apache.flink.runtime.state.TaskStateManagerImpl) UnregisteredMetricsGroup(org.apache.flink.metrics.groups.UnregisteredMetricsGroup) MemoryStateBackend(org.apache.flink.runtime.state.memory.MemoryStateBackend) JobManagerTaskRestore(org.apache.flink.runtime.checkpoint.JobManagerTaskRestore) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) KeyedStateHandle(org.apache.flink.runtime.state.KeyedStateHandle) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) TaskStateSnapshot(org.apache.flink.runtime.checkpoint.TaskStateSnapshot) ProcessingTimeService(org.apache.flink.streaming.runtime.tasks.ProcessingTimeService) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) LongArrayList(org.apache.flink.runtime.util.LongArrayList) CheckpointResponder(org.apache.flink.runtime.taskmanager.CheckpointResponder) DummyEnvironment(org.apache.flink.runtime.operators.testutils.DummyEnvironment) TaskStateManager(org.apache.flink.runtime.state.TaskStateManager) IOException(java.io.IOException) DataOutputViewStreamWrapper(org.apache.flink.core.memory.DataOutputViewStreamWrapper) OptionalLong(java.util.OptionalLong) Before(org.junit.Before)
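
To make the setup above concrete, here is a hedged sketch of how the raw operator state written in setUp() could be read back through the StateInitializationContext. It assumes the standard accessors StateInitializationContext#getRawOperatorStateInputs() and StatePartitionStreamProvider#getStream(); the loop itself is illustrative and not part of the test.

// Sketch: read back the ints written into the raw operator state (one int per partition offset).
Set<Integer> restoredOperatorStates = new HashSet<>();
for (StatePartitionStreamProvider provider : initializationContext.getRawOperatorStateInputs()) {
    DataInputView in = new DataInputViewStreamWrapper(provider.getStream());
    restoredOperatorStates.add(in.readInt());
}
// restoredOperatorStates should now match writtenOperatorStates collected in setUp().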

Example 4 with TaskStateManager

Use of org.apache.flink.runtime.state.TaskStateManager in the project flink by apache.

From the class StreamTaskStateInitializerImplTest, method streamTaskStateManager:

private StreamTaskStateInitializer streamTaskStateManager(StateBackend stateBackend, JobManagerTaskRestore jobManagerTaskRestore, boolean createTimerServiceManager) {
    JobID jobID = new JobID(42L, 43L);
    ExecutionAttemptID executionAttemptID = new ExecutionAttemptID();
    TestCheckpointResponder checkpointResponderMock = new TestCheckpointResponder();
    TaskLocalStateStore taskLocalStateStore = new TestTaskLocalStateStore();
    InMemoryStateChangelogStorage changelogStorage = new InMemoryStateChangelogStorage();
    TaskStateManager taskStateManager = TaskStateManagerImplTest.taskStateManager(jobID, executionAttemptID, checkpointResponderMock, jobManagerTaskRestore, taskLocalStateStore, changelogStorage);
    DummyEnvironment dummyEnvironment = new DummyEnvironment("test-task", 1, 0);
    dummyEnvironment.setTaskStateManager(taskStateManager);
    if (createTimerServiceManager) {
        return new StreamTaskStateInitializerImpl(dummyEnvironment, stateBackend);
    } else {
        return new StreamTaskStateInitializerImpl(dummyEnvironment, stateBackend, TtlTimeProvider.DEFAULT, new InternalTimeServiceManager.Provider() {

            @Override
            public <K> InternalTimeServiceManager<K> create(CheckpointableKeyedStateBackend<K> keyedStatedBackend, ClassLoader userClassloader, KeyContext keyContext, ProcessingTimeService processingTimeService, Iterable<KeyGroupStatePartitionStreamProvider> rawKeyedStates) throws Exception {
                return null;
            }
        });
    }
}
Also used : TestTaskLocalStateStore(org.apache.flink.runtime.state.TestTaskLocalStateStore) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) TaskLocalStateStore(org.apache.flink.runtime.state.TaskLocalStateStore) TestTaskLocalStateStore(org.apache.flink.runtime.state.TestTaskLocalStateStore) DummyEnvironment(org.apache.flink.runtime.operators.testutils.DummyEnvironment) TaskStateManager(org.apache.flink.runtime.state.TaskStateManager) KeyGroupStatePartitionStreamProvider(org.apache.flink.runtime.state.KeyGroupStatePartitionStreamProvider) InMemoryStateChangelogStorage(org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage) TestProcessingTimeService(org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService) ProcessingTimeService(org.apache.flink.streaming.runtime.tasks.ProcessingTimeService) TestCheckpointResponder(org.apache.flink.runtime.taskmanager.TestCheckpointResponder) JobID(org.apache.flink.api.common.JobID)
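
A possible invocation of this helper, shown only as an illustration; the null restore argument (no previous checkpoint) and the MemoryStateBackend are assumptions rather than taken from the test:

// Illustrative: build an initializer with no restore information and with the
// default timer service manager path.
StreamTaskStateInitializer initializer =
        streamTaskStateManager(new MemoryStateBackend(1024), null, true);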

Example 5 with TaskStateManager

Use of org.apache.flink.runtime.state.TaskStateManager in the project flink by apache.

From the class StreamTaskTest, method testAsyncCheckpointingConcurrentCloseAfterAcknowledge:

/**
 * FLINK-5667
 *
 * <p>Tests that a concurrent cancel operation does not discard the state handles of an
 * acknowledged checkpoint. The situation can only happen if the cancel call is executed after
 * Environment.acknowledgeCheckpoint() and before the CloseableRegistry.unregisterClosable()
 * call.
 */
@Test
public void testAsyncCheckpointingConcurrentCloseAfterAcknowledge() throws Exception {
    final OneShotLatch acknowledgeCheckpointLatch = new OneShotLatch();
    final OneShotLatch completeAcknowledge = new OneShotLatch();
    CheckpointResponder checkpointResponder = mock(CheckpointResponder.class);
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocation) {
            acknowledgeCheckpointLatch.trigger();
            // block here so that we can issue the concurrent cancel call
            while (true) {
                try {
                    // wait until we successfully await (no pun intended)
                    completeAcknowledge.await();
                    // when await() returns normally, we break out of the loop
                    break;
                } catch (InterruptedException e) {
                    // survive interruptions that arise from thread pool shutdown;
                    // production code cannot actually throw InterruptedException
                    // from checkpoint acknowledgement
                }
            }
            return null;
        }
    }).when(checkpointResponder).acknowledgeCheckpoint(any(JobID.class), any(ExecutionAttemptID.class), anyLong(), any(CheckpointMetrics.class), any(TaskStateSnapshot.class));
    TaskStateManager taskStateManager = new TaskStateManagerImpl(new JobID(1L, 2L), new ExecutionAttemptID(), mock(TaskLocalStateStoreImpl.class), new InMemoryStateChangelogStorage(), null, checkpointResponder);
    KeyedStateHandle managedKeyedStateHandle = mock(KeyedStateHandle.class);
    KeyedStateHandle rawKeyedStateHandle = mock(KeyedStateHandle.class);
    OperatorStateHandle managedOperatorStateHandle = mock(OperatorStreamStateHandle.class);
    OperatorStateHandle rawOperatorStateHandle = mock(OperatorStreamStateHandle.class);
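    // The six futures are, in order: managed keyed state, raw keyed state, managed operator
    // state, raw operator state, and the two channel-state snapshots (input channel and
    // result subpartition), which this test leaves empty.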
    OperatorSnapshotFutures operatorSnapshotResult =
            new OperatorSnapshotFutures(
                    DoneFuture.of(SnapshotResult.of(managedKeyedStateHandle)),
                    DoneFuture.of(SnapshotResult.of(rawKeyedStateHandle)),
                    DoneFuture.of(SnapshotResult.of(managedOperatorStateHandle)),
                    DoneFuture.of(SnapshotResult.of(rawOperatorStateHandle)),
                    DoneFuture.of(SnapshotResult.empty()),
                    DoneFuture.of(SnapshotResult.empty()));
    try (MockEnvironment mockEnvironment = new MockEnvironmentBuilder().setTaskName("mock-task").setTaskStateManager(taskStateManager).build()) {
        RunningTask<MockStreamTask> task = runTask(() -> createMockStreamTask(mockEnvironment, operatorChain(streamOperatorWithSnapshot(operatorSnapshotResult))));
        MockStreamTask streamTask = task.streamTask;
        waitTaskIsRunning(streamTask, task.invocationFuture);
        final long checkpointId = 42L;
        streamTask.triggerCheckpointAsync(new CheckpointMetaData(checkpointId, 1L), CheckpointOptions.forCheckpointWithDefaultLocation());
        acknowledgeCheckpointLatch.await();
        ArgumentCaptor<TaskStateSnapshot> subtaskStateCaptor = ArgumentCaptor.forClass(TaskStateSnapshot.class);
        // check that the checkpoint has been completed
        verify(checkpointResponder).acknowledgeCheckpoint(any(JobID.class), any(ExecutionAttemptID.class), eq(checkpointId), any(CheckpointMetrics.class), subtaskStateCaptor.capture());
        TaskStateSnapshot subtaskStates = subtaskStateCaptor.getValue();
        OperatorSubtaskState subtaskState = subtaskStates.getSubtaskStateMappings().iterator().next().getValue();
        // check that the subtask state contains the expected state handles
        assertEquals(singleton(managedKeyedStateHandle), subtaskState.getManagedKeyedState());
        assertEquals(singleton(rawKeyedStateHandle), subtaskState.getRawKeyedState());
        assertEquals(singleton(managedOperatorStateHandle), subtaskState.getManagedOperatorState());
        assertEquals(singleton(rawOperatorStateHandle), subtaskState.getRawOperatorState());
        // check that the state handles have not been discarded
        verify(managedKeyedStateHandle, never()).discardState();
        verify(rawKeyedStateHandle, never()).discardState();
        verify(managedOperatorStateHandle, never()).discardState();
        verify(rawOperatorStateHandle, never()).discardState();
        streamTask.cancel();
        completeAcknowledge.trigger();
        // canceling the stream task after it has acknowledged the checkpoint should not discard
        // the state handles
        verify(managedKeyedStateHandle, never()).discardState();
        verify(rawKeyedStateHandle, never()).discardState();
        verify(managedOperatorStateHandle, never()).discardState();
        verify(rawOperatorStateHandle, never()).discardState();
        task.waitForTaskCompletion(true);
    }
}
Also used : OperatorSnapshotFutures(org.apache.flink.streaming.api.operators.OperatorSnapshotFutures) TaskStateManagerImpl(org.apache.flink.runtime.state.TaskStateManagerImpl) MockEnvironmentBuilder(org.apache.flink.runtime.operators.testutils.MockEnvironmentBuilder) CheckpointMetrics(org.apache.flink.runtime.checkpoint.CheckpointMetrics) KeyedStateHandle(org.apache.flink.runtime.state.KeyedStateHandle) OperatorSubtaskState(org.apache.flink.runtime.checkpoint.OperatorSubtaskState) TaskStateSnapshot(org.apache.flink.runtime.checkpoint.TaskStateSnapshot) InMemoryStateChangelogStorage(org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage) MockEnvironment(org.apache.flink.runtime.operators.testutils.MockEnvironment) TaskLocalStateStoreImpl(org.apache.flink.runtime.state.TaskLocalStateStoreImpl) OneShotLatch(org.apache.flink.core.testutils.OneShotLatch) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) CheckpointResponder(org.apache.flink.runtime.taskmanager.CheckpointResponder) TaskStateManager(org.apache.flink.runtime.state.TaskStateManager) CheckpointMetaData(org.apache.flink.runtime.checkpoint.CheckpointMetaData) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)

Aggregations

TaskStateManager (org.apache.flink.runtime.state.TaskStateManager): 8
JobID (org.apache.flink.api.common.JobID): 5
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID): 5
TaskStateManagerImpl (org.apache.flink.runtime.state.TaskStateManagerImpl): 4
InMemoryStateChangelogStorage (org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage): 4
CheckpointResponder (org.apache.flink.runtime.taskmanager.CheckpointResponder): 4
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState): 3
TaskStateSnapshot (org.apache.flink.runtime.checkpoint.TaskStateSnapshot): 3
IOException (java.io.IOException): 2
ArrayList (java.util.ArrayList): 2
OneShotLatch (org.apache.flink.core.testutils.OneShotLatch): 2
CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData): 2
CheckpointMetrics (org.apache.flink.runtime.checkpoint.CheckpointMetrics): 2
JobManagerTaskRestore (org.apache.flink.runtime.checkpoint.JobManagerTaskRestore): 2
DummyEnvironment (org.apache.flink.runtime.operators.testutils.DummyEnvironment): 2
MockEnvironment (org.apache.flink.runtime.operators.testutils.MockEnvironment): 2
MockEnvironmentBuilder (org.apache.flink.runtime.operators.testutils.MockEnvironmentBuilder): 2
KeyGroupStatePartitionStreamProvider (org.apache.flink.runtime.state.KeyGroupStatePartitionStreamProvider): 2
KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle): 2
LocalRecoveryConfig (org.apache.flink.runtime.state.LocalRecoveryConfig): 2