Example 21 with KeyedStateHandle

Use of org.apache.flink.runtime.state.KeyedStateHandle in project flink by apache.

From class InterruptSensitiveRestoreTest, method createTask.

// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
private static Task createTask(StreamConfig streamConfig, Configuration taskConfig, StreamStateHandle state, int mode) throws IOException {
    ShuffleEnvironment<?, ?> shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build();
    Collection<KeyedStateHandle> keyedStateFromBackend = Collections.emptyList();
    Collection<KeyedStateHandle> keyedStateFromStream = Collections.emptyList();
    Collection<OperatorStateHandle> operatorStateBackend = Collections.emptyList();
    Collection<OperatorStateHandle> operatorStateStream = Collections.emptyList();
    Map<String, OperatorStateHandle.StateMetaInfo> operatorStateMetadata = new HashMap<>(1);
    OperatorStateHandle.StateMetaInfo metaInfo = new OperatorStateHandle.StateMetaInfo(new long[] { 0 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);
    operatorStateMetadata.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, metaInfo);
    KeyGroupRangeOffsets keyGroupRangeOffsets = new KeyGroupRangeOffsets(new KeyGroupRange(0, 0));
    Collection<OperatorStateHandle> operatorStateHandles = Collections.singletonList(new OperatorStreamStateHandle(operatorStateMetadata, state));
    List<KeyedStateHandle> keyedStateHandles = Collections.singletonList(new KeyGroupsStateHandle(keyGroupRangeOffsets, state));
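    // Route the single test state handle into exactly one of the four restore slots, depending on the mode under test.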
    switch(mode) {
        case OPERATOR_MANAGED:
            operatorStateBackend = operatorStateHandles;
            break;
        case OPERATOR_RAW:
            operatorStateStream = operatorStateHandles;
            break;
        case KEYED_MANAGED:
            keyedStateFromBackend = keyedStateHandles;
            break;
        case KEYED_RAW:
            keyedStateFromStream = keyedStateHandles;
            break;
        default:
            throw new IllegalArgumentException();
    }
    OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder().setManagedOperatorState(new StateObjectCollection<>(operatorStateBackend)).setRawOperatorState(new StateObjectCollection<>(operatorStateStream)).setManagedKeyedState(new StateObjectCollection<>(keyedStateFromBackend)).setRawKeyedState(new StateObjectCollection<>(keyedStateFromStream)).build();
    JobVertexID jobVertexID = new JobVertexID();
    OperatorID operatorID = OperatorID.fromJobVertexID(jobVertexID);
    streamConfig.setOperatorID(operatorID);
    TaskStateSnapshot stateSnapshot = new TaskStateSnapshot();
    stateSnapshot.putSubtaskStateByOperatorID(operatorID, operatorSubtaskState);
    JobManagerTaskRestore taskRestore = new JobManagerTaskRestore(1L, stateSnapshot);
    JobInformation jobInformation = new JobInformation(new JobID(), "test job name", new SerializedValue<>(new ExecutionConfig()), new Configuration(), Collections.emptyList(), Collections.emptyList());
    TaskInformation taskInformation = new TaskInformation(jobVertexID, "test task name", 1, 1, SourceStreamTask.class.getName(), taskConfig);
    TestTaskStateManager taskStateManager = TestTaskStateManager.builder().setReportedCheckpointId(taskRestore.getRestoreCheckpointId()).setJobManagerTaskStateSnapshotsByCheckpointId(Collections.singletonMap(taskRestore.getRestoreCheckpointId(), taskRestore.getTaskStateSnapshot())).build();
    return new Task(jobInformation, taskInformation, new ExecutionAttemptID(), new AllocationID(), 0, 0, Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), mock(MemoryManager.class), mock(IOManager.class), shuffleEnvironment, new KvStateService(new KvStateRegistry(), null, null), mock(BroadcastVariableManager.class), new TaskEventDispatcher(), ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES, taskStateManager, mock(TaskManagerActions.class), mock(InputSplitProvider.class), mock(CheckpointResponder.class), new NoOpTaskOperatorEventGateway(), new TestGlobalAggregateManager(), TestingClassLoaderLease.newBuilder().build(), new FileCache(new String[] { EnvironmentInformation.getTemporaryFileDirectory() }, VoidPermanentBlobService.INSTANCE), new TestingTaskManagerRuntimeInfo(), UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(), new NoOpResultPartitionConsumableNotifier(), mock(PartitionProducerStateChecker.class), mock(Executor.class));
}
Also used:
KvStateRegistry (org.apache.flink.runtime.query.KvStateRegistry)
Task (org.apache.flink.runtime.taskmanager.Task)
Configuration (org.apache.flink.configuration.Configuration)
HashMap (java.util.HashMap)
KeyGroupRangeOffsets (org.apache.flink.runtime.state.KeyGroupRangeOffsets)
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange)
NettyShuffleEnvironmentBuilder (org.apache.flink.runtime.io.network.NettyShuffleEnvironmentBuilder)
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState)
TaskManagerActions (org.apache.flink.runtime.taskmanager.TaskManagerActions)
NoOpTaskOperatorEventGateway (org.apache.flink.runtime.taskmanager.NoOpTaskOperatorEventGateway)
TestingTaskManagerRuntimeInfo (org.apache.flink.runtime.util.TestingTaskManagerRuntimeInfo)
BroadcastVariableManager (org.apache.flink.runtime.broadcast.BroadcastVariableManager)
PartitionProducerStateChecker (org.apache.flink.runtime.taskexecutor.PartitionProducerStateChecker)
TaskInformation (org.apache.flink.runtime.executiongraph.TaskInformation)
IOManager (org.apache.flink.runtime.io.disk.iomanager.IOManager)
TestGlobalAggregateManager (org.apache.flink.runtime.taskexecutor.TestGlobalAggregateManager)
FileCache (org.apache.flink.runtime.filecache.FileCache)
StateObjectCollection (org.apache.flink.runtime.checkpoint.StateObjectCollection)
OperatorStreamStateHandle (org.apache.flink.runtime.state.OperatorStreamStateHandle)
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle)
JobID (org.apache.flink.api.common.JobID)
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID)
JobManagerTaskRestore (org.apache.flink.runtime.checkpoint.JobManagerTaskRestore)
OperatorID (org.apache.flink.runtime.jobgraph.OperatorID)
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig)
KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle)
KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle)
KvStateService (org.apache.flink.runtime.taskexecutor.KvStateService)
TaskStateSnapshot (org.apache.flink.runtime.checkpoint.TaskStateSnapshot)
Executor (java.util.concurrent.Executor)
InputSplitProvider (org.apache.flink.runtime.jobgraph.tasks.InputSplitProvider)
JobInformation (org.apache.flink.runtime.executiongraph.JobInformation)
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID)
CheckpointResponder (org.apache.flink.runtime.taskmanager.CheckpointResponder)
AllocationID (org.apache.flink.runtime.clusterframework.types.AllocationID)
NoOpResultPartitionConsumableNotifier (org.apache.flink.runtime.io.network.partition.NoOpResultPartitionConsumableNotifier)
MemoryManager (org.apache.flink.runtime.memory.MemoryManager)
TestTaskStateManager (org.apache.flink.runtime.state.TestTaskStateManager)
TaskEventDispatcher (org.apache.flink.runtime.io.network.TaskEventDispatcher)
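
For orientation, here is a minimal sketch of how a test could drive createTask for each restore mode. The mode constants come from the switch above; the blocking state handle and the interrupt/cancel steps are assumptions added only for illustration.

// Illustrative sketch, not the actual test body.
for (int mode : new int[] { OPERATOR_MANAGED, OPERATOR_RAW, KEYED_MANAGED, KEYED_RAW }) {
    StreamConfig streamConfig = new StreamConfig(new Configuration());
    StreamStateHandle blockingHandle = createBlockingStateHandle(); // hypothetical: a handle whose read blocks until interrupted
    Task task = createTask(streamConfig, new Configuration(), blockingHandle, mode);
    task.startTaskThread();           // restore starts and blocks inside the state handle
    task.cancelExecution();           // cancellation must interrupt the blocked restore
    task.getExecutingThread().join(); // the task thread should terminate promptly
}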

Example 22 with KeyedStateHandle

Use of org.apache.flink.runtime.state.KeyedStateHandle in project flink by apache.

From class AbstractStreamOperatorTestHarness, method repartitionOperatorState.

/**
 * Returns the state handles reshaped to include only those key-group states that fall within the
 * local key-group range and the operator states that would be assigned to the local subtask.
 */
public static OperatorSubtaskState repartitionOperatorState(final OperatorSubtaskState operatorStateHandles, final int numKeyGroups, final int oldParallelism, final int newParallelism, final int subtaskIndex) {
    Preconditions.checkNotNull(operatorStateHandles, "the previous operatorStateHandles should not be null.");
    // create a new OperatorStateHandles that only contains the state for our key-groups
    List<KeyGroupRange> keyGroupPartitions = StateAssignmentOperation.createKeyGroupPartitions(numKeyGroups, newParallelism);
    KeyGroupRange localKeyGroupRange = keyGroupPartitions.get(subtaskIndex);
    List<KeyedStateHandle> localManagedKeyGroupState = new ArrayList<>();
    StateAssignmentOperation.extractIntersectingState(operatorStateHandles.getManagedKeyedState(), localKeyGroupRange, localManagedKeyGroupState);
    List<KeyedStateHandle> localRawKeyGroupState = new ArrayList<>();
    StateAssignmentOperation.extractIntersectingState(operatorStateHandles.getRawKeyedState(), localKeyGroupRange, localRawKeyGroupState);
    StateObjectCollection<OperatorStateHandle> managedOperatorStates = operatorStateHandles.getManagedOperatorState();
    Collection<OperatorStateHandle> localManagedOperatorState;
    if (!managedOperatorStates.isEmpty()) {
        List<List<OperatorStateHandle>> managedOperatorState = managedOperatorStates.stream().map(Collections::singletonList).collect(Collectors.toList());
        localManagedOperatorState = operatorStateRepartitioner.repartitionState(managedOperatorState, oldParallelism, newParallelism).get(subtaskIndex);
    } else {
        localManagedOperatorState = Collections.emptyList();
    }
    StateObjectCollection<OperatorStateHandle> rawOperatorStates = operatorStateHandles.getRawOperatorState();
    Collection<OperatorStateHandle> localRawOperatorState;
    if (!rawOperatorStates.isEmpty()) {
        List<List<OperatorStateHandle>> rawOperatorState = rawOperatorStates.stream().map(Collections::singletonList).collect(Collectors.toList());
        localRawOperatorState = operatorStateRepartitioner.repartitionState(rawOperatorState, oldParallelism, newParallelism).get(subtaskIndex);
    } else {
        localRawOperatorState = Collections.emptyList();
    }
    return OperatorSubtaskState.builder().setManagedOperatorState(new StateObjectCollection<>(nullToEmptyCollection(localManagedOperatorState))).setRawOperatorState(new StateObjectCollection<>(nullToEmptyCollection(localRawOperatorState))).setManagedKeyedState(new StateObjectCollection<>(nullToEmptyCollection(localManagedKeyGroupState))).setRawKeyedState(new StateObjectCollection<>(nullToEmptyCollection(localRawKeyGroupState))).build();
}
Also used:
StateObjectCollection (org.apache.flink.runtime.checkpoint.StateObjectCollection)
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange)
ArrayList (java.util.ArrayList)
List (java.util.List)
LinkedList (java.util.LinkedList)
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle)
KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle)
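
As a usage sketch, scaling state up from one subtask to two could look like the following. The harness variables and the max-parallelism value are assumptions for illustration, not code from the listing.

// Illustrative sketch: give subtask 0 of a new parallelism-2 job its share of a parallelism-1 snapshot.
int maxParallelism = 128; // assumed number of key groups
OperatorSubtaskState snapshot = oldHarness.snapshot(0L, 0L);
OperatorSubtaskState stateForSubtask0 =
        AbstractStreamOperatorTestHarness.repartitionOperatorState(snapshot, maxParallelism, 1, 2, 0);
newHarness.initializeState(stateForSubtask0); // newHarness assumed to be built for maxParallelism, parallelism 2, subtask 0
newHarness.open();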

Example 23 with KeyedStateHandle

Use of org.apache.flink.runtime.state.KeyedStateHandle in project flink by apache.

From class AbstractStreamOperatorTestHarness, method repackageState.

/**
 * Takes the different {@link OperatorSubtaskState} created by calling {@link #snapshot(long,
 * long)} on different instances of {@link AbstractStreamOperatorTestHarness} (each one
 * representing one subtask) and repacks them into a single {@link OperatorSubtaskState} so that
 * the parallelism of the test can change arbitrarily (i.e. the test can scale both up and down).
 *
 * <p>After repacking the partial states, remember to use {@link
 * #repartitionOperatorState(OperatorSubtaskState, int, int, int, int)} to reshape the state
 * handles to include only those key-group states in the local key-group range and the operator
 * states that would be assigned to the local subtask. Bear in mind that for parallelism greater
 * than one, you have to use the constructor {@link
 * #AbstractStreamOperatorTestHarness(StreamOperator, int, int, int)}.
 *
 * <p><b>NOTE: </b> each of the {@code handles} in the argument list is assumed to be from a
 * single task of a single operator (i.e. chain length of one).
 *
 * <p>For an example of how to use it, have a look at {@link
 * AbstractStreamOperatorTest#testStateAndTimerStateShufflingScalingDown()}.
 *
 * @param handles the different states to be merged.
 * @return the resulting state, or {@code null} if no partial states are specified.
 */
public static OperatorSubtaskState repackageState(OperatorSubtaskState... handles) throws Exception {
    if (handles.length < 1) {
        return null;
    } else if (handles.length == 1) {
        return handles[0];
    }
    List<OperatorStateHandle> mergedManagedOperatorState = new ArrayList<>(handles.length);
    List<OperatorStateHandle> mergedRawOperatorState = new ArrayList<>(handles.length);
    List<KeyedStateHandle> mergedManagedKeyedState = new ArrayList<>(handles.length);
    List<KeyedStateHandle> mergedRawKeyedState = new ArrayList<>(handles.length);
    for (OperatorSubtaskState handle : handles) {
        Collection<OperatorStateHandle> managedOperatorState = handle.getManagedOperatorState();
        Collection<OperatorStateHandle> rawOperatorState = handle.getRawOperatorState();
        Collection<KeyedStateHandle> managedKeyedState = handle.getManagedKeyedState();
        Collection<KeyedStateHandle> rawKeyedState = handle.getRawKeyedState();
        mergedManagedOperatorState.addAll(managedOperatorState);
        mergedRawOperatorState.addAll(rawOperatorState);
        mergedManagedKeyedState.addAll(managedKeyedState);
        mergedRawKeyedState.addAll(rawKeyedState);
    }
    return OperatorSubtaskState.builder().setManagedOperatorState(new StateObjectCollection<>(mergedManagedOperatorState)).setRawOperatorState(new StateObjectCollection<>(mergedRawOperatorState)).setManagedKeyedState(new StateObjectCollection<>(mergedManagedKeyedState)).setRawKeyedState(new StateObjectCollection<>(mergedRawKeyedState)).build();
}
Also used:
StateObjectCollection (org.apache.flink.runtime.checkpoint.StateObjectCollection)
ArrayList (java.util.ArrayList)
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle)
KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle)
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState)
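
A hedged end-to-end sketch of the scale-down pattern described in the Javadoc above; the harness names and the max-parallelism value are assumptions for illustration.

// Merge the partial snapshots of two subtasks, then cut out the share for the single new subtask.
OperatorSubtaskState merged =
        AbstractStreamOperatorTestHarness.repackageState(snapshotOfSubtask0, snapshotOfSubtask1);
OperatorSubtaskState initState =
        AbstractStreamOperatorTestHarness.repartitionOperatorState(merged, 128, 2, 1, 0);
scaledDownHarness.initializeState(initState); // harness assumed to be created with parallelism 1, subtask index 0
scaledDownHarness.open();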

Example 24 with KeyedStateHandle

Use of org.apache.flink.runtime.state.KeyedStateHandle in project flink by apache.

From class StreamTaskTest, method testAsyncCheckpointingConcurrentCloseAfterAcknowledge.

/**
 * FLINK-5667
 *
 * <p>Tests that a concurrent cancel operation does not discard the state handles of an
 * acknowledged checkpoint. The situation can only happen if the cancel call is executed after
 * Environment.acknowledgeCheckpoint() and before the CloseableRegistry.unregisterClosable()
 * call.
 */
@Test
public void testAsyncCheckpointingConcurrentCloseAfterAcknowledge() throws Exception {
    final OneShotLatch acknowledgeCheckpointLatch = new OneShotLatch();
    final OneShotLatch completeAcknowledge = new OneShotLatch();
    CheckpointResponder checkpointResponder = mock(CheckpointResponder.class);
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocation) {
            acknowledgeCheckpointLatch.trigger();
            // block here so that we can issue the concurrent cancel call
            while (true) {
                try {
                    // wait until we successfully await (no pun intended)
                    completeAcknowledge.await();
                    // when await() returns normally, we break out of the loop
                    break;
                } catch (InterruptedException e) {
                    // Survive interruptions that arise from thread pool shutdown;
                    // production code cannot actually throw InterruptedException
                    // from checkpoint acknowledgement.
                }
            }
            return null;
        }
    }).when(checkpointResponder).acknowledgeCheckpoint(any(JobID.class), any(ExecutionAttemptID.class), anyLong(), any(CheckpointMetrics.class), any(TaskStateSnapshot.class));
    TaskStateManager taskStateManager = new TaskStateManagerImpl(new JobID(1L, 2L), new ExecutionAttemptID(), mock(TaskLocalStateStoreImpl.class), new InMemoryStateChangelogStorage(), null, checkpointResponder);
    KeyedStateHandle managedKeyedStateHandle = mock(KeyedStateHandle.class);
    KeyedStateHandle rawKeyedStateHandle = mock(KeyedStateHandle.class);
    OperatorStateHandle managedOperatorStateHandle = mock(OperatorStreamStateHandle.class);
    OperatorStateHandle rawOperatorStateHandle = mock(OperatorStreamStateHandle.class);
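    // Wrap the four mocked state handles, plus two empty snapshot results, into already-completed snapshot futures.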
    OperatorSnapshotFutures operatorSnapshotResult = new OperatorSnapshotFutures(DoneFuture.of(SnapshotResult.of(managedKeyedStateHandle)), DoneFuture.of(SnapshotResult.of(rawKeyedStateHandle)), DoneFuture.of(SnapshotResult.of(managedOperatorStateHandle)), DoneFuture.of(SnapshotResult.of(rawOperatorStateHandle)), DoneFuture.of(SnapshotResult.empty()), DoneFuture.of(SnapshotResult.empty()));
    try (MockEnvironment mockEnvironment = new MockEnvironmentBuilder().setTaskName("mock-task").setTaskStateManager(taskStateManager).build()) {
        RunningTask<MockStreamTask> task = runTask(() -> createMockStreamTask(mockEnvironment, operatorChain(streamOperatorWithSnapshot(operatorSnapshotResult))));
        MockStreamTask streamTask = task.streamTask;
        waitTaskIsRunning(streamTask, task.invocationFuture);
        final long checkpointId = 42L;
        streamTask.triggerCheckpointAsync(new CheckpointMetaData(checkpointId, 1L), CheckpointOptions.forCheckpointWithDefaultLocation());
        acknowledgeCheckpointLatch.await();
        ArgumentCaptor<TaskStateSnapshot> subtaskStateCaptor = ArgumentCaptor.forClass(TaskStateSnapshot.class);
        // check that the checkpoint has been completed
        verify(checkpointResponder).acknowledgeCheckpoint(any(JobID.class), any(ExecutionAttemptID.class), eq(checkpointId), any(CheckpointMetrics.class), subtaskStateCaptor.capture());
        TaskStateSnapshot subtaskStates = subtaskStateCaptor.getValue();
        OperatorSubtaskState subtaskState = subtaskStates.getSubtaskStateMappings().iterator().next().getValue();
        // check that the subtask state contains the expected state handles
        assertEquals(singleton(managedKeyedStateHandle), subtaskState.getManagedKeyedState());
        assertEquals(singleton(rawKeyedStateHandle), subtaskState.getRawKeyedState());
        assertEquals(singleton(managedOperatorStateHandle), subtaskState.getManagedOperatorState());
        assertEquals(singleton(rawOperatorStateHandle), subtaskState.getRawOperatorState());
        // check that the state handles have not been discarded
        verify(managedKeyedStateHandle, never()).discardState();
        verify(rawKeyedStateHandle, never()).discardState();
        verify(managedOperatorStateHandle, never()).discardState();
        verify(rawOperatorStateHandle, never()).discardState();
        streamTask.cancel();
        completeAcknowledge.trigger();
        // canceling the stream task after it has acknowledged the checkpoint should not discard
        // the state handles
        verify(managedKeyedStateHandle, never()).discardState();
        verify(rawKeyedStateHandle, never()).discardState();
        verify(managedOperatorStateHandle, never()).discardState();
        verify(rawOperatorStateHandle, never()).discardState();
        task.waitForTaskCompletion(true);
    }
}
Also used:
OperatorSnapshotFutures (org.apache.flink.streaming.api.operators.OperatorSnapshotFutures)
TaskStateManagerImpl (org.apache.flink.runtime.state.TaskStateManagerImpl)
MockEnvironmentBuilder (org.apache.flink.runtime.operators.testutils.MockEnvironmentBuilder)
CheckpointMetrics (org.apache.flink.runtime.checkpoint.CheckpointMetrics)
KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle)
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState)
TaskStateSnapshot (org.apache.flink.runtime.checkpoint.TaskStateSnapshot)
InMemoryStateChangelogStorage (org.apache.flink.runtime.state.changelog.inmemory.InMemoryStateChangelogStorage)
MockEnvironment (org.apache.flink.runtime.operators.testutils.MockEnvironment)
TaskLocalStateStoreImpl (org.apache.flink.runtime.state.TaskLocalStateStoreImpl)
OneShotLatch (org.apache.flink.core.testutils.OneShotLatch)
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID)
CheckpointResponder (org.apache.flink.runtime.taskmanager.CheckpointResponder)
TaskStateManager (org.apache.flink.runtime.state.TaskStateManager)
CheckpointMetaData (org.apache.flink.runtime.checkpoint.CheckpointMetaData)
Mockito.doAnswer (org.mockito.Mockito.doAnswer)
Answer (org.mockito.stubbing.Answer)
InvocationOnMock (org.mockito.invocation.InvocationOnMock)
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle)
JobID (org.apache.flink.api.common.JobID)
Test (org.junit.Test)
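
The heart of the test is the two-latch handshake between the checkpoint-acknowledging thread and the test thread. A stripped-down sketch of that pattern follows; the thread and variable names are illustrative, not taken from the test.

final OneShotLatch reachedAck = new OneShotLatch();
final OneShotLatch letAckFinish = new OneShotLatch();
// Checkpointing thread: announce that acknowledgeCheckpoint() was reached, then hold there.
Thread checkpointer = new Thread(() -> {
    reachedAck.trigger();
    try {
        letAckFinish.await();
    } catch (InterruptedException ignored) {
        // tolerate interrupts, as the Answer above does
    }
});
checkpointer.start();
reachedAck.await();     // test thread: the checkpoint is acknowledged but not yet finished
// ... issue the concurrent cancel here and verify no state handle was discarded ...
letAckFinish.trigger(); // let the acknowledgement path complete
checkpointer.join();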

Example 25 with KeyedStateHandle

Use of org.apache.flink.runtime.state.KeyedStateHandle in project flink by apache.

From class MetadataV2V3SerializerBase, method deserializeKeyedStateHandle.

@VisibleForTesting
@Nullable
static KeyedStateHandle deserializeKeyedStateHandle(DataInputStream dis, @Nullable DeserializationContext context) throws IOException {
    final int type = dis.readByte();
    if (NULL_HANDLE == type) {
        return null;
    } else if (KEY_GROUPS_HANDLE == type || KEY_GROUPS_HANDLE_V2 == type || SAVEPOINT_KEY_GROUPS_HANDLE == type) {
        int startKeyGroup = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(startKeyGroup, startKeyGroup + numKeyGroups - 1);
        long[] offsets = new long[numKeyGroups];
        for (int i = 0; i < numKeyGroups; ++i) {
            offsets[i] = dis.readLong();
        }
        KeyGroupRangeOffsets keyGroupRangeOffsets = new KeyGroupRangeOffsets(keyGroupRange, offsets);
        StreamStateHandle stateHandle = deserializeStreamStateHandle(dis, context);
        if (SAVEPOINT_KEY_GROUPS_HANDLE == type) {
            return new KeyGroupsSavepointStateHandle(keyGroupRangeOffsets, stateHandle);
        } else {
            StateHandleID stateHandleID = KEY_GROUPS_HANDLE_V2 == type ? new StateHandleID(dis.readUTF()) : StateHandleID.randomStateHandleId();
            return KeyGroupsStateHandle.restore(keyGroupRangeOffsets, stateHandle, stateHandleID);
        }
    } else if (INCREMENTAL_KEY_GROUPS_HANDLE == type || INCREMENTAL_KEY_GROUPS_HANDLE_V2 == type) {
        return deserializeIncrementalStateHandle(dis, context, type);
    } else if (CHANGELOG_HANDLE == type) {
        int startKeyGroup = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(startKeyGroup, startKeyGroup + numKeyGroups - 1);
        long checkpointedSize = dis.readLong();
        int baseSize = dis.readInt();
        List<KeyedStateHandle> base = new ArrayList<>(baseSize);
        for (int i = 0; i < baseSize; i++) {
            KeyedStateHandle handle = deserializeKeyedStateHandle(dis, context);
            if (handle != null) {
                base.add(handle);
            } else {
                LOG.warn("Unexpected null keyed state handle of materialized part when deserializing changelog state-backend handle");
            }
        }
        int deltaSize = dis.readInt();
        List<ChangelogStateHandle> delta = new ArrayList<>(deltaSize);
        for (int i = 0; i < deltaSize; i++) {
            delta.add((ChangelogStateHandle) deserializeKeyedStateHandle(dis, context));
        }
        long materializationID = dis.readLong();
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return ChangelogStateBackendHandleImpl.restore(base, delta, keyGroupRange, materializationID, checkpointedSize, stateHandleId);
    } else if (CHANGELOG_BYTE_INCREMENT_HANDLE == type) {
        int start = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(start, start + numKeyGroups - 1);
        long from = dis.readLong();
        long to = dis.readLong();
        int size = dis.readInt();
        List<StateChange> changes = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            int keyGroup = dis.readInt();
            int bytesSize = dis.readInt();
            byte[] bytes = new byte[bytesSize];
            IOUtils.readFully(dis, bytes, 0, bytesSize);
            changes.add(new StateChange(keyGroup, bytes));
        }
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return InMemoryChangelogStateHandle.restore(changes, SequenceNumber.of(from), SequenceNumber.of(to), keyGroupRange, stateHandleId);
    } else if (CHANGELOG_FILE_INCREMENT_HANDLE == type) {
        int start = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(start, start + numKeyGroups - 1);
        int numHandles = dis.readInt();
        List<Tuple2<StreamStateHandle, Long>> streamHandleAndOffset = new ArrayList<>(numHandles);
        for (int i = 0; i < numHandles; i++) {
            long o = dis.readLong();
            StreamStateHandle h = deserializeStreamStateHandle(dis, context);
            streamHandleAndOffset.add(Tuple2.of(h, o));
        }
        long size = dis.readLong();
        long checkpointedSize = dis.readLong();
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return ChangelogStateHandleStreamImpl.restore(streamHandleAndOffset, keyGroupRange, size, checkpointedSize, stateHandleId);
    } else {
        throw new IllegalStateException("Reading invalid KeyedStateHandle, type: " + type);
    }
}
Also used:
KeyGroupRangeOffsets (org.apache.flink.runtime.state.KeyGroupRangeOffsets)
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange)
ArrayList (java.util.ArrayList)
IncrementalRemoteKeyedStateHandle (org.apache.flink.runtime.state.IncrementalRemoteKeyedStateHandle)
KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle)
KeyGroupsSavepointStateHandle (org.apache.flink.runtime.state.KeyGroupsSavepointStateHandle)
OperatorStreamStateHandle (org.apache.flink.runtime.state.OperatorStreamStateHandle)
StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle)
ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle)
StateChange (org.apache.flink.runtime.state.changelog.StateChange)
StateHandleID (org.apache.flink.runtime.state.StateHandleID)
InMemoryChangelogStateHandle (org.apache.flink.runtime.state.changelog.inmemory.InMemoryChangelogStateHandle)
ChangelogStateHandle (org.apache.flink.runtime.state.changelog.ChangelogStateHandle)
List (java.util.List)
VisibleForTesting (org.apache.flink.annotation.VisibleForTesting)
Nullable (javax.annotation.Nullable)
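
To make the byte layout consumed by the KEY_GROUPS_HANDLE branch easier to follow, here is a hedged sketch of the matching write side. The real serializer lives elsewhere in MetadataV2V3SerializerBase; the accessor names below are assumptions chosen to mirror the fields the reader consumes, and the V2 variant would additionally write the state handle ID as a UTF string.

// Illustrative only: writes the fields in the same order the reader above consumes them.
static void writeKeyGroupsStateHandle(KeyGroupsStateHandle handle, DataOutputStream dos) throws IOException {
    KeyGroupRangeOffsets offsets = handle.getGroupRangeOffsets(); // assumed accessor
    KeyGroupRange range = offsets.getKeyGroupRange();
    dos.writeByte(KEY_GROUPS_HANDLE);           // type tag
    dos.writeInt(range.getStartKeyGroup());     // start key group
    dos.writeInt(range.getNumberOfKeyGroups()); // number of key groups
    for (int keyGroup : range) {                // one stream offset per key group
        dos.writeLong(offsets.getKeyGroupOffset(keyGroup));
    }
    // Finally the wrapped stream handle, mirroring deserializeStreamStateHandle(dis, context).
    serializeStreamStateHandle(handle.getDelegateStateHandle(), dos);
}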

Aggregations

Types used together with KeyedStateHandle in the indexed examples, with usage counts:

KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle): 49
Test (org.junit.Test): 16
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle): 15
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange): 14
ArrayList (java.util.ArrayList): 10
KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle): 10
IncrementalRemoteKeyedStateHandle (org.apache.flink.runtime.state.IncrementalRemoteKeyedStateHandle): 9
OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState): 8
SnapshotResult (org.apache.flink.runtime.state.SnapshotResult): 8
HashMap (java.util.HashMap): 7
OperatorID (org.apache.flink.runtime.jobgraph.OperatorID): 7
OperatorStreamStateHandle (org.apache.flink.runtime.state.OperatorStreamStateHandle): 7
StateObjectCollection (org.apache.flink.runtime.checkpoint.StateObjectCollection): 6
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 6
List (java.util.List): 5
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry): 5
InputChannelStateHandle (org.apache.flink.runtime.state.InputChannelStateHandle): 5
ResultSubpartitionStateHandle (org.apache.flink.runtime.state.ResultSubpartitionStateHandle): 5
StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle): 5
Map (java.util.Map): 4