
Example 11 with KeyGroupsStateHandle

Use of org.apache.flink.runtime.state.KeyGroupsStateHandle in project flink by apache.

In the class SavepointV1Serializer, method serializeSubtaskState.

private static void serializeSubtaskState(SubtaskState subtaskState, DataOutputStream dos) throws IOException {
    // legacy placeholder retained for format compatibility; always written as -1
    dos.writeLong(-1);
    ChainedStateHandle<StreamStateHandle> nonPartitionableState = subtaskState.getLegacyOperatorState();
    int len = nonPartitionableState != null ? nonPartitionableState.getLength() : 0;
    dos.writeInt(len);
    for (int i = 0; i < len; ++i) {
        StreamStateHandle stateHandle = nonPartitionableState.get(i);
        serializeStreamStateHandle(stateHandle, dos);
    }
    ChainedStateHandle<OperatorStateHandle> operatorStateBackend = subtaskState.getManagedOperatorState();
    len = operatorStateBackend != null ? operatorStateBackend.getLength() : 0;
    dos.writeInt(len);
    for (int i = 0; i < len; ++i) {
        OperatorStateHandle stateHandle = operatorStateBackend.get(i);
        serializeOperatorStateHandle(stateHandle, dos);
    }
    ChainedStateHandle<OperatorStateHandle> operatorStateFromStream = subtaskState.getRawOperatorState();
    len = operatorStateFromStream != null ? operatorStateFromStream.getLength() : 0;
    dos.writeInt(len);
    for (int i = 0; i < len; ++i) {
        OperatorStateHandle stateHandle = operatorStateFromStream.get(i);
        serializeOperatorStateHandle(stateHandle, dos);
    }
    KeyGroupsStateHandle keyedStateBackend = subtaskState.getManagedKeyedState();
    serializeKeyGroupStateHandle(keyedStateBackend, dos);
    KeyGroupsStateHandle keyedStateStream = subtaskState.getRawKeyedState();
    serializeKeyGroupStateHandle(keyedStateStream, dos);
}
Also used : StreamStateHandle(org.apache.flink.runtime.state.StreamStateHandle) ByteStreamStateHandle(org.apache.flink.runtime.state.memory.ByteStreamStateHandle) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle)
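
For orientation, the two serializeKeyGroupStateHandle calls at the end persist handles that pair a KeyGroupRangeOffsets (which key groups are covered and the byte offset of each group inside the stream) with a delegate StreamStateHandle holding the actual bytes. The construction sketch below is purely illustrative; it additionally assumes the org.apache.flink.runtime.state.KeyGroupRange and KeyGroupRangeOffsets imports, and the byte payload is a made-up placeholder.

// Illustrative only: what a KeyGroupsStateHandle bundles together.
byte[] payload = new byte[]{1, 2, 3, 4}; // placeholder state bytes
KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(new KeyGroupRange(0, 1)); // covers key groups 0 and 1
offsets.setKeyGroupOffset(0, 0L); // key group 0 starts at byte 0 of the payload
offsets.setKeyGroupOffset(1, 2L); // key group 1 starts at byte 2 of the payload
StreamStateHandle delegate = new ByteStreamStateHandle("sketch-handle", payload);
KeyGroupsStateHandle keyGroupsStateHandle = new KeyGroupsStateHandle(offsets, delegate);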

Example 12 with KeyGroupsStateHandle

Use of org.apache.flink.runtime.state.KeyGroupsStateHandle in project flink by apache.

In the class StateAssignmentOperation, method assignTaskStatesToOperatorInstances.

private static void assignTaskStatesToOperatorInstances(TaskState taskState, ExecutionJobVertex executionJobVertex) {
    final int oldParallelism = taskState.getParallelism();
    final int newParallelism = executionJobVertex.getParallelism();
    List<KeyGroupRange> keyGroupPartitions = createKeyGroupPartitions(executionJobVertex.getMaxParallelism(), newParallelism);
    final int chainLength = taskState.getChainLength();
    // operator chain idx -> list of the stored op states from all parallel instances for this chain idx
    @SuppressWarnings("unchecked") List<OperatorStateHandle>[] parallelOpStatesBackend = new List[chainLength];
    @SuppressWarnings("unchecked") List<OperatorStateHandle>[] parallelOpStatesStream = new List[chainLength];
    List<KeyGroupsStateHandle> parallelKeyedStatesBackend = new ArrayList<>(oldParallelism);
    List<KeyGroupsStateHandle> parallelKeyedStateStream = new ArrayList<>(oldParallelism);
    for (int p = 0; p < oldParallelism; ++p) {
        SubtaskState subtaskState = taskState.getState(p);
        if (null != subtaskState) {
            collectParallelStatesByChainOperator(parallelOpStatesBackend, subtaskState.getManagedOperatorState());
            collectParallelStatesByChainOperator(parallelOpStatesStream, subtaskState.getRawOperatorState());
            KeyGroupsStateHandle keyedStateBackend = subtaskState.getManagedKeyedState();
            if (null != keyedStateBackend) {
                parallelKeyedStatesBackend.add(keyedStateBackend);
            }
            KeyGroupsStateHandle keyedStateStream = subtaskState.getRawKeyedState();
            if (null != keyedStateStream) {
                parallelKeyedStateStream.add(keyedStateStream);
            }
        }
    }
    // operator chain index -> lists with collected states (one collection for each parallel subtasks)
    @SuppressWarnings("unchecked") List<Collection<OperatorStateHandle>>[] partitionedParallelStatesBackend = new List[chainLength];
    @SuppressWarnings("unchecked") List<Collection<OperatorStateHandle>>[] partitionedParallelStatesStream = new List[chainLength];
    //TODO here we can employ different redistribution strategies for state, e.g. union state.
    // For now we only offer round robin as the default.
    OperatorStateRepartitioner opStateRepartitioner = RoundRobinOperatorStateRepartitioner.INSTANCE;
    for (int chainIdx = 0; chainIdx < chainLength; ++chainIdx) {
        List<OperatorStateHandle> chainOpParallelStatesBackend = parallelOpStatesBackend[chainIdx];
        List<OperatorStateHandle> chainOpParallelStatesStream = parallelOpStatesStream[chainIdx];
        partitionedParallelStatesBackend[chainIdx] = applyRepartitioner(opStateRepartitioner, chainOpParallelStatesBackend, oldParallelism, newParallelism);
        partitionedParallelStatesStream[chainIdx] = applyRepartitioner(opStateRepartitioner, chainOpParallelStatesStream, oldParallelism, newParallelism);
    }
    for (int subTaskIdx = 0; subTaskIdx < newParallelism; ++subTaskIdx) {
        // non-partitioned state
        ChainedStateHandle<StreamStateHandle> nonPartitionableState = null;
        if (oldParallelism == newParallelism) {
            if (taskState.getState(subTaskIdx) != null) {
                nonPartitionableState = taskState.getState(subTaskIdx).getLegacyOperatorState();
            }
        }
        // partitionable state
        @SuppressWarnings("unchecked") Collection<OperatorStateHandle>[] iab = new Collection[chainLength];
        @SuppressWarnings("unchecked") Collection<OperatorStateHandle>[] ias = new Collection[chainLength];
        List<Collection<OperatorStateHandle>> operatorStateFromBackend = Arrays.asList(iab);
        List<Collection<OperatorStateHandle>> operatorStateFromStream = Arrays.asList(ias);
        for (int chainIdx = 0; chainIdx < partitionedParallelStatesBackend.length; ++chainIdx) {
            List<Collection<OperatorStateHandle>> redistributedOpStateBackend = partitionedParallelStatesBackend[chainIdx];
            List<Collection<OperatorStateHandle>> redistributedOpStateStream = partitionedParallelStatesStream[chainIdx];
            if (redistributedOpStateBackend != null) {
                operatorStateFromBackend.set(chainIdx, redistributedOpStateBackend.get(subTaskIdx));
            }
            if (redistributedOpStateStream != null) {
                operatorStateFromStream.set(chainIdx, redistributedOpStateStream.get(subTaskIdx));
            }
        }
        Execution currentExecutionAttempt = executionJobVertex.getTaskVertices()[subTaskIdx].getCurrentExecutionAttempt();
        List<KeyGroupsStateHandle> newKeyedStatesBackend;
        List<KeyGroupsStateHandle> newKeyedStateStream;
        if (oldParallelism == newParallelism) {
            SubtaskState subtaskState = taskState.getState(subTaskIdx);
            if (subtaskState != null) {
                KeyGroupsStateHandle oldKeyedStatesBackend = subtaskState.getManagedKeyedState();
                KeyGroupsStateHandle oldKeyedStatesStream = subtaskState.getRawKeyedState();
                newKeyedStatesBackend = oldKeyedStatesBackend != null ? Collections.singletonList(oldKeyedStatesBackend) : null;
                newKeyedStateStream = oldKeyedStatesStream != null ? Collections.singletonList(oldKeyedStatesStream) : null;
            } else {
                newKeyedStatesBackend = null;
                newKeyedStateStream = null;
            }
        } else {
            KeyGroupRange subtaskKeyGroupIds = keyGroupPartitions.get(subTaskIdx);
            newKeyedStatesBackend = getKeyGroupsStateHandles(parallelKeyedStatesBackend, subtaskKeyGroupIds);
            newKeyedStateStream = getKeyGroupsStateHandles(parallelKeyedStateStream, subtaskKeyGroupIds);
        }
        TaskStateHandles taskStateHandles = new TaskStateHandles(nonPartitionableState, operatorStateFromBackend, operatorStateFromStream, newKeyedStatesBackend, newKeyedStateStream);
        currentExecutionAttempt.setInitialState(taskStateHandles);
    }
}
Also used : KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) ArrayList(java.util.ArrayList) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) TaskStateHandles(org.apache.flink.runtime.state.TaskStateHandles) StreamStateHandle(org.apache.flink.runtime.state.StreamStateHandle) Execution(org.apache.flink.runtime.executiongraph.Execution) Collection(java.util.Collection) List(java.util.List) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle)
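
In the rescaling branch, getKeyGroupsStateHandles has to narrow each old subtask's keyed state down to the key groups owned by the new subtask. The sketch below illustrates that idea under the assumption that KeyGroupsStateHandle.getIntersection and KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex are available with the semantics their names suggest; it is not the actual StateAssignmentOperation code.

// Illustrative sketch: keep only the slices of old keyed state that overlap the new subtask's key groups.
private static List<KeyGroupsStateHandle> narrowToSubtask(List<KeyGroupsStateHandle> oldHandles, int maxParallelism, int newParallelism, int subTaskIdx) {
    KeyGroupRange subtaskRange = KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex(maxParallelism, newParallelism, subTaskIdx);
    List<KeyGroupsStateHandle> result = new ArrayList<>();
    for (KeyGroupsStateHandle oldHandle : oldHandles) {
        KeyGroupsStateHandle intersection = oldHandle.getIntersection(subtaskRange);
        if (intersection.getNumberOfKeyGroups() > 0) {
            result.add(intersection); // this handle contributes key groups to the new subtask
        }
    }
    return result;
}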

Example 13 with KeyGroupsStateHandle

Use of org.apache.flink.runtime.state.KeyGroupsStateHandle in project flink by apache.

In the class AbstractStreamOperatorTest, method testFailingBackendSnapshotMethod.

/**
	 * Tests that a failing snapshot method call to the keyed state backend will trigger the closing
	 * of the StateSnapshotContextSynchronousImpl and the cancellation of the
	 * OperatorSnapshotResult. The latter is supposed to also cancel all assigned futures.
	 */
@Test
public void testFailingBackendSnapshotMethod() throws Exception {
    final long checkpointId = 42L;
    final long timestamp = 1L;
    final Exception failingException = new Exception("Test exception");
    final CloseableRegistry closeableRegistry = new CloseableRegistry();
    RunnableFuture<KeyGroupsStateHandle> futureKeyGroupStateHandle = mock(RunnableFuture.class);
    RunnableFuture<OperatorStateHandle> futureOperatorStateHandle = mock(RunnableFuture.class);
    StateSnapshotContextSynchronousImpl context = mock(StateSnapshotContextSynchronousImpl.class);
    when(context.getKeyedStateStreamFuture()).thenReturn(futureKeyGroupStateHandle);
    when(context.getOperatorStateStreamFuture()).thenReturn(futureOperatorStateHandle);
    OperatorSnapshotResult operatorSnapshotResult = spy(new OperatorSnapshotResult());
    whenNew(StateSnapshotContextSynchronousImpl.class).withAnyArguments().thenReturn(context);
    whenNew(OperatorSnapshotResult.class).withAnyArguments().thenReturn(operatorSnapshotResult);
    CheckpointStreamFactory streamFactory = mock(CheckpointStreamFactory.class);
    StreamTask<Void, AbstractStreamOperator<Void>> containingTask = mock(StreamTask.class);
    when(containingTask.getCancelables()).thenReturn(closeableRegistry);
    AbstractStreamOperator<Void> operator = mock(AbstractStreamOperator.class);
    when(operator.snapshotState(anyLong(), anyLong(), any(CheckpointOptions.class))).thenCallRealMethod();
    // The amount of mocking in this test makes it necessary to make the
    // getCheckpointStreamFactory method visible for the test and to
    // overwrite its behaviour.
    when(operator.getCheckpointStreamFactory(any(CheckpointOptions.class))).thenReturn(streamFactory);
    doReturn(containingTask).when(operator).getContainingTask();
    RunnableFuture<OperatorStateHandle> futureManagedOperatorStateHandle = mock(RunnableFuture.class);
    OperatorStateBackend operatorStateBackend = mock(OperatorStateBackend.class);
    when(operatorStateBackend.snapshot(eq(checkpointId), eq(timestamp), eq(streamFactory), any(CheckpointOptions.class))).thenReturn(futureManagedOperatorStateHandle);
    AbstractKeyedStateBackend<?> keyedStateBackend = mock(AbstractKeyedStateBackend.class);
    when(keyedStateBackend.snapshot(eq(checkpointId), eq(timestamp), eq(streamFactory), eq(CheckpointOptions.forFullCheckpoint()))).thenThrow(failingException);
    Whitebox.setInternalState(operator, "operatorStateBackend", operatorStateBackend);
    Whitebox.setInternalState(operator, "keyedStateBackend", keyedStateBackend);
    Whitebox.setInternalState(operator, "checkpointStreamFactory", streamFactory);
    try {
        operator.snapshotState(checkpointId, timestamp, CheckpointOptions.forFullCheckpoint());
        fail("Exception expected.");
    } catch (Exception e) {
        assertEquals(failingException, e.getCause());
    }
    // verify that the context has been closed, the operator snapshot result has been cancelled
    // and that all futures have been cancelled.
    verify(context).close();
    verify(operatorSnapshotResult).cancel();
    verify(futureKeyGroupStateHandle).cancel(anyBoolean());
    verify(futureOperatorStateHandle).cancel(anyBoolean());
}
Also used : CheckpointStreamFactory(org.apache.flink.runtime.state.CheckpointStreamFactory) StateSnapshotContextSynchronousImpl(org.apache.flink.runtime.state.StateSnapshotContextSynchronousImpl) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) IOException(java.io.IOException) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) CheckpointOptions(org.apache.flink.runtime.checkpoint.CheckpointOptions) OperatorStateBackend(org.apache.flink.runtime.state.OperatorStateBackend) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
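
What the test pins down is the cancel-on-failure contract around the asynchronous snapshot futures. Below is a generic sketch of that contract, not the AbstractStreamOperator implementation; keyedStateBackend, streamFactory, checkpointId and timestamp stand for a real (non-mocked) backend and checkpoint parameters, and the snapshot signature is the one mocked in the test above.

// Illustrative: run the asynchronous keyed-state snapshot and cancel it if anything fails.
RunnableFuture<KeyGroupsStateHandle> keyedFuture = keyedStateBackend.snapshot(checkpointId, timestamp, streamFactory, CheckpointOptions.forFullCheckpoint());
try {
    keyedFuture.run(); // normally executed by an async checkpointing thread
    KeyGroupsStateHandle keyedStateHandle = keyedFuture.get();
} catch (Exception e) {
    keyedFuture.cancel(true); // mirrors what OperatorSnapshotResult.cancel() does for each future
    throw e;
}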

Example 14 with KeyGroupsStateHandle

Use of org.apache.flink.runtime.state.KeyGroupsStateHandle in project flink by apache.

In the class StateInitializationContextImplTest, method setUp.

@Before
public void setUp() throws Exception {
    this.writtenKeyGroups = 0;
    this.writtenOperatorStates = new HashSet<>();
    this.closableRegistry = new CloseableRegistry();
    OperatorStateStore stateStore = mock(OperatorStateStore.class);
    ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos(64);
    List<KeyGroupsStateHandle> keyGroupsStateHandles = new ArrayList<>(NUM_HANDLES);
    int prev = 0;
    for (int i = 0; i < NUM_HANDLES; ++i) {
        out.reset();
        int size = i % 4;
        int end = prev + size;
        DataOutputView dov = new DataOutputViewStreamWrapper(out);
        KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(i == 9 ? KeyGroupRange.EMPTY_KEY_GROUP_RANGE : new KeyGroupRange(prev, end));
        prev = end + 1;
        for (int kg : offsets.getKeyGroupRange()) {
            offsets.setKeyGroupOffset(kg, out.getPosition());
            dov.writeInt(kg);
            ++writtenKeyGroups;
        }
        KeyGroupsStateHandle handle = new KeyGroupsStateHandle(offsets, new ByteStateHandleCloseChecking("kg-" + i, out.toByteArray()));
        keyGroupsStateHandles.add(handle);
    }
    List<OperatorStateHandle> operatorStateHandles = new ArrayList<>(NUM_HANDLES);
    for (int i = 0; i < NUM_HANDLES; ++i) {
        int size = i % 4;
        out.reset();
        DataOutputView dov = new DataOutputViewStreamWrapper(out);
        LongArrayList offsets = new LongArrayList(size);
        for (int s = 0; s < size; ++s) {
            offsets.add(out.getPosition());
            int val = i * NUM_HANDLES + s;
            dov.writeInt(val);
            writtenOperatorStates.add(val);
        }
        Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
        offsetsMap.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, new OperatorStateHandle.StateMetaInfo(offsets.toArray(), OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
        OperatorStateHandle operatorStateHandle = new OperatorStateHandle(offsetsMap, new ByteStateHandleCloseChecking("os-" + i, out.toByteArray()));
        operatorStateHandles.add(operatorStateHandle);
    }
    this.initializationContext = new StateInitializationContextImpl(true, stateStore, mock(KeyedStateStore.class), keyGroupsStateHandles, operatorStateHandles, closableRegistry);
}
Also used : OperatorStateStore(org.apache.flink.api.common.state.OperatorStateStore) HashMap(java.util.HashMap) KeyGroupRangeOffsets(org.apache.flink.runtime.state.KeyGroupRangeOffsets) LongArrayList(org.apache.flink.runtime.util.LongArrayList) ArrayList(java.util.ArrayList) KeyGroupRange(org.apache.flink.runtime.state.KeyGroupRange) DataOutputView(org.apache.flink.core.memory.DataOutputView) CloseableRegistry(org.apache.flink.core.fs.CloseableRegistry) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) DataOutputViewStreamWrapper(org.apache.flink.core.memory.DataOutputViewStreamWrapper) StateInitializationContextImpl(org.apache.flink.runtime.state.StateInitializationContextImpl) OperatorStateHandle(org.apache.flink.runtime.state.OperatorStateHandle) ByteArrayOutputStreamWithPos(org.apache.flink.core.memory.ByteArrayOutputStreamWithPos) Before(org.junit.Before)
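
Reading these handles back mirrors the write path: the KeyGroupRangeOffsets recorded per handle says where each key group starts in the delegate stream. The read-path sketch below is illustrative, assuming the FSDataInputStream returned by openInputStream is seekable (true for the byte-array handles used here) and the additional FSDataInputStream, DataInputView and DataInputViewStreamWrapper imports.

// Illustrative: read back the single int written per key group in setUp.
for (KeyGroupsStateHandle handle : keyGroupsStateHandles) {
    KeyGroupRangeOffsets offsets = handle.getGroupRangeOffsets();
    try (FSDataInputStream in = handle.openInputStream()) {
        DataInputView div = new DataInputViewStreamWrapper(in);
        for (int keyGroup : offsets.getKeyGroupRange()) {
            in.seek(offsets.getKeyGroupOffset(keyGroup)); // jump to this key group's data
            int value = div.readInt();
        }
    }
}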

Example 15 with KeyGroupsStateHandle

Use of org.apache.flink.runtime.state.KeyGroupsStateHandle in project flink by apache.

In the class MigrationV0ToV1Test, method testSavepointMigrationV0ToV1.

/**
	 * Simple test of savepoint methods.
	 */
@Test
public void testSavepointMigrationV0ToV1() throws Exception {
    String target = tmp.getRoot().getAbsolutePath();
    assertEquals(0, tmp.getRoot().listFiles().length);
    long checkpointId = ThreadLocalRandom.current().nextLong(Integer.MAX_VALUE);
    int numTaskStates = 4;
    int numSubtaskStates = 16;
    Collection<org.apache.flink.migration.runtime.checkpoint.TaskState> expected = createTaskStatesOld(numTaskStates, numSubtaskStates);
    SavepointV0 savepoint = new SavepointV0(checkpointId, expected);
    assertEquals(SavepointV0.VERSION, savepoint.getVersion());
    assertEquals(checkpointId, savepoint.getCheckpointId());
    assertEquals(expected, savepoint.getOldTaskStates());
    assertFalse(savepoint.getOldTaskStates().isEmpty());
    Exception latestException = null;
    Path path = null;
    FSDataOutputStream fdos = null;
    FileSystem fs = null;
    try {
        // Try to create a FS output stream
        for (int attempt = 0; attempt < 10; attempt++) {
            path = new Path(target, FileUtils.getRandomFilename("savepoint-"));
            if (fs == null) {
                fs = FileSystem.get(path.toUri());
            }
            try {
                fdos = fs.create(path, false);
                break;
            } catch (Exception e) {
                latestException = e;
            }
        }
        if (fdos == null) {
            throw new IOException("Failed to create file output stream at " + path, latestException);
        }
        try (DataOutputStream dos = new DataOutputStream(fdos)) {
            dos.writeInt(SavepointStore.MAGIC_NUMBER);
            dos.writeInt(savepoint.getVersion());
            SavepointV0Serializer.INSTANCE.serializeOld(savepoint, dos);
        }
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        Savepoint sp = SavepointStore.loadSavepoint(path.toString(), cl);
        int t = 0;
        for (TaskState taskState : sp.getTaskStates()) {
            for (int p = 0; p < taskState.getParallelism(); ++p) {
                SubtaskState subtaskState = taskState.getState(p);
                ChainedStateHandle<StreamStateHandle> legacyOperatorState = subtaskState.getLegacyOperatorState();
                for (int c = 0; c < legacyOperatorState.getLength(); ++c) {
                    StreamStateHandle stateHandle = legacyOperatorState.get(c);
                    try (InputStream is = stateHandle.openInputStream()) {
                        Tuple4<Integer, Integer, Integer, Integer> expTestState = new Tuple4<>(0, t, p, c);
                        Tuple4<Integer, Integer, Integer, Integer> actTestState;
                        //check function state
                        if (p % 4 != 0) {
                            assertEquals(1, is.read());
                            actTestState = InstantiationUtil.deserializeObject(is, cl);
                            assertEquals(expTestState, actTestState);
                        } else {
                            assertEquals(0, is.read());
                        }
                        //check operator state
                        expTestState.f0 = 1;
                        actTestState = InstantiationUtil.deserializeObject(is, cl);
                        assertEquals(expTestState, actTestState);
                    }
                }
                //check keyed state
                KeyGroupsStateHandle keyGroupsStateHandle = subtaskState.getManagedKeyedState();
                if (t % 3 != 0) {
                    assertEquals(1, keyGroupsStateHandle.getNumberOfKeyGroups());
                    assertEquals(p, keyGroupsStateHandle.getGroupRangeOffsets().getKeyGroupRange().getStartKeyGroup());
                    ByteStreamStateHandle stateHandle = (ByteStreamStateHandle) keyGroupsStateHandle.getDelegateStateHandle();
                    HashMap<String, KvStateSnapshot<?, ?, ?, ?>> testKeyedState = MigrationInstantiationUtil.deserializeObject(stateHandle.getData(), cl);
                    assertEquals(2, testKeyedState.size());
                    for (KvStateSnapshot<?, ?, ?, ?> snapshot : testKeyedState.values()) {
                        MemValueState.Snapshot<?, ?, ?> castedSnapshot = (MemValueState.Snapshot<?, ?, ?>) snapshot;
                        byte[] data = castedSnapshot.getData();
                        assertEquals(t, data[0]);
                        assertEquals(p, data[1]);
                    }
                } else {
                    assertEquals(null, keyGroupsStateHandle);
                }
            }
            ++t;
        }
        savepoint.dispose();
    } finally {
        // Dispose
        SavepointStore.removeSavepointFile(path.toString());
    }
}
Also used : FSDataOutputStream(org.apache.flink.core.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) ByteStreamStateHandle(org.apache.flink.runtime.state.memory.ByteStreamStateHandle) KeyGroupsStateHandle(org.apache.flink.runtime.state.KeyGroupsStateHandle) StreamStateHandle(org.apache.flink.runtime.state.StreamStateHandle) SavepointV0(org.apache.flink.migration.runtime.checkpoint.savepoint.SavepointV0) FileSystem(org.apache.flink.core.fs.FileSystem) Path(org.apache.flink.core.fs.Path) InputStream(java.io.InputStream) MemValueState(org.apache.flink.migration.runtime.state.memory.MemValueState) IOException(java.io.IOException) KvStateSnapshot(org.apache.flink.migration.runtime.state.KvStateSnapshot) Tuple4(org.apache.flink.api.java.tuple.Tuple4) SubtaskState(org.apache.flink.runtime.checkpoint.SubtaskState) StreamTaskState(org.apache.flink.migration.streaming.runtime.tasks.StreamTaskState) TaskState(org.apache.flink.runtime.checkpoint.TaskState) Test(org.junit.Test)
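
The keyed-state assertions above only exercise a few accessors of KeyGroupsStateHandle. The small helper below just gathers that same metadata in one place; it is illustrative only and assumes the org.apache.flink.runtime.state.KeyGroupRange import.

// Illustrative: summarize the metadata the migration test asserts on.
static String describe(KeyGroupsStateHandle handle) {
    if (handle == null) {
        return "no keyed state for this subtask";
    }
    KeyGroupRange range = handle.getGroupRangeOffsets().getKeyGroupRange();
    return "keyGroups=" + handle.getNumberOfKeyGroups()
        + ", range=[" + range.getStartKeyGroup() + ", " + range.getEndKeyGroup() + "]"
        + ", delegate=" + handle.getDelegateStateHandle().getClass().getSimpleName();
}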

Aggregations

KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle): 35 usages
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle): 20 usages
StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle): 17 usages
ArrayList (java.util.ArrayList): 14 usages
Test (org.junit.Test): 14 usages
JobID (org.apache.flink.api.common.JobID): 11 usages
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange): 11 usages
HashMap (java.util.HashMap): 10 usages
ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle): 10 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 7 usages
AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint): 7 usages
CheckpointStreamFactory (org.apache.flink.runtime.state.CheckpointStreamFactory): 7 usages
DeclineCheckpoint (org.apache.flink.runtime.messages.checkpoint.DeclineCheckpoint): 6 usages
IOException (java.io.IOException): 5 usages
ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex): 5 usages
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex): 5 usages
ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle): 5 usages
KeyGroupRangeOffsets (org.apache.flink.runtime.state.KeyGroupRangeOffsets): 5 usages
TaskStateHandles (org.apache.flink.runtime.state.TaskStateHandles): 5 usages
Collection (java.util.Collection): 4 usages