
Example 1 with ChainedStateHandle

Use of org.apache.flink.runtime.state.ChainedStateHandle in project flink by apache: class SavepointV1Test, method createTaskStates.

static Collection<TaskState> createTaskStates(int numTaskStates, int numSubtasksPerTask) throws IOException {
    Random random = new Random(numTaskStates * 31 + numSubtasksPerTask);
    List<TaskState> taskStates = new ArrayList<>(numTaskStates);
    for (int stateIdx = 0; stateIdx < numTaskStates; ++stateIdx) {
        int chainLength = 1 + random.nextInt(8);
        TaskState taskState = new TaskState(new JobVertexID(), numSubtasksPerTask, 128, chainLength);
        int noNonPartitionableStateAtIndex = random.nextInt(chainLength);
        int noOperatorStateBackendAtIndex = random.nextInt(chainLength);
        int noOperatorStateStreamAtIndex = random.nextInt(chainLength);
        boolean hasKeyedBackend = random.nextInt(4) != 0;
        boolean hasKeyedStream = random.nextInt(4) != 0;
        for (int subtaskIdx = 0; subtaskIdx < numSubtasksPerTask; subtaskIdx++) {
            List<StreamStateHandle> nonPartitionableStates = new ArrayList<>(chainLength);
            List<OperatorStateHandle> operatorStatesBackend = new ArrayList<>(chainLength);
            List<OperatorStateHandle> operatorStatesStream = new ArrayList<>(chainLength);
            for (int chainIdx = 0; chainIdx < chainLength; ++chainIdx) {
                StreamStateHandle nonPartitionableState = new TestByteStreamStateHandleDeepCompare("a-" + chainIdx, ("Hi-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                StreamStateHandle operatorStateBackend = new TestByteStreamStateHandleDeepCompare("b-" + chainIdx, ("Beautiful-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                StreamStateHandle operatorStateStream = new TestByteStreamStateHandleDeepCompare("b-" + chainIdx, ("Beautiful-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
                offsetsMap.put("A", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10, 20 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
                offsetsMap.put("B", new OperatorStateHandle.StateMetaInfo(new long[] { 30, 40, 50 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
                offsetsMap.put("C", new OperatorStateHandle.StateMetaInfo(new long[] { 60, 70, 80 }, OperatorStateHandle.Mode.BROADCAST));
                if (chainIdx != noNonPartitionableStateAtIndex) {
                    nonPartitionableStates.add(nonPartitionableState);
                }
                if (chainIdx != noOperatorStateBackendAtIndex) {
                    OperatorStateHandle operatorStateHandleBackend = new OperatorStateHandle(offsetsMap, operatorStateBackend);
                    operatorStatesBackend.add(operatorStateHandleBackend);
                }
                if (chainIdx != noOperatorStateStreamAtIndex) {
                    OperatorStateHandle operatorStateHandleStream = new OperatorStateHandle(offsetsMap, operatorStateStream);
                    operatorStatesStream.add(operatorStateHandleStream);
                }
            }
            KeyGroupsStateHandle keyedStateBackend = null;
            KeyGroupsStateHandle keyedStateStream = null;
            if (hasKeyedBackend) {
                keyedStateBackend = new KeyGroupsStateHandle(new KeyGroupRangeOffsets(1, 1, new long[] { 42 }), new TestByteStreamStateHandleDeepCompare("c", "Hello".getBytes(ConfigConstants.DEFAULT_CHARSET)));
            }
            if (hasKeyedStream) {
                keyedStateStream = new KeyGroupsStateHandle(new KeyGroupRangeOffsets(1, 1, new long[] { 23 }), new TestByteStreamStateHandleDeepCompare("d", "World".getBytes(ConfigConstants.DEFAULT_CHARSET)));
            }
            taskState.putState(subtaskIdx, new SubtaskState(new ChainedStateHandle<>(nonPartitionableStates), new ChainedStateHandle<>(operatorStatesBackend), new ChainedStateHandle<>(operatorStatesStream), keyedStateStream, keyedStateBackend));
        }
        taskStates.add(taskState);
    }
    return taskStates;
}
Also used: HashMap (java.util.HashMap), KeyGroupRangeOffsets (org.apache.flink.runtime.state.KeyGroupRangeOffsets), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), TestByteStreamStateHandleDeepCompare (org.apache.flink.runtime.util.TestByteStreamStateHandleDeepCompare), ArrayList (java.util.ArrayList), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle), StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle), Random (java.util.Random), ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom), SubtaskState (org.apache.flink.runtime.checkpoint.SubtaskState), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle), TaskState (org.apache.flink.runtime.checkpoint.TaskState)
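For orientation, here is a minimal, self-contained sketch of the pattern the test above exercises: one ChainedStateHandle per subtask, holding one (possibly null) handle per operator in the chain. It assumes the ByteStreamStateHandle(String, byte[]) constructor from this codebase; the handle names and byte contents are made up for illustration.

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.runtime.state.ChainedStateHandle;
import org.apache.flink.runtime.state.StreamStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

public class ChainedStateHandleSketch {

    public static void main(String[] args) {
        // One slot per operator in the chain; a null slot means the operator
        // at that chain position reported no state (cf. the no*AtIndex logic above).
        List<StreamStateHandle> perOperator = new ArrayList<>();
        perOperator.add(new ByteStreamStateHandle("op-0", "head".getBytes(ConfigConstants.DEFAULT_CHARSET)));
        perOperator.add(null);
        perOperator.add(new ByteStreamStateHandle("op-2", "tail".getBytes(ConfigConstants.DEFAULT_CHARSET)));

        ChainedStateHandle<StreamStateHandle> chain = new ChainedStateHandle<>(perOperator);

        // Access is by position in the operator chain.
        for (int i = 0; i < chain.getLength(); i++) {
            System.out.println("chain index " + i + " -> " + chain.get(i));
        }
    }
}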

Example 2 with ChainedStateHandle

Use of org.apache.flink.runtime.state.ChainedStateHandle in project flink by apache: class SavepointV0Serializer, method convertSubtaskState.

private org.apache.flink.runtime.checkpoint.SubtaskState convertSubtaskState(SubtaskState subtaskState, int parallelInstanceIdx, ClassLoader userClassLoader, long checkpointID) throws Exception {
    SerializedValue<StateHandle<?>> serializedValue = subtaskState.getState();
    StreamTaskStateList stateList = (StreamTaskStateList) serializedValue.deserializeValue(userClassLoader);
    StreamTaskState[] streamTaskStates = stateList.getState(userClassLoader);
    List<StreamStateHandle> newChainStateList = Arrays.asList(new StreamStateHandle[streamTaskStates.length]);
    KeyGroupsStateHandle newKeyedState = null;
    for (int chainIdx = 0; chainIdx < streamTaskStates.length; ++chainIdx) {
        StreamTaskState streamTaskState = streamTaskStates[chainIdx];
        if (streamTaskState == null) {
            continue;
        }
        newChainStateList.set(chainIdx, convertOperatorAndFunctionState(streamTaskState));
        HashMap<String, KvStateSnapshot<?, ?, ?, ?>> oldKeyedState = streamTaskState.getKvStates();
        if (null != oldKeyedState) {
            Preconditions.checkState(null == newKeyedState, "Found more than one keyed state in chain");
            newKeyedState = convertKeyedBackendState(oldKeyedState, parallelInstanceIdx, checkpointID);
        }
    }
    ChainedStateHandle<StreamStateHandle> newChainedState = new ChainedStateHandle<>(newChainStateList);
    ChainedStateHandle<OperatorStateHandle> nopChain = new ChainedStateHandle<>(Arrays.asList(new OperatorStateHandle[newChainedState.getLength()]));
    return new org.apache.flink.runtime.checkpoint.SubtaskState(newChainedState, nopChain, nopChain, newKeyedState, null);
}
Also used: KvStateSnapshot (org.apache.flink.migration.runtime.state.KvStateSnapshot), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle), MigrationStreamStateHandle (org.apache.flink.migration.state.MigrationStreamStateHandle), StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle), ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle), MultiStreamStateHandle (org.apache.flink.runtime.state.MultiStreamStateHandle), SubtaskState (org.apache.flink.migration.runtime.checkpoint.SubtaskState), StreamTaskStateList (org.apache.flink.migration.streaming.runtime.tasks.StreamTaskStateList), MigrationKeyGroupStateHandle (org.apache.flink.migration.state.MigrationKeyGroupStateHandle), FileStateHandle (org.apache.flink.runtime.state.filesystem.FileStateHandle), AbstractFileStateHandle (org.apache.flink.migration.runtime.state.filesystem.AbstractFileStateHandle), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle), SerializedStateHandle (org.apache.flink.migration.runtime.state.memory.SerializedStateHandle), StateHandle (org.apache.flink.migration.runtime.state.StateHandle), StreamTaskState (org.apache.flink.migration.streaming.runtime.tasks.StreamTaskState)
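A detail worth noting in convertSubtaskState: Arrays.asList(new StreamStateHandle[n]) creates a fixed-size, null-filled list whose slots can be overwritten with set but never grown with add, which is exactly what the chain-index loop relies on. A minimal JDK-only demonstration of that idiom:

import java.util.Arrays;
import java.util.List;

public class FixedSizeListDemo {

    public static void main(String[] args) {
        // A fixed-size view backed by the array: set(..) works, add(..) throws.
        List<String> slots = Arrays.asList(new String[3]);
        slots.set(1, "filled");
        System.out.println(slots); // [null, filled, null]
        try {
            slots.add("boom");
        } catch (UnsupportedOperationException expected) {
            System.out.println("add() is unsupported on a fixed-size list");
        }
    }
}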

Example 3 with ChainedStateHandle

Use of org.apache.flink.runtime.state.ChainedStateHandle in project flink by apache: class CheckpointCoordinatorTest, method testRestoreLatestCheckpointedStateWithChangingParallelism.

/**
 * Tests checkpoint restoration with changing parallelism of a job vertex with partitioned
 * state.
 */
private void testRestoreLatestCheckpointedStateWithChangingParallelism(boolean scaleOut) throws Exception {
    final JobID jid = new JobID();
    final long timestamp = System.currentTimeMillis();
    final JobVertexID jobVertexID1 = new JobVertexID();
    final JobVertexID jobVertexID2 = new JobVertexID();
    int parallelism1 = 3;
    int parallelism2 = scaleOut ? 2 : 13;
    int maxParallelism1 = 42;
    int maxParallelism2 = 13;
    int newParallelism2 = scaleOut ? 13 : 2;
    final ExecutionJobVertex jobVertex1 = mockExecutionJobVertex(jobVertexID1, parallelism1, maxParallelism1);
    final ExecutionJobVertex jobVertex2 = mockExecutionJobVertex(jobVertexID2, parallelism2, maxParallelism2);
    List<ExecutionVertex> allExecutionVertices = new ArrayList<>(parallelism1 + parallelism2);
    allExecutionVertices.addAll(Arrays.asList(jobVertex1.getTaskVertices()));
    allExecutionVertices.addAll(Arrays.asList(jobVertex2.getTaskVertices()));
    ExecutionVertex[] arrayExecutionVertices = allExecutionVertices.toArray(new ExecutionVertex[allExecutionVertices.size()]);
    // set up the coordinator and validate the initial state
    CheckpointCoordinator coord = new CheckpointCoordinator(jid, 600000, 600000, 0, Integer.MAX_VALUE, ExternalizedCheckpointSettings.none(), arrayExecutionVertices, arrayExecutionVertices, arrayExecutionVertices, new StandaloneCheckpointIDCounter(), new StandaloneCompletedCheckpointStore(1), null, Executors.directExecutor());
    // trigger the checkpoint
    coord.triggerCheckpoint(timestamp, false);
    assertTrue(coord.getPendingCheckpoints().keySet().size() == 1);
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, 0L);
    List<KeyGroupRange> keyGroupPartitions1 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism1, parallelism1);
    List<KeyGroupRange> keyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, parallelism2);
    //vertex 1
    for (int index = 0; index < jobVertex1.getParallelism(); index++) {
        ChainedStateHandle<StreamStateHandle> valueSizeTuple = generateStateForVertex(jobVertexID1, index);
        ChainedStateHandle<OperatorStateHandle> opStateBackend = generateChainedPartitionableStateHandle(jobVertexID1, index, 2, 8, false);
        KeyGroupsStateHandle keyedStateBackend = generateKeyGroupState(jobVertexID1, keyGroupPartitions1.get(index), false);
        KeyGroupsStateHandle keyedStateRaw = generateKeyGroupState(jobVertexID1, keyGroupPartitions1.get(index), true);
        SubtaskState checkpointStateHandles = new SubtaskState(valueSizeTuple, opStateBackend, null, keyedStateBackend, keyedStateRaw);
        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(jid, jobVertex1.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(), checkpointId, new CheckpointMetrics(), checkpointStateHandles);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint);
    }
    //vertex 2
    final List<ChainedStateHandle<OperatorStateHandle>> expectedOpStatesBackend = new ArrayList<>(jobVertex2.getParallelism());
    final List<ChainedStateHandle<OperatorStateHandle>> expectedOpStatesRaw = new ArrayList<>(jobVertex2.getParallelism());
    for (int index = 0; index < jobVertex2.getParallelism(); index++) {
        KeyGroupsStateHandle keyedStateBackend = generateKeyGroupState(jobVertexID2, keyGroupPartitions2.get(index), false);
        KeyGroupsStateHandle keyedStateRaw = generateKeyGroupState(jobVertexID2, keyGroupPartitions2.get(index), true);
        ChainedStateHandle<OperatorStateHandle> opStateBackend = generateChainedPartitionableStateHandle(jobVertexID2, index, 2, 8, false);
        ChainedStateHandle<OperatorStateHandle> opStateRaw = generateChainedPartitionableStateHandle(jobVertexID2, index, 2, 8, true);
        expectedOpStatesBackend.add(opStateBackend);
        expectedOpStatesRaw.add(opStateRaw);
        SubtaskState checkpointStateHandles = new SubtaskState(new ChainedStateHandle<>(Collections.<StreamStateHandle>singletonList(null)), opStateBackend, opStateRaw, keyedStateBackend, keyedStateRaw);
        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(jid, jobVertex2.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(), checkpointId, new CheckpointMetrics(), checkpointStateHandles);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint);
    }
    List<CompletedCheckpoint> completedCheckpoints = coord.getSuccessfulCheckpoints();
    assertEquals(1, completedCheckpoints.size());
    Map<JobVertexID, ExecutionJobVertex> tasks = new HashMap<>();
    List<KeyGroupRange> newKeyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, newParallelism2);
    final ExecutionJobVertex newJobVertex1 = mockExecutionJobVertex(jobVertexID1, parallelism1, maxParallelism1);
    // rescale vertex 2
    final ExecutionJobVertex newJobVertex2 = mockExecutionJobVertex(jobVertexID2, newParallelism2, maxParallelism2);
    tasks.put(jobVertexID1, newJobVertex1);
    tasks.put(jobVertexID2, newJobVertex2);
    coord.restoreLatestCheckpointedState(tasks, true, false);
    // verify the restored state
    verifyStateRestore(jobVertexID1, newJobVertex1, keyGroupPartitions1);
    List<List<Collection<OperatorStateHandle>>> actualOpStatesBackend = new ArrayList<>(newJobVertex2.getParallelism());
    List<List<Collection<OperatorStateHandle>>> actualOpStatesRaw = new ArrayList<>(newJobVertex2.getParallelism());
    for (int i = 0; i < newJobVertex2.getParallelism(); i++) {
        KeyGroupsStateHandle originalKeyedStateBackend = generateKeyGroupState(jobVertexID2, newKeyGroupPartitions2.get(i), false);
        KeyGroupsStateHandle originalKeyedStateRaw = generateKeyGroupState(jobVertexID2, newKeyGroupPartitions2.get(i), true);
        TaskStateHandles taskStateHandles = newJobVertex2.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskStateHandles();
        ChainedStateHandle<StreamStateHandle> operatorState = taskStateHandles.getLegacyOperatorState();
        List<Collection<OperatorStateHandle>> opStateBackend = taskStateHandles.getManagedOperatorState();
        List<Collection<OperatorStateHandle>> opStateRaw = taskStateHandles.getRawOperatorState();
        Collection<KeyGroupsStateHandle> keyGroupStateBackend = taskStateHandles.getManagedKeyedState();
        Collection<KeyGroupsStateHandle> keyGroupStateRaw = taskStateHandles.getRawKeyedState();
        actualOpStatesBackend.add(opStateBackend);
        actualOpStatesRaw.add(opStateRaw);
        assertNull(operatorState);
        compareKeyedState(Collections.singletonList(originalKeyedStateBackend), keyGroupStateBackend);
        compareKeyedState(Collections.singletonList(originalKeyedStateRaw), keyGroupStateRaw);
    }
    comparePartitionableState(expectedOpStatesBackend, actualOpStatesBackend);
    comparePartitionableState(expectedOpStatesRaw, actualOpStatesRaw);
}
Also used: HashMap (java.util.HashMap), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), ArrayList (java.util.ArrayList), KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange), ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle), StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle), ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle), ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex), List (java.util.List), AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint), DeclineCheckpoint (org.apache.flink.runtime.messages.checkpoint.DeclineCheckpoint), TaskStateHandles (org.apache.flink.runtime.state.TaskStateHandles), Collection (java.util.Collection), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle), JobID (org.apache.flink.api.common.JobID)
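The test leans on StateAssignmentOperation.createKeyGroupPartitions to slice the key-group space [0, maxParallelism) into one contiguous range per subtask. The sketch below reproduces that idea with plain ints so it runs standalone; the boundary arithmetic is an assumption about an even contiguous split, not a copy of Flink's method, whose exact boundaries may differ.

import java.util.ArrayList;
import java.util.List;

public class KeyGroupPartitionSketch {

    /** One {start, end} pair per subtask, covering [0, maxParallelism) contiguously. */
    static List<int[]> createKeyGroupPartitions(int maxParallelism, int parallelism) {
        List<int[]> ranges = new ArrayList<>(parallelism);
        for (int i = 0; i < parallelism; i++) {
            int start = i * maxParallelism / parallelism;
            int end = (i + 1) * maxParallelism / parallelism - 1;
            ranges.add(new int[] {start, end});
        }
        return ranges;
    }

    public static void main(String[] args) {
        // Same numbers as the test: maxParallelism2 = 13 rescaled to newParallelism2 = 2.
        for (int[] r : createKeyGroupPartitions(13, 2)) {
            System.out.println("[" + r[0] + ", " + r[1] + "]");
        }
        // Prints [0, 5] and [6, 12]: each key group is owned by exactly one subtask.
    }
}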

Example 4 with ChainedStateHandle

Use of org.apache.flink.runtime.state.ChainedStateHandle in project flink by apache: class SavepointV1Serializer, method deserializeSubtaskState.

private static SubtaskState deserializeSubtaskState(DataInputStream dis) throws IOException {
    // Duration field has been removed from SubtaskState
    long ignoredDuration = dis.readLong();
    int len = dis.readInt();
    List<StreamStateHandle> nonPartitionableState = new ArrayList<>(len);
    for (int i = 0; i < len; ++i) {
        StreamStateHandle streamStateHandle = deserializeStreamStateHandle(dis);
        nonPartitionableState.add(streamStateHandle);
    }
    len = dis.readInt();
    List<OperatorStateHandle> operatorStateBackend = new ArrayList<>(len);
    for (int i = 0; i < len; ++i) {
        OperatorStateHandle streamStateHandle = deserializeOperatorStateHandle(dis);
        operatorStateBackend.add(streamStateHandle);
    }
    len = dis.readInt();
    List<OperatorStateHandle> operatorStateStream = new ArrayList<>(len);
    for (int i = 0; i < len; ++i) {
        OperatorStateHandle streamStateHandle = deserializeOperatorStateHandle(dis);
        operatorStateStream.add(streamStateHandle);
    }
    KeyGroupsStateHandle keyedStateBackend = deserializeKeyGroupStateHandle(dis);
    KeyGroupsStateHandle keyedStateStream = deserializeKeyGroupStateHandle(dis);
    ChainedStateHandle<StreamStateHandle> nonPartitionableStateChain = new ChainedStateHandle<>(nonPartitionableState);
    ChainedStateHandle<OperatorStateHandle> operatorStateBackendChain = new ChainedStateHandle<>(operatorStateBackend);
    ChainedStateHandle<OperatorStateHandle> operatorStateStreamChain = new ChainedStateHandle<>(operatorStateStream);
    return new SubtaskState(nonPartitionableStateChain, operatorStateBackendChain, operatorStateStreamChain, keyedStateBackend, keyedStateStream);
}
Also used: StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle), ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle), SubtaskState (org.apache.flink.runtime.checkpoint.SubtaskState), ArrayList (java.util.ArrayList), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle)
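Only the read side appears in this excerpt. For the byte layout to round-trip, the write side would have to emit fields in exactly the order the reader consumes them; the hedged mirror below makes that order explicit. The serialize* helpers are assumed counterparts of the deserialize* helpers used above and are not part of this excerpt.

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;

import org.apache.flink.runtime.state.KeyGroupsStateHandle;
import org.apache.flink.runtime.state.OperatorStateHandle;
import org.apache.flink.runtime.state.StreamStateHandle;

// Hypothetical mirror of deserializeSubtaskState, field for field.
static void writeSubtaskState(
        List<StreamStateHandle> nonPartitionableState,
        List<OperatorStateHandle> operatorStateBackend,
        List<OperatorStateHandle> operatorStateStream,
        KeyGroupsStateHandle keyedStateBackend,
        KeyGroupsStateHandle keyedStateStream,
        DataOutputStream dos) throws IOException {

    // Duration was removed from SubtaskState; the reader still skips a long.
    dos.writeLong(-1L);

    dos.writeInt(nonPartitionableState.size());
    for (StreamStateHandle handle : nonPartitionableState) {
        serializeStreamStateHandle(handle, dos); // assumed helper
    }

    dos.writeInt(operatorStateBackend.size());
    for (OperatorStateHandle handle : operatorStateBackend) {
        serializeOperatorStateHandle(handle, dos); // assumed helper
    }

    dos.writeInt(operatorStateStream.size());
    for (OperatorStateHandle handle : operatorStateStream) {
        serializeOperatorStateHandle(handle, dos); // assumed helper
    }

    serializeKeyGroupStateHandle(keyedStateBackend, dos); // assumed helper
    serializeKeyGroupStateHandle(keyedStateStream, dos); // assumed helper
}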

Example 5 with ChainedStateHandle

Use of org.apache.flink.runtime.state.ChainedStateHandle in project flink by apache: class CheckpointCoordinatorRestoringTest, method testStateRecoveryWithTopologyChange.

/**
 * Old topology: [operator1, operator2] * parallelism1 -> [operator3, operator4] * parallelism2
 *
 * <p>New topology:
 *
 * <p>[operator5, operator1, operator2] * newParallelism1 -> [operator3, operator6] * newParallelism2
 */
public void testStateRecoveryWithTopologyChange(TestScaleType scaleType) throws Exception {
    /*
         * Old topology
         * CHAIN(op1 -> op2) * parallelism1 -> CHAIN(op3 -> op4) * parallelism2
         */
    Tuple2<JobVertexID, OperatorID> id1 = generateIDPair();
    Tuple2<JobVertexID, OperatorID> id2 = generateIDPair();
    int parallelism1 = 10;
    int maxParallelism1 = 64;
    Tuple2<JobVertexID, OperatorID> id3 = generateIDPair();
    Tuple2<JobVertexID, OperatorID> id4 = generateIDPair();
    int parallelism2 = 10;
    int maxParallelism2 = 64;
    List<KeyGroupRange> keyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, parallelism2);
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    // prepare vertex1 state
    for (Tuple2<JobVertexID, OperatorID> id : Arrays.asList(id1, id2)) {
        OperatorState taskState = new OperatorState(id.f1, parallelism1, maxParallelism1);
        operatorStates.put(id.f1, taskState);
        for (int index = 0; index < taskState.getParallelism(); index++) {
            OperatorSubtaskState subtaskState = OperatorSubtaskState.builder().setManagedOperatorState(generatePartitionableStateHandle(id.f0, index, 2, 8, false)).setRawOperatorState(generatePartitionableStateHandle(id.f0, index, 2, 8, true)).build();
            taskState.putState(index, subtaskState);
        }
    }
    List<List<ChainedStateHandle<OperatorStateHandle>>> expectedManagedOperatorStates = new ArrayList<>();
    List<List<ChainedStateHandle<OperatorStateHandle>>> expectedRawOperatorStates = new ArrayList<>();
    // prepare vertex2 state
    for (Tuple2<JobVertexID, OperatorID> id : Arrays.asList(id3, id4)) {
        OperatorState operatorState = new OperatorState(id.f1, parallelism2, maxParallelism2);
        operatorStates.put(id.f1, operatorState);
        List<ChainedStateHandle<OperatorStateHandle>> expectedManagedOperatorState = new ArrayList<>();
        List<ChainedStateHandle<OperatorStateHandle>> expectedRawOperatorState = new ArrayList<>();
        expectedManagedOperatorStates.add(expectedManagedOperatorState);
        expectedRawOperatorStates.add(expectedRawOperatorState);
        for (int index = 0; index < operatorState.getParallelism(); index++) {
            final OperatorSubtaskState.Builder stateBuilder = OperatorSubtaskState.builder();
            OperatorStateHandle subManagedOperatorState = generateChainedPartitionableStateHandle(id.f0, index, 2, 8, false).get(0);
            OperatorStateHandle subRawOperatorState = generateChainedPartitionableStateHandle(id.f0, index, 2, 8, true).get(0);
            if (id.f0.equals(id3.f0)) {
                stateBuilder.setManagedKeyedState(generateKeyGroupState(id.f0, keyGroupPartitions2.get(index), false));
            }
            if (id.f0.equals(id3.f0)) {
                stateBuilder.setRawKeyedState(generateKeyGroupState(id.f0, keyGroupPartitions2.get(index), true));
            }
            expectedManagedOperatorState.add(ChainedStateHandle.wrapSingleHandle(subManagedOperatorState));
            expectedRawOperatorState.add(ChainedStateHandle.wrapSingleHandle(subRawOperatorState));
            OperatorSubtaskState subtaskState = stateBuilder.setManagedOperatorState(subManagedOperatorState).setRawOperatorState(subRawOperatorState).build();
            operatorState.putState(index, subtaskState);
        }
    }
    /*
         * New topology
         * CHAIN(op5 -> op1 -> op2) * newParallelism1 -> CHAIN(op3 -> op6) * newParallelism2
         */
    Tuple2<JobVertexID, OperatorID> id5 = generateIDPair();
    int newParallelism1 = 10;
    Tuple2<JobVertexID, OperatorID> id6 = generateIDPair();
    int newParallelism2 = parallelism2;
    if (scaleType == TestScaleType.INCREASE_PARALLELISM) {
        newParallelism2 = 20;
    } else if (scaleType == TestScaleType.DECREASE_PARALLELISM) {
        newParallelism2 = 8;
    }
    List<KeyGroupRange> newKeyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, newParallelism2);
    ExecutionGraph newGraph = new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder().addJobVertex(id5.f0, newParallelism1, maxParallelism1, Stream.of(id2.f1, id1.f1, id5.f1).map(OperatorIDPair::generatedIDOnly).collect(Collectors.toList()), true).addJobVertex(id3.f0, newParallelism2, maxParallelism2, Stream.of(id6.f1, id3.f1).map(OperatorIDPair::generatedIDOnly).collect(Collectors.toList()), true).build();
    ExecutionJobVertex newJobVertex1 = newGraph.getJobVertex(id5.f0);
    ExecutionJobVertex newJobVertex2 = newGraph.getJobVertex(id3.f0);
    Set<ExecutionJobVertex> tasks = new HashSet<>();
    tasks.add(newJobVertex1);
    tasks.add(newJobVertex2);
    CompletedCheckpoint completedCheckpoint = new CompletedCheckpoint(newGraph.getJobID(), 2, System.currentTimeMillis(), System.currentTimeMillis() + 3000, operatorStates, Collections.<MasterState>emptyList(), CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION), new TestCompletedCheckpointStorageLocation());
    // set up the coordinator and validate the initial state
    SharedStateRegistry sharedStateRegistry = SharedStateRegistry.DEFAULT_FACTORY.create(Executors.directExecutor(), emptyList());
    CheckpointCoordinator coord = new CheckpointCoordinatorBuilder().setExecutionGraph(newGraph).setCompletedCheckpointStore(storeFor(sharedStateRegistry, () -> {
    }, completedCheckpoint)).setTimer(manuallyTriggeredScheduledExecutor).build();
    coord.restoreLatestCheckpointedStateToAll(tasks, true);
    for (int i = 0; i < newJobVertex1.getParallelism(); i++) {
        final List<OperatorIDPair> operatorIDs = newJobVertex1.getOperatorIDs();
        JobManagerTaskRestore taskRestore = newJobVertex1.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskRestore();
        Assert.assertEquals(2L, taskRestore.getRestoreCheckpointId());
        TaskStateSnapshot stateSnapshot = taskRestore.getTaskStateSnapshot();
        OperatorSubtaskState headOpState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIDs.size() - 1).getGeneratedOperatorID());
        assertTrue(headOpState.getManagedKeyedState().isEmpty());
        assertTrue(headOpState.getRawKeyedState().isEmpty());
        // operator5
        {
            int operatorIndexInChain = 2;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            assertTrue(opState.getManagedOperatorState().isEmpty());
            assertTrue(opState.getRawOperatorState().isEmpty());
        }
        // operator1
        {
            int operatorIndexInChain = 1;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            OperatorStateHandle expectedManagedOpState = generatePartitionableStateHandle(id1.f0, i, 2, 8, false);
            OperatorStateHandle expectedRawOpState = generatePartitionableStateHandle(id1.f0, i, 2, 8, true);
            Collection<OperatorStateHandle> managedOperatorState = opState.getManagedOperatorState();
            assertEquals(1, managedOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedManagedOpState.openInputStream(), managedOperatorState.iterator().next().openInputStream()));
            Collection<OperatorStateHandle> rawOperatorState = opState.getRawOperatorState();
            assertEquals(1, rawOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedRawOpState.openInputStream(), rawOperatorState.iterator().next().openInputStream()));
        }
        // operator2
        {
            int operatorIndexInChain = 0;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            OperatorStateHandle expectedManagedOpState = generatePartitionableStateHandle(id2.f0, i, 2, 8, false);
            OperatorStateHandle expectedRawOpState = generatePartitionableStateHandle(id2.f0, i, 2, 8, true);
            Collection<OperatorStateHandle> managedOperatorState = opState.getManagedOperatorState();
            assertEquals(1, managedOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedManagedOpState.openInputStream(), managedOperatorState.iterator().next().openInputStream()));
            Collection<OperatorStateHandle> rawOperatorState = opState.getRawOperatorState();
            assertEquals(1, rawOperatorState.size());
            assertTrue(CommonTestUtils.isStreamContentEqual(expectedRawOpState.openInputStream(), rawOperatorState.iterator().next().openInputStream()));
        }
    }
    List<List<Collection<OperatorStateHandle>>> actualManagedOperatorStates = new ArrayList<>(newJobVertex2.getParallelism());
    List<List<Collection<OperatorStateHandle>>> actualRawOperatorStates = new ArrayList<>(newJobVertex2.getParallelism());
    for (int i = 0; i < newJobVertex2.getParallelism(); i++) {
        final List<OperatorIDPair> operatorIDs = newJobVertex2.getOperatorIDs();
        JobManagerTaskRestore taskRestore = newJobVertex2.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskRestore();
        Assert.assertEquals(2L, taskRestore.getRestoreCheckpointId());
        TaskStateSnapshot stateSnapshot = taskRestore.getTaskStateSnapshot();
        // operator 3
        {
            int operatorIndexInChain = 1;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            List<Collection<OperatorStateHandle>> actualSubManagedOperatorState = new ArrayList<>(1);
            actualSubManagedOperatorState.add(opState.getManagedOperatorState());
            List<Collection<OperatorStateHandle>> actualSubRawOperatorState = new ArrayList<>(1);
            actualSubRawOperatorState.add(opState.getRawOperatorState());
            actualManagedOperatorStates.add(actualSubManagedOperatorState);
            actualRawOperatorStates.add(actualSubRawOperatorState);
        }
        // operator 6
        {
            int operatorIndexInChain = 0;
            OperatorSubtaskState opState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIndexInChain).getGeneratedOperatorID());
            assertTrue(opState.getManagedOperatorState().isEmpty());
            assertTrue(opState.getRawOperatorState().isEmpty());
        }
        KeyGroupsStateHandle originalKeyedStateBackend = generateKeyGroupState(id3.f0, newKeyGroupPartitions2.get(i), false);
        KeyGroupsStateHandle originalKeyedStateRaw = generateKeyGroupState(id3.f0, newKeyGroupPartitions2.get(i), true);
        OperatorSubtaskState headOpState = stateSnapshot.getSubtaskStateByOperatorID(operatorIDs.get(operatorIDs.size() - 1).getGeneratedOperatorID());
        Collection<KeyedStateHandle> keyedStateBackend = headOpState.getManagedKeyedState();
        Collection<KeyedStateHandle> keyGroupStateRaw = headOpState.getRawKeyedState();
        compareKeyedState(singletonList(originalKeyedStateBackend), keyedStateBackend);
        compareKeyedState(singletonList(originalKeyedStateRaw), keyGroupStateRaw);
    }
    comparePartitionableState(expectedManagedOperatorStates.get(0), actualManagedOperatorStates);
    comparePartitionableState(expectedRawOperatorStates.get(0), actualRawOperatorStates);
}
Also used: HashMap (java.util.HashMap), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange), ArrayList (java.util.ArrayList), TestCompletedCheckpointStorageLocation (org.apache.flink.runtime.state.testutils.TestCompletedCheckpointStorageLocation), OperatorID (org.apache.flink.runtime.jobgraph.OperatorID), KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle), ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), SharedStateRegistry (org.apache.flink.runtime.state.SharedStateRegistry), ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex), Collections.singletonList (java.util.Collections.singletonList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), HashSet (java.util.HashSet), AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint), CheckpointCoordinatorBuilder (org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder), ExecutionGraph (org.apache.flink.runtime.executiongraph.ExecutionGraph), Collection (java.util.Collection), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle), OperatorIDPair (org.apache.flink.runtime.OperatorIDPair)
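This example builds single-element chains with ChainedStateHandle.wrapSingleHandle, while the earlier examples pass lists to the constructor. Judging purely from how both forms are used in these tests, they should be interchangeable for a one-operator chain; the check below records that assumption rather than quoting the implementation.

import java.util.Collections;

import org.apache.flink.runtime.state.ChainedStateHandle;
import org.apache.flink.runtime.state.OperatorStateHandle;

public class WrapSingleHandleSketch {

    static void check(OperatorStateHandle handle) {
        ChainedStateHandle<OperatorStateHandle> viaFactory =
                ChainedStateHandle.wrapSingleHandle(handle);
        ChainedStateHandle<OperatorStateHandle> viaList =
                new ChainedStateHandle<>(Collections.singletonList(handle));

        // Both forms should expose a chain of length 1 holding the same handle.
        assert viaFactory.getLength() == 1 && viaFactory.get(0) == handle;
        assert viaList.getLength() == 1 && viaList.get(0) == handle;
    }
}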

Aggregations

ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle) 6
KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle) 6
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle) 6
ArrayList (java.util.ArrayList) 5
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID) 4
StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle) 4
Collection (java.util.Collection) 3
HashMap (java.util.HashMap) 3
List (java.util.List) 3
ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex) 3
AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint) 3
KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange) 3
ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle) 3
Collections.emptyList (java.util.Collections.emptyList) 2
Collections.singletonList (java.util.Collections.singletonList) 2
HashSet (java.util.HashSet) 2
Random (java.util.Random) 2
OperatorIDPair (org.apache.flink.runtime.OperatorIDPair) 2
CheckpointCoordinatorBuilder (org.apache.flink.runtime.checkpoint.CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder) 2
SubtaskState (org.apache.flink.runtime.checkpoint.SubtaskState) 2