Example 11 with KeyGroupRange

Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.

Source: class AbstractMigrationRestoreStrategy, method deserialize.

@Override
public StateTable<K, N, S> deserialize(String stateName, HeapKeyedStateBackend<K> stateBackend) throws IOException {
    Preconditions.checkNotNull(stateName, "State name is null. Cannot deserialize snapshot.");
    Preconditions.checkNotNull(stateBackend, "State backend is null. Cannot deserialize snapshot.");
    final KeyGroupRange keyGroupRange = stateBackend.getKeyGroupRange();
    Preconditions.checkState(1 == keyGroupRange.getNumberOfKeyGroups(), "Unexpected number of key-groups for restoring from Flink 1.1");
    TypeSerializer<N> patchedNamespaceSerializer = this.namespaceSerializer;
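    // Flink 1.1 snapshots wrote VoidSerializer where no user namespace was set; map it to
    // the dedicated VoidNamespaceSerializer before registering the state meta info.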
    if (patchedNamespaceSerializer instanceof VoidSerializer) {
        patchedNamespaceSerializer = (TypeSerializer<N>) VoidNamespaceSerializer.INSTANCE;
    }
    RegisteredBackendStateMetaInfo<N, S> registeredBackendStateMetaInfo = new RegisteredBackendStateMetaInfo<>(StateDescriptor.Type.UNKNOWN, stateName, patchedNamespaceSerializer, stateSerializer);
    final StateTable<K, N, S> stateTable = stateBackend.newStateTable(registeredBackendStateMetaInfo);
    final DataInputView inView = openDataInputView();
    final int keyGroup = keyGroupRange.getStartKeyGroup();
    final int numNamespaces = inView.readInt();
    for (int i = 0; i < numNamespaces; i++) {
        N namespace = namespaceSerializer.deserialize(inView);
        if (null == namespace) {
            namespace = (N) VoidNamespace.INSTANCE;
        }
        final int numKV = inView.readInt();
        for (int j = 0; j < numKV; j++) {
            K key = keySerializer.deserialize(inView);
            S value = stateSerializer.deserialize(inView);
            stateTable.put(key, keyGroup, namespace, value);
        }
    }
    return stateTable;
}
Also used: DataInputView (org.apache.flink.core.memory.DataInputView), VoidSerializer (org.apache.flink.api.common.typeutils.base.VoidSerializer), RegisteredBackendStateMetaInfo (org.apache.flink.runtime.state.RegisteredBackendStateMetaInfo), KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange)
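
The Preconditions.checkState call in this example leans on KeyGroupRange knowing how many key groups it spans. Below is a minimal sketch of that invariant (class name ours), using only the public constructor and accessors that already appear in these examples:

import org.apache.flink.runtime.state.KeyGroupRange;

public class SingleKeyGroupSketch {
    public static void main(String[] args) {
        // A range covering exactly one key group, as the Flink 1.1 restore path above expects.
        KeyGroupRange range = new KeyGroupRange(0, 0);
        // The count is endKeyGroup - startKeyGroup + 1, so the [0, 0] range reports 1.
        System.out.println(range.getNumberOfKeyGroups()); // prints 1
        System.out.println(range.getStartKeyGroup()); // prints 0
    }
}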

Example 12 with KeyGroupRange

Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.

Source: class CheckpointCoordinatorTest, method testRestoreLatestCheckpointedStateWithChangingParallelism.

/**
 * Tests the checkpoint restoration with changing parallelism of a job vertex with partitioned
 * state.
 *
 * @throws Exception
 */
private void testRestoreLatestCheckpointedStateWithChangingParallelism(boolean scaleOut) throws Exception {
    final JobID jid = new JobID();
    final long timestamp = System.currentTimeMillis();
    final JobVertexID jobVertexID1 = new JobVertexID();
    final JobVertexID jobVertexID2 = new JobVertexID();
    int parallelism1 = 3;
    int parallelism2 = scaleOut ? 2 : 13;
    int maxParallelism1 = 42;
    int maxParallelism2 = 13;
    int newParallelism2 = scaleOut ? 13 : 2;
    final ExecutionJobVertex jobVertex1 = mockExecutionJobVertex(jobVertexID1, parallelism1, maxParallelism1);
    final ExecutionJobVertex jobVertex2 = mockExecutionJobVertex(jobVertexID2, parallelism2, maxParallelism2);
    List<ExecutionVertex> allExecutionVertices = new ArrayList<>(parallelism1 + parallelism2);
    allExecutionVertices.addAll(Arrays.asList(jobVertex1.getTaskVertices()));
    allExecutionVertices.addAll(Arrays.asList(jobVertex2.getTaskVertices()));
    ExecutionVertex[] arrayExecutionVertices = allExecutionVertices.toArray(new ExecutionVertex[allExecutionVertices.size()]);
    // set up the coordinator and validate the initial state
    CheckpointCoordinator coord = new CheckpointCoordinator(jid, 600000, 600000, 0, Integer.MAX_VALUE, ExternalizedCheckpointSettings.none(), arrayExecutionVertices, arrayExecutionVertices, arrayExecutionVertices, new StandaloneCheckpointIDCounter(), new StandaloneCompletedCheckpointStore(1), null, Executors.directExecutor());
    // trigger the checkpoint
    coord.triggerCheckpoint(timestamp, false);
    assertEquals(1, coord.getPendingCheckpoints().size());
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, 0L);
    List<KeyGroupRange> keyGroupPartitions1 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism1, parallelism1);
    List<KeyGroupRange> keyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, parallelism2);
    // vertex 1: acknowledge the checkpoint for each subtask
    for (int index = 0; index < jobVertex1.getParallelism(); index++) {
        ChainedStateHandle<StreamStateHandle> valueSizeTuple = generateStateForVertex(jobVertexID1, index);
        ChainedStateHandle<OperatorStateHandle> opStateBackend = generateChainedPartitionableStateHandle(jobVertexID1, index, 2, 8, false);
        KeyGroupsStateHandle keyedStateBackend = generateKeyGroupState(jobVertexID1, keyGroupPartitions1.get(index), false);
        KeyGroupsStateHandle keyedStateRaw = generateKeyGroupState(jobVertexID1, keyGroupPartitions1.get(index), true);
        SubtaskState checkpointStateHandles = new SubtaskState(valueSizeTuple, opStateBackend, null, keyedStateBackend, keyedStateRaw);
        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(jid, jobVertex1.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(), checkpointId, new CheckpointMetrics(), checkpointStateHandles);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint);
    }
    // vertex 2: acknowledge the checkpoint for each subtask, keeping the operator state for the later comparison
    final List<ChainedStateHandle<OperatorStateHandle>> expectedOpStatesBackend = new ArrayList<>(jobVertex2.getParallelism());
    final List<ChainedStateHandle<OperatorStateHandle>> expectedOpStatesRaw = new ArrayList<>(jobVertex2.getParallelism());
    for (int index = 0; index < jobVertex2.getParallelism(); index++) {
        KeyGroupsStateHandle keyedStateBackend = generateKeyGroupState(jobVertexID2, keyGroupPartitions2.get(index), false);
        KeyGroupsStateHandle keyedStateRaw = generateKeyGroupState(jobVertexID2, keyGroupPartitions2.get(index), true);
        ChainedStateHandle<OperatorStateHandle> opStateBackend = generateChainedPartitionableStateHandle(jobVertexID2, index, 2, 8, false);
        ChainedStateHandle<OperatorStateHandle> opStateRaw = generateChainedPartitionableStateHandle(jobVertexID2, index, 2, 8, true);
        expectedOpStatesBackend.add(opStateBackend);
        expectedOpStatesRaw.add(opStateRaw);
        SubtaskState checkpointStateHandles = new SubtaskState(new ChainedStateHandle<>(Collections.<StreamStateHandle>singletonList(null)), opStateBackend, opStateRaw, keyedStateBackend, keyedStateRaw);
        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(jid, jobVertex2.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(), checkpointId, new CheckpointMetrics(), checkpointStateHandles);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint);
    }
    List<CompletedCheckpoint> completedCheckpoints = coord.getSuccessfulCheckpoints();
    assertEquals(1, completedCheckpoints.size());
    Map<JobVertexID, ExecutionJobVertex> tasks = new HashMap<>();
    List<KeyGroupRange> newKeyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, newParallelism2);
    final ExecutionJobVertex newJobVertex1 = mockExecutionJobVertex(jobVertexID1, parallelism1, maxParallelism1);
    // rescale vertex 2
    final ExecutionJobVertex newJobVertex2 = mockExecutionJobVertex(jobVertexID2, newParallelism2, maxParallelism2);
    tasks.put(jobVertexID1, newJobVertex1);
    tasks.put(jobVertexID2, newJobVertex2);
    coord.restoreLatestCheckpointedState(tasks, true, false);
    // verify the restored state
    verifyStateRestore(jobVertexID1, newJobVertex1, keyGroupPartitions1);
    List<List<Collection<OperatorStateHandle>>> actualOpStatesBackend = new ArrayList<>(newJobVertex2.getParallelism());
    List<List<Collection<OperatorStateHandle>>> actualOpStatesRaw = new ArrayList<>(newJobVertex2.getParallelism());
    for (int i = 0; i < newJobVertex2.getParallelism(); i++) {
        KeyGroupsStateHandle originalKeyedStateBackend = generateKeyGroupState(jobVertexID2, newKeyGroupPartitions2.get(i), false);
        KeyGroupsStateHandle originalKeyedStateRaw = generateKeyGroupState(jobVertexID2, newKeyGroupPartitions2.get(i), true);
        TaskStateHandles taskStateHandles = newJobVertex2.getTaskVertices()[i].getCurrentExecutionAttempt().getTaskStateHandles();
        ChainedStateHandle<StreamStateHandle> operatorState = taskStateHandles.getLegacyOperatorState();
        List<Collection<OperatorStateHandle>> opStateBackend = taskStateHandles.getManagedOperatorState();
        List<Collection<OperatorStateHandle>> opStateRaw = taskStateHandles.getRawOperatorState();
        Collection<KeyGroupsStateHandle> keyGroupStateBackend = taskStateHandles.getManagedKeyedState();
        Collection<KeyGroupsStateHandle> keyGroupStateRaw = taskStateHandles.getRawKeyedState();
        actualOpStatesBackend.add(opStateBackend);
        actualOpStatesRaw.add(opStateRaw);
        assertNull(operatorState);
        compareKeyedState(Collections.singletonList(originalKeyedStateBackend), keyGroupStateBackend);
        compareKeyedState(Collections.singletonList(originalKeyedStateRaw), keyGroupStateRaw);
    }
    comparePartitionableState(expectedOpStatesBackend, actualOpStatesBackend);
    comparePartitionableState(expectedOpStatesRaw, actualOpStatesRaw);
}
Also used: HashMap (java.util.HashMap), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), ArrayList (java.util.ArrayList), KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange), ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), ChainedStateHandle (org.apache.flink.runtime.state.ChainedStateHandle), StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle), ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle), ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex), List (java.util.List), AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint), DeclineCheckpoint (org.apache.flink.runtime.messages.checkpoint.DeclineCheckpoint), TaskStateHandles (org.apache.flink.runtime.state.TaskStateHandles), Collection (java.util.Collection), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle), JobID (org.apache.flink.api.common.JobID)
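
Both the scale-out and scale-in paths of this test rely on StateAssignmentOperation.createKeyGroupPartitions to split the key-group space across subtasks. The standalone sketch below (class and method names ours) reproduces the partitioning arithmetic as we understand it from KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex; treat the exact formula as an assumption that may differ across Flink versions:

import java.util.ArrayList;
import java.util.List;
import org.apache.flink.runtime.state.KeyGroupRange;

public class KeyGroupPartitionSketch {

    // Assumed formula: each subtask i of `parallelism` takes a contiguous slice of the
    // [0, maxParallelism) key-group space, with slice sizes differing by at most one.
    static List<KeyGroupRange> partition(int maxParallelism, int parallelism) {
        List<KeyGroupRange> result = new ArrayList<>(parallelism);
        for (int i = 0; i < parallelism; i++) {
            int start = (i * maxParallelism + parallelism - 1) / parallelism;
            int end = ((i + 1) * maxParallelism - 1) / parallelism;
            result.add(new KeyGroupRange(start, end));
        }
        return result;
    }

    public static void main(String[] args) {
        // With maxParallelism2 = 13 rescaled to newParallelism2 = 2 (the scale-in case above),
        // subtask 0 gets [0, 6] and subtask 1 gets [7, 12]: every key group is owned exactly once.
        System.out.println(partition(13, 2));
    }
}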

Example 13 with KeyGroupRange

Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.

Source: class CheckpointCoordinatorTest, method testRestoreLatestCheckpointedState.

/**
 * Tests that the checkpointed partitioned and non-partitioned state is assigned properly to
 * the {@link Execution} upon recovery.
 *
 * @throws Exception
 */
@Test
public void testRestoreLatestCheckpointedState() throws Exception {
    final JobID jid = new JobID();
    final long timestamp = System.currentTimeMillis();
    final JobVertexID jobVertexID1 = new JobVertexID();
    final JobVertexID jobVertexID2 = new JobVertexID();
    int parallelism1 = 3;
    int parallelism2 = 2;
    int maxParallelism1 = 42;
    int maxParallelism2 = 13;
    final ExecutionJobVertex jobVertex1 = mockExecutionJobVertex(jobVertexID1, parallelism1, maxParallelism1);
    final ExecutionJobVertex jobVertex2 = mockExecutionJobVertex(jobVertexID2, parallelism2, maxParallelism2);
    List<ExecutionVertex> allExecutionVertices = new ArrayList<>(parallelism1 + parallelism2);
    allExecutionVertices.addAll(Arrays.asList(jobVertex1.getTaskVertices()));
    allExecutionVertices.addAll(Arrays.asList(jobVertex2.getTaskVertices()));
    ExecutionVertex[] arrayExecutionVertices = allExecutionVertices.toArray(new ExecutionVertex[allExecutionVertices.size()]);
    // set up the coordinator and validate the initial state
    CheckpointCoordinator coord = new CheckpointCoordinator(jid, 600000, 600000, 0, Integer.MAX_VALUE, ExternalizedCheckpointSettings.none(), arrayExecutionVertices, arrayExecutionVertices, arrayExecutionVertices, new StandaloneCheckpointIDCounter(), new StandaloneCompletedCheckpointStore(1), null, Executors.directExecutor());
    // trigger the checkpoint
    coord.triggerCheckpoint(timestamp, false);
    assertEquals(1, coord.getPendingCheckpoints().size());
    long checkpointId = Iterables.getOnlyElement(coord.getPendingCheckpoints().keySet());
    CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, 0L);
    List<KeyGroupRange> keyGroupPartitions1 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism1, parallelism1);
    List<KeyGroupRange> keyGroupPartitions2 = StateAssignmentOperation.createKeyGroupPartitions(maxParallelism2, parallelism2);
    for (int index = 0; index < jobVertex1.getParallelism(); index++) {
        ChainedStateHandle<StreamStateHandle> nonPartitionedState = generateStateForVertex(jobVertexID1, index);
        ChainedStateHandle<OperatorStateHandle> partitionableState = generateChainedPartitionableStateHandle(jobVertexID1, index, 2, 8, false);
        KeyGroupsStateHandle partitionedKeyGroupState = generateKeyGroupState(jobVertexID1, keyGroupPartitions1.get(index), false);
        SubtaskState checkpointStateHandles = new SubtaskState(nonPartitionedState, partitionableState, null, partitionedKeyGroupState, null);
        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(jid, jobVertex1.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(), checkpointId, new CheckpointMetrics(), checkpointStateHandles);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint);
    }
    for (int index = 0; index < jobVertex2.getParallelism(); index++) {
        ChainedStateHandle<StreamStateHandle> nonPartitionedState = generateStateForVertex(jobVertexID2, index);
        ChainedStateHandle<OperatorStateHandle> partitionableState = generateChainedPartitionableStateHandle(jobVertexID2, index, 2, 8, false);
        KeyGroupsStateHandle partitionedKeyGroupState = generateKeyGroupState(jobVertexID2, keyGroupPartitions2.get(index), false);
        SubtaskState checkpointStateHandles = new SubtaskState(nonPartitionedState, partitionableState, null, partitionedKeyGroupState, null);
        AcknowledgeCheckpoint acknowledgeCheckpoint = new AcknowledgeCheckpoint(jid, jobVertex2.getTaskVertices()[index].getCurrentExecutionAttempt().getAttemptId(), checkpointId, new CheckpointMetrics(), checkpointStateHandles);
        coord.receiveAcknowledgeMessage(acknowledgeCheckpoint);
    }
    List<CompletedCheckpoint> completedCheckpoints = coord.getSuccessfulCheckpoints();
    assertEquals(1, completedCheckpoints.size());
    Map<JobVertexID, ExecutionJobVertex> tasks = new HashMap<>();
    tasks.put(jobVertexID1, jobVertex1);
    tasks.put(jobVertexID2, jobVertex2);
    coord.restoreLatestCheckpointedState(tasks, true, false);
    // verify the restored state
    verifyStateRestore(jobVertexID1, jobVertex1, keyGroupPartitions1);
    verifyStateRestore(jobVertexID2, jobVertex2, keyGroupPartitions2);
}
Also used: HashMap (java.util.HashMap), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), ArrayList (java.util.ArrayList), KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange), ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle), ByteStreamStateHandle (org.apache.flink.runtime.state.memory.ByteStreamStateHandle), ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex), AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint), DeclineCheckpoint (org.apache.flink.runtime.messages.checkpoint.DeclineCheckpoint), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
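
Helpers such as generateKeyGroupState and verifyStateRestore (referenced but not shown here) walk the key groups of each subtask's range. KeyGroupRange is iterable over its key-group ids and offers contains(int), so such a walk can be sketched as follows (class name ours):

import org.apache.flink.runtime.state.KeyGroupRange;

public class KeyGroupIterationSketch {
    public static void main(String[] args) {
        // Iterate the four key groups of [0, 3]; a state generator would emit one entry per id.
        KeyGroupRange range = new KeyGroupRange(0, 3);
        for (int keyGroup : range) {
            System.out.println("key group " + keyGroup + " contained: " + range.contains(keyGroup));
        }
    }
}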

Example 14 with KeyGroupRange

Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.

Source: class StateAssignmentOperation, method assignTaskStatesToOperatorInstances.

private static void assignTaskStatesToOperatorInstances(TaskState taskState, ExecutionJobVertex executionJobVertex) {
    final int oldParallelism = taskState.getParallelism();
    final int newParallelism = executionJobVertex.getParallelism();
    List<KeyGroupRange> keyGroupPartitions = createKeyGroupPartitions(executionJobVertex.getMaxParallelism(), newParallelism);
    final int chainLength = taskState.getChainLength();
    // operator chain index -> list of the operator states stored by all parallel instances at that chain index
    @SuppressWarnings("unchecked") List<OperatorStateHandle>[] parallelOpStatesBackend = new List[chainLength];
    @SuppressWarnings("unchecked") List<OperatorStateHandle>[] parallelOpStatesStream = new List[chainLength];
    List<KeyGroupsStateHandle> parallelKeyedStatesBackend = new ArrayList<>(oldParallelism);
    List<KeyGroupsStateHandle> parallelKeyedStateStream = new ArrayList<>(oldParallelism);
    for (int p = 0; p < oldParallelism; ++p) {
        SubtaskState subtaskState = taskState.getState(p);
        if (null != subtaskState) {
            collectParallelStatesByChainOperator(parallelOpStatesBackend, subtaskState.getManagedOperatorState());
            collectParallelStatesByChainOperator(parallelOpStatesStream, subtaskState.getRawOperatorState());
            KeyGroupsStateHandle keyedStateBackend = subtaskState.getManagedKeyedState();
            if (null != keyedStateBackend) {
                parallelKeyedStatesBackend.add(keyedStateBackend);
            }
            KeyGroupsStateHandle keyedStateStream = subtaskState.getRawKeyedState();
            if (null != keyedStateStream) {
                parallelKeyedStateStream.add(keyedStateStream);
            }
        }
    }
    // operator chain index -> lists with collected states (one collection per parallel subtask)
    @SuppressWarnings("unchecked") List<Collection<OperatorStateHandle>>[] partitionedParallelStatesBackend = new List[chainLength];
    @SuppressWarnings("unchecked") List<Collection<OperatorStateHandle>>[] partitionedParallelStatesStream = new List[chainLength];
    // TODO: here we could employ different redistribution strategies for state, e.g. union state.
    // For now we only offer round robin as the default.
    OperatorStateRepartitioner opStateRepartitioner = RoundRobinOperatorStateRepartitioner.INSTANCE;
    for (int chainIdx = 0; chainIdx < chainLength; ++chainIdx) {
        List<OperatorStateHandle> chainOpParallelStatesBackend = parallelOpStatesBackend[chainIdx];
        List<OperatorStateHandle> chainOpParallelStatesStream = parallelOpStatesStream[chainIdx];
        partitionedParallelStatesBackend[chainIdx] = applyRepartitioner(opStateRepartitioner, chainOpParallelStatesBackend, oldParallelism, newParallelism);
        partitionedParallelStatesStream[chainIdx] = applyRepartitioner(opStateRepartitioner, chainOpParallelStatesStream, oldParallelism, newParallelism);
    }
    for (int subTaskIdx = 0; subTaskIdx < newParallelism; ++subTaskIdx) {
        // non-partitioned state
        ChainedStateHandle<StreamStateHandle> nonPartitionableState = null;
        if (oldParallelism == newParallelism) {
            if (taskState.getState(subTaskIdx) != null) {
                nonPartitionableState = taskState.getState(subTaskIdx).getLegacyOperatorState();
            }
        }
        // partitionable state
        @SuppressWarnings("unchecked") Collection<OperatorStateHandle>[] subtaskOpStateBackend = new Collection[chainLength];
        @SuppressWarnings("unchecked") Collection<OperatorStateHandle>[] subtaskOpStateStream = new Collection[chainLength];
        List<Collection<OperatorStateHandle>> operatorStateFromBackend = Arrays.asList(subtaskOpStateBackend);
        List<Collection<OperatorStateHandle>> operatorStateFromStream = Arrays.asList(subtaskOpStateStream);
        for (int chainIdx = 0; chainIdx < partitionedParallelStatesBackend.length; ++chainIdx) {
            List<Collection<OperatorStateHandle>> redistributedOpStateBackend = partitionedParallelStatesBackend[chainIdx];
            List<Collection<OperatorStateHandle>> redistributedOpStateStream = partitionedParallelStatesStream[chainIdx];
            if (redistributedOpStateBackend != null) {
                operatorStateFromBackend.set(chainIdx, redistributedOpStateBackend.get(subTaskIdx));
            }
            if (redistributedOpStateStream != null) {
                operatorStateFromStream.set(chainIdx, redistributedOpStateStream.get(subTaskIdx));
            }
        }
        Execution currentExecutionAttempt = executionJobVertex.getTaskVertices()[subTaskIdx].getCurrentExecutionAttempt();
        List<KeyGroupsStateHandle> newKeyedStatesBackend;
        List<KeyGroupsStateHandle> newKeyedStateStream;
        if (oldParallelism == newParallelism) {
            SubtaskState subtaskState = taskState.getState(subTaskIdx);
            if (subtaskState != null) {
                KeyGroupsStateHandle oldKeyedStatesBackend = subtaskState.getManagedKeyedState();
                KeyGroupsStateHandle oldKeyedStatesStream = subtaskState.getRawKeyedState();
                newKeyedStatesBackend = oldKeyedStatesBackend != null ? Collections.singletonList(oldKeyedStatesBackend) : null;
                newKeyedStateStream = oldKeyedStatesStream != null ? Collections.singletonList(oldKeyedStatesStream) : null;
            } else {
                newKeyedStatesBackend = null;
                newKeyedStateStream = null;
            }
        } else {
            KeyGroupRange subtaskKeyGroupIds = keyGroupPartitions.get(subTaskIdx);
            newKeyedStatesBackend = getKeyGroupsStateHandles(parallelKeyedStatesBackend, subtaskKeyGroupIds);
            newKeyedStateStream = getKeyGroupsStateHandles(parallelKeyedStateStream, subtaskKeyGroupIds);
        }
        TaskStateHandles taskStateHandles = new TaskStateHandles(nonPartitionableState, operatorStateFromBackend, operatorStateFromStream, newKeyedStatesBackend, newKeyedStateStream);
        currentExecutionAttempt.setInitialState(taskStateHandles);
    }
}
Also used: KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange), ArrayList (java.util.ArrayList), KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle), TaskStateHandles (org.apache.flink.runtime.state.TaskStateHandles), StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle), Execution (org.apache.flink.runtime.executiongraph.Execution), Collection (java.util.Collection), List (java.util.List), OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle)
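
When oldParallelism != newParallelism, getKeyGroupsStateHandles must carve each stored keyed-state handle down to the key groups the new subtask owns. The core decision is a range intersection; here is a minimal sketch at the KeyGroupRange level (class name ours), assuming getIntersection clamps to the overlap as its name suggests:

import org.apache.flink.runtime.state.KeyGroupRange;

public class RescaleIntersectionSketch {
    public static void main(String[] args) {
        // An old subtask owned key groups [0, 6]; after rescaling, a new subtask owns [4, 9].
        KeyGroupRange oldRange = new KeyGroupRange(0, 6);
        KeyGroupRange newRange = new KeyGroupRange(4, 9);
        // The overlap [4, 6] is the slice of the old snapshot this new subtask must restore.
        KeyGroupRange overlap = oldRange.getIntersection(newRange);
        System.out.println(overlap.getStartKeyGroup() + ".." + overlap.getEndKeyGroup()); // 4..6
    }
}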

Example 15 with KeyGroupRange

Use of org.apache.flink.runtime.state.KeyGroupRange in project flink by apache.

Source: class KVStateRequestSerializerRocksDBTest, method testListSerialization.

/**
 * Tests that list serialization and deserialization match.
 *
 * @see KvStateRequestSerializerTest#testListSerialization() the same test against the heap
 * state back-end
 */
@Test
public void testListSerialization() throws Exception {
    final long key = 0L;
    // objects for RocksDB state list serialisation
    DBOptions dbOptions = PredefinedOptions.DEFAULT.createDBOptions();
    dbOptions.setCreateIfMissing(true);
    ColumnFamilyOptions columnFamilyOptions = PredefinedOptions.DEFAULT.createColumnOptions();
    // RocksDBKeyedStateBackend2 is presumably a test-local subclass that widens access to createListState.
    final RocksDBKeyedStateBackend2<Long> rocksDbKeyedStateBackend = new RocksDBKeyedStateBackend2<>(new JobID(), "no-op", ClassLoader.getSystemClassLoader(), temporaryFolder.getRoot(), dbOptions, columnFamilyOptions, mock(TaskKvStateRegistry.class), LongSerializer.INSTANCE, 1, new KeyGroupRange(0, 0), new ExecutionConfig());
    rocksDbKeyedStateBackend.setCurrentKey(key);
    final InternalListState<VoidNamespace, Long> listState = rocksDbKeyedStateBackend.createListState(VoidNamespaceSerializer.INSTANCE, new ListStateDescriptor<>("test", LongSerializer.INSTANCE));
    KvStateRequestSerializerTest.testListSerialization(key, listState);
}
Also used: KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange), TaskKvStateRegistry (org.apache.flink.runtime.query.TaskKvStateRegistry), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), ColumnFamilyOptions (org.rocksdb.ColumnFamilyOptions), DBOptions (org.rocksdb.DBOptions), VoidNamespace (org.apache.flink.runtime.state.VoidNamespace), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test), KvStateRequestSerializerTest (org.apache.flink.runtime.query.netty.message.KvStateRequestSerializerTest)
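
Note that the backend is constructed with new KeyGroupRange(0, 0) and a lone 1 in the constructor call (presumably the total number of key groups): the test only exercises list-value encoding, so a single key group owned by a single subtask keeps the RocksDB setup minimal.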

Aggregations

KeyGroupRange (org.apache.flink.runtime.state.KeyGroupRange): 106
Test (org.junit.Test): 67
JobID (org.apache.flink.api.common.JobID): 46
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 38
ArrayList (java.util.ArrayList): 26
CloseableRegistry (org.apache.flink.core.fs.CloseableRegistry): 23
MemoryStateBackend (org.apache.flink.runtime.state.memory.MemoryStateBackend): 21
DummyEnvironment (org.apache.flink.runtime.operators.testutils.DummyEnvironment): 18
KvStateRegistry (org.apache.flink.runtime.query.KvStateRegistry): 18
UnregisteredMetricsGroup (org.apache.flink.metrics.groups.UnregisteredMetricsGroup): 17
HashMap (java.util.HashMap): 15
KeyGroupsStateHandle (org.apache.flink.runtime.state.KeyGroupsStateHandle): 15
TestProcessingTimeService (org.apache.flink.streaming.runtime.tasks.TestProcessingTimeService): 15
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 14
ExecutionJobVertex (org.apache.flink.runtime.executiongraph.ExecutionJobVertex): 14
KeyedStateHandle (org.apache.flink.runtime.state.KeyedStateHandle): 14
AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint): 13
OperatorStateHandle (org.apache.flink.runtime.state.OperatorStateHandle): 13
List (java.util.List): 12
StreamStateHandle (org.apache.flink.runtime.state.StreamStateHandle): 12