Use of org.apache.flink.runtime.state.OperatorStateHandle in project flink by apache.
Class StreamTaskStateInitializerImpl, method rawOperatorStateInputs.
protected CloseableIterable<StatePartitionStreamProvider> rawOperatorStateInputs(
        Iterator<StateObjectCollection<OperatorStateHandle>> restoreStateAlternatives) {
    if (restoreStateAlternatives.hasNext()) {
        Collection<OperatorStateHandle> rawOperatorState = restoreStateAlternatives.next();
        // TODO currently this does not support local state recovery, so we expect there is only
        // one handle.
        Preconditions.checkState(
                !restoreStateAlternatives.hasNext(),
                "Local recovery is currently not implemented for raw operator state, but found state alternative.");

        if (rawOperatorState != null) {
            return new CloseableIterable<StatePartitionStreamProvider>() {

                final CloseableRegistry closeableRegistry = new CloseableRegistry();

                @Override
                public void close() throws IOException {
                    closeableRegistry.close();
                }

                @Nonnull
                @Override
                public Iterator<StatePartitionStreamProvider> iterator() {
                    return new OperatorStateStreamIterator(
                            DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME,
                            rawOperatorState.iterator(),
                            closeableRegistry);
                }
            };
        }
    }
    return CloseableIterable.empty();
}
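For context, a minimal consumption sketch (not taken from the Flink sources): the returned CloseableIterable is typically iterated to obtain one input stream per raw operator state partition, and closing the iterable closes any streams registered with the CloseableRegistry. The try-with-resources wiring and the deserialization step are assumptions, and the direct call only works from the class itself or a subclass since the method is protected.

// Sketch only: `restoreStateAlternatives` is assumed in scope, and the
// surrounding method is assumed to declare `throws Exception`.
try (CloseableIterable<StatePartitionStreamProvider> inputs =
        rawOperatorStateInputs(restoreStateAlternatives)) {
    for (StatePartitionStreamProvider provider : inputs) {
        try (InputStream in = provider.getStream()) {
            // read and deserialize this partition's raw operator state from `in`
        }
    }
}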
Use of org.apache.flink.runtime.state.OperatorStateHandle in project flink by apache.
Class InterruptSensitiveRestoreTest, method createTask.
// ------------------------------------------------------------------------
// Utilities
// ------------------------------------------------------------------------
private static Task createTask(
        StreamConfig streamConfig, Configuration taskConfig, StreamStateHandle state, int mode)
        throws IOException {
    ShuffleEnvironment<?, ?> shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build();

    Collection<KeyedStateHandle> keyedStateFromBackend = Collections.emptyList();
    Collection<KeyedStateHandle> keyedStateFromStream = Collections.emptyList();
    Collection<OperatorStateHandle> operatorStateBackend = Collections.emptyList();
    Collection<OperatorStateHandle> operatorStateStream = Collections.emptyList();

    Map<String, OperatorStateHandle.StateMetaInfo> operatorStateMetadata = new HashMap<>(1);
    OperatorStateHandle.StateMetaInfo metaInfo = new OperatorStateHandle.StateMetaInfo(
            new long[] {0}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);
    operatorStateMetadata.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, metaInfo);

    KeyGroupRangeOffsets keyGroupRangeOffsets = new KeyGroupRangeOffsets(new KeyGroupRange(0, 0));
    Collection<OperatorStateHandle> operatorStateHandles =
            Collections.singletonList(new OperatorStreamStateHandle(operatorStateMetadata, state));
    List<KeyedStateHandle> keyedStateHandles =
            Collections.singletonList(new KeyGroupsStateHandle(keyGroupRangeOffsets, state));

    switch (mode) {
        case OPERATOR_MANAGED:
            operatorStateBackend = operatorStateHandles;
            break;
        case OPERATOR_RAW:
            operatorStateStream = operatorStateHandles;
            break;
        case KEYED_MANAGED:
            keyedStateFromBackend = keyedStateHandles;
            break;
        case KEYED_RAW:
            keyedStateFromStream = keyedStateHandles;
            break;
        default:
            throw new IllegalArgumentException();
    }

    OperatorSubtaskState operatorSubtaskState = OperatorSubtaskState.builder()
            .setManagedOperatorState(new StateObjectCollection<>(operatorStateBackend))
            .setRawOperatorState(new StateObjectCollection<>(operatorStateStream))
            .setManagedKeyedState(new StateObjectCollection<>(keyedStateFromBackend))
            .setRawKeyedState(new StateObjectCollection<>(keyedStateFromStream))
            .build();

    JobVertexID jobVertexID = new JobVertexID();
    OperatorID operatorID = OperatorID.fromJobVertexID(jobVertexID);
    streamConfig.setOperatorID(operatorID);
    TaskStateSnapshot stateSnapshot = new TaskStateSnapshot();
    stateSnapshot.putSubtaskStateByOperatorID(operatorID, operatorSubtaskState);
    JobManagerTaskRestore taskRestore = new JobManagerTaskRestore(1L, stateSnapshot);

    JobInformation jobInformation = new JobInformation(
            new JobID(), "test job name", new SerializedValue<>(new ExecutionConfig()),
            new Configuration(), Collections.emptyList(), Collections.emptyList());
    TaskInformation taskInformation = new TaskInformation(
            jobVertexID, "test task name", 1, 1, SourceStreamTask.class.getName(), taskConfig);
    TestTaskStateManager taskStateManager = TestTaskStateManager.builder()
            .setReportedCheckpointId(taskRestore.getRestoreCheckpointId())
            .setJobManagerTaskStateSnapshotsByCheckpointId(Collections.singletonMap(
                    taskRestore.getRestoreCheckpointId(), taskRestore.getTaskStateSnapshot()))
            .build();

    return new Task(
            jobInformation, taskInformation, new ExecutionAttemptID(), new AllocationID(), 0, 0,
            Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
            Collections.<InputGateDeploymentDescriptor>emptyList(),
            mock(MemoryManager.class), mock(IOManager.class), shuffleEnvironment,
            new KvStateService(new KvStateRegistry(), null, null),
            mock(BroadcastVariableManager.class), new TaskEventDispatcher(),
            ExternalResourceInfoProvider.NO_EXTERNAL_RESOURCES, taskStateManager,
            mock(TaskManagerActions.class), mock(InputSplitProvider.class),
            mock(CheckpointResponder.class), new NoOpTaskOperatorEventGateway(),
            new TestGlobalAggregateManager(), TestingClassLoaderLease.newBuilder().build(),
            new FileCache(new String[] {EnvironmentInformation.getTemporaryFileDirectory()},
                    VoidPermanentBlobService.INSTANCE),
            new TestingTaskManagerRuntimeInfo(),
            UnregisteredMetricGroups.createUnregisteredTaskMetricGroup(),
            new NoOpResultPartitionConsumableNotifier(),
            mock(PartitionProducerStateChecker.class), mock(Executor.class));
}
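A hypothetical way this helper is driven from a test, shown only as a sketch: the restore mode constant comes from the switch above, while the simple in-memory ByteStreamStateHandle used as the restore payload is an illustrative stand-in for the handle the actual test wires in.

// Illustrative only: the real test supplies its own interrupt-sensitive state handle.
Configuration taskConfig = new Configuration();
StreamConfig streamConfig = new StreamConfig(taskConfig);
StreamStateHandle state = new ByteStreamStateHandle("dummy-state", new byte[0]);

Task task = createTask(streamConfig, taskConfig, state, OPERATOR_MANAGED);
task.startTaskThread();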
Use of org.apache.flink.runtime.state.OperatorStateHandle in project flink by apache.
Class CoordinatorEventsExactlyOnceITCase, method readSnapshot.
@Nullable
static StreamStateHandle readSnapshot(TaskStateManager stateManager, OperatorID operatorId) {
    final PrioritizedOperatorSubtaskState poss = stateManager.prioritizedOperatorState(operatorId);
    if (!poss.isRestored()) {
        return null;
    }

    final StateObjectCollection<OperatorStateHandle> opState =
            stateManager.prioritizedOperatorState(operatorId).getPrioritizedManagedOperatorState().get(0);
    final OperatorStateHandle handle = Iterators.getOnlyElement(opState.iterator());
    return handle.getDelegateStateHandle();
}
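A short, assumed usage sketch: given a TaskStateManager that was primed with a restore snapshot (for example, one built by createSnapshot below), the recovered handle can be opened and its payload read back. The variables and the deserialization step are assumptions, not part of the test.

// Sketch: `stateManager` and `operatorId` are assumed to be in scope.
StreamStateHandle restored = readSnapshot(stateManager, operatorId);
if (restored != null) {
    try (InputStream in = restored.openInputStream()) {
        // deserialize whatever payload was wrapped into the snapshot at checkpoint time
    }
}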
Use of org.apache.flink.runtime.state.OperatorStateHandle in project flink by apache.
Class CoordinatorEventsExactlyOnceITCase, method createSnapshot.
static TaskStateSnapshot createSnapshot(StreamStateHandle handle, OperatorID operatorId) {
    final OperatorStateHandle.StateMetaInfo metaInfo = new OperatorStateHandle.StateMetaInfo(
            new long[] {0}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE);
    final OperatorStateHandle state = new OperatorStreamStateHandle(
            Collections.singletonMap("état_et_moi_:_ça_fait_deux", metaInfo), handle);
    final OperatorSubtaskState oss =
            OperatorSubtaskState.builder().setManagedOperatorState(state).build();
    return new TaskStateSnapshot(Collections.singletonMap(operatorId, oss));
}
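A minimal construction sketch, assuming a small in-memory payload; ByteStreamStateHandle is a real in-memory handle, but pairing it with createSnapshot here is purely illustrative.

// Illustrative round trip with readSnapshot(...) above.
OperatorID operatorId = new OperatorID();
StreamStateHandle payload = new ByteStreamStateHandle("payload", new byte[] {1, 2, 3});
TaskStateSnapshot snapshot = createSnapshot(payload, operatorId);
// Handing `snapshot` to a restore path (e.g. via a TestTaskStateManager) lets
// readSnapshot(stateManager, operatorId) recover `payload` on restore.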
Use of org.apache.flink.runtime.state.OperatorStateHandle in project flink by apache.
Class AbstractStreamOperatorTestHarness, method repartitionOperatorState.
/**
 * Returns the state handles reshaped to include only those key-group states in the local
 * key-group range and the operator states that would be assigned to the local subtask.
 */
public static OperatorSubtaskState repartitionOperatorState(
        final OperatorSubtaskState operatorStateHandles, final int numKeyGroups,
        final int oldParallelism, final int newParallelism, final int subtaskIndex) {
    Preconditions.checkNotNull(
            operatorStateHandles, "the previous operatorStateHandles should not be null.");

    // create a new OperatorStateHandles that only contains the state for our key-groups
    List<KeyGroupRange> keyGroupPartitions =
            StateAssignmentOperation.createKeyGroupPartitions(numKeyGroups, newParallelism);
    KeyGroupRange localKeyGroupRange = keyGroupPartitions.get(subtaskIndex);

    List<KeyedStateHandle> localManagedKeyGroupState = new ArrayList<>();
    StateAssignmentOperation.extractIntersectingState(
            operatorStateHandles.getManagedKeyedState(), localKeyGroupRange, localManagedKeyGroupState);
    List<KeyedStateHandle> localRawKeyGroupState = new ArrayList<>();
    StateAssignmentOperation.extractIntersectingState(
            operatorStateHandles.getRawKeyedState(), localKeyGroupRange, localRawKeyGroupState);

    StateObjectCollection<OperatorStateHandle> managedOperatorStates =
            operatorStateHandles.getManagedOperatorState();
    Collection<OperatorStateHandle> localManagedOperatorState;
    if (!managedOperatorStates.isEmpty()) {
        List<List<OperatorStateHandle>> managedOperatorState =
                managedOperatorStates.stream().map(Collections::singletonList).collect(Collectors.toList());
        localManagedOperatorState = operatorStateRepartitioner
                .repartitionState(managedOperatorState, oldParallelism, newParallelism)
                .get(subtaskIndex);
    } else {
        localManagedOperatorState = Collections.emptyList();
    }

    StateObjectCollection<OperatorStateHandle> rawOperatorStates =
            operatorStateHandles.getRawOperatorState();
    Collection<OperatorStateHandle> localRawOperatorState;
    if (!rawOperatorStates.isEmpty()) {
        List<List<OperatorStateHandle>> rawOperatorState =
                rawOperatorStates.stream().map(Collections::singletonList).collect(Collectors.toList());
        localRawOperatorState = operatorStateRepartitioner
                .repartitionState(rawOperatorState, oldParallelism, newParallelism)
                .get(subtaskIndex);
    } else {
        localRawOperatorState = Collections.emptyList();
    }

    return OperatorSubtaskState.builder()
            .setManagedOperatorState(new StateObjectCollection<>(nullToEmptyCollection(localManagedOperatorState)))
            .setRawOperatorState(new StateObjectCollection<>(nullToEmptyCollection(localRawOperatorState)))
            .setManagedKeyedState(new StateObjectCollection<>(nullToEmptyCollection(localManagedKeyGroupState)))
            .setRawKeyedState(new StateObjectCollection<>(nullToEmptyCollection(localRawKeyGroupState)))
            .build();
}
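A hedged rescaling sketch: take an OperatorSubtaskState captured from a harness running at parallelism 1 and reshape it for subtask 0 of a new parallelism of 2. The `snapshot` variable and the key-group count of 128 are assumptions made for the sketch.

// `snapshot` is assumed to come from a prior harness snapshot taken at parallelism 1.
int numKeyGroups = 128;
OperatorSubtaskState forSubtask0 =
        AbstractStreamOperatorTestHarness.repartitionOperatorState(snapshot, numKeyGroups, 1, 2, 0);
// `forSubtask0` can then be used to initialize the state of a new harness created
// with parallelism 2 and subtask index 0.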