Use of org.apache.flink.runtime.state.KeyGroupRangeOffsets in the Apache Flink project.
From the class StateInitializationContextImplTest, method setUp().
@Before
public void setUp() throws Exception {
    this.writtenKeyGroups = 0;
    this.writtenOperatorStates = new HashSet<>();
    this.closableRegistry = new CloseableRegistry();
    OperatorStateStore stateStore = mock(OperatorStateStore.class);
    ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos(64);
    List<KeyGroupsStateHandle> keyGroupsStateHandles = new ArrayList<>(NUM_HANDLES);
    int prev = 0;
    for (int i = 0; i < NUM_HANDLES; ++i) {
        out.reset();
        int size = i % 4;
        int end = prev + size;
        DataOutputView dov = new DataOutputViewStreamWrapper(out);
        KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(i == 9 ? KeyGroupRange.EMPTY_KEY_GROUP_RANGE : new KeyGroupRange(prev, end));
        prev = end + 1;
        for (int kg : offsets.getKeyGroupRange()) {
            offsets.setKeyGroupOffset(kg, out.getPosition());
            dov.writeInt(kg);
            ++writtenKeyGroups;
        }
        KeyGroupsStateHandle handle = new KeyGroupsStateHandle(offsets, new ByteStateHandleCloseChecking("kg-" + i, out.toByteArray()));
        keyGroupsStateHandles.add(handle);
    }
    List<OperatorStateHandle> operatorStateHandles = new ArrayList<>(NUM_HANDLES);
    for (int i = 0; i < NUM_HANDLES; ++i) {
        int size = i % 4;
        out.reset();
        DataOutputView dov = new DataOutputViewStreamWrapper(out);
        LongArrayList offsets = new LongArrayList(size);
        for (int s = 0; s < size; ++s) {
            offsets.add(out.getPosition());
            int val = i * NUM_HANDLES + s;
            dov.writeInt(val);
            writtenOperatorStates.add(val);
        }
        Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
        offsetsMap.put(DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME, new OperatorStateHandle.StateMetaInfo(offsets.toArray(), OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
        OperatorStateHandle operatorStateHandle = new OperatorStateHandle(offsetsMap, new ByteStateHandleCloseChecking("os-" + i, out.toByteArray()));
        operatorStateHandles.add(operatorStateHandle);
    }
    this.initializationContext = new StateInitializationContextImpl(true, stateStore, mock(KeyedStateStore.class), keyGroupsStateHandles, operatorStateHandles, closableRegistry);
}
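The pattern this fixture exercises is: for each key group in a range, remember the current stream position in a KeyGroupRangeOffsets before writing that group's data, then pair the offsets with the serialized bytes in a KeyGroupsStateHandle. The following is a minimal sketch of that write path, not part of the Flink test; it reuses only classes that appear in the snippet above, except that ByteStreamStateHandle stands in for the test's close-checking handle, the helper and class names are made up, and the import paths assume the Flink version these snippets come from.

import org.apache.flink.core.memory.ByteArrayOutputStreamWithPos;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
import org.apache.flink.runtime.state.KeyGroupRange;
import org.apache.flink.runtime.state.KeyGroupRangeOffsets;
import org.apache.flink.runtime.state.KeyGroupsStateHandle;
import org.apache.flink.runtime.state.memory.ByteStreamStateHandle;

import java.io.IOException;

class KeyGroupWriteSketch {

    // Writes one int per key group and records where each group starts in the stream.
    static KeyGroupsStateHandle writeDummyKeyGroups(KeyGroupRange range) throws IOException {
        ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos(64);
        DataOutputViewStreamWrapper dov = new DataOutputViewStreamWrapper(out);
        KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(range);
        for (int keyGroup : range) {
            // remember the start position of this key group before writing its data
            offsets.setKeyGroupOffset(keyGroup, out.getPosition());
            dov.writeInt(keyGroup);
        }
        return new KeyGroupsStateHandle(offsets, new ByteStreamStateHandle("sketch", out.toByteArray()));
    }
}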
Use of org.apache.flink.runtime.state.KeyGroupRangeOffsets in the Apache Flink project.
From the class SavepointV0Serializer, method convertKeyedBackendState().
/**
 * This is public so that we can use it when restoring a legacy snapshot
 * in {@code AbstractStreamOperatorTestHarness}.
 */
public static KeyGroupsStateHandle convertKeyedBackendState(HashMap<String, KvStateSnapshot<?, ?, ?, ?>> oldKeyedState, int parallelInstanceIdx, long checkpointID) throws Exception {
    if (null != oldKeyedState) {
        CheckpointStreamFactory checkpointStreamFactory = new MemCheckpointStreamFactory(MAX_SIZE);
        CheckpointStreamFactory.CheckpointStateOutputStream keyedStateOut = checkpointStreamFactory.createCheckpointStateOutputStream(checkpointID, 0L);
        try {
            final long offset = keyedStateOut.getPos();
            InstantiationUtil.serializeObject(keyedStateOut, oldKeyedState);
            StreamStateHandle streamStateHandle = keyedStateOut.closeAndGetHandle();
            // makes IOUtils.closeQuietly(...) ignore this
            keyedStateOut = null;
            if (null != streamStateHandle) {
                KeyGroupRangeOffsets keyGroupRangeOffsets = new KeyGroupRangeOffsets(parallelInstanceIdx, parallelInstanceIdx, new long[] { offset });
                return new MigrationKeyGroupStateHandle(keyGroupRangeOffsets, streamStateHandle);
            }
        } finally {
            IOUtils.closeQuietly(keyedStateOut);
        }
    }
    return null;
}
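The conversion above maps each parallel subtask to exactly one key group and records a single offset: the stream position where the serialized legacy state starts. A small illustrative fragment of that one-to-one mapping follows; the subtask index 3 and the offset 0L are made-up values, and getKeyGroupOffset is assumed to be the read-side counterpart of the setKeyGroupOffset call shown earlier.

// Hypothetical values: subtask index 3, legacy state starting at stream position 0.
KeyGroupRangeOffsets legacyOffsets = new KeyGroupRangeOffsets(3, 3, new long[] { 0L });
// The range covers exactly one key group: the subtask index itself ...
int numKeyGroups = legacyOffsets.getKeyGroupRange().getNumberOfKeyGroups(); // == 1
// ... and its recorded offset is where the serialized legacy state begins.
long startOffset = legacyOffsets.getKeyGroupOffset(3); // == 0L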
Use of org.apache.flink.runtime.state.KeyGroupRangeOffsets in the Apache Flink project.
From the class SavepointV1Test, method createTaskStates().
static Collection<TaskState> createTaskStates(int numTaskStates, int numSubtasksPerTask) throws IOException {
    Random random = new Random(numTaskStates * 31 + numSubtasksPerTask);
    List<TaskState> taskStates = new ArrayList<>(numTaskStates);
    for (int stateIdx = 0; stateIdx < numTaskStates; ++stateIdx) {
        int chainLength = 1 + random.nextInt(8);
        TaskState taskState = new TaskState(new JobVertexID(), numSubtasksPerTask, 128, chainLength);
        int noNonPartitionableStateAtIndex = random.nextInt(chainLength);
        int noOperatorStateBackendAtIndex = random.nextInt(chainLength);
        int noOperatorStateStreamAtIndex = random.nextInt(chainLength);
        boolean hasKeyedBackend = random.nextInt(4) != 0;
        boolean hasKeyedStream = random.nextInt(4) != 0;
        for (int subtaskIdx = 0; subtaskIdx < numSubtasksPerTask; subtaskIdx++) {
            List<StreamStateHandle> nonPartitionableStates = new ArrayList<>(chainLength);
            List<OperatorStateHandle> operatorStatesBackend = new ArrayList<>(chainLength);
            List<OperatorStateHandle> operatorStatesStream = new ArrayList<>(chainLength);
            for (int chainIdx = 0; chainIdx < chainLength; ++chainIdx) {
                StreamStateHandle nonPartitionableState = new TestByteStreamStateHandleDeepCompare("a-" + chainIdx, ("Hi-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                StreamStateHandle operatorStateBackend = new TestByteStreamStateHandleDeepCompare("b-" + chainIdx, ("Beautiful-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                StreamStateHandle operatorStateStream = new TestByteStreamStateHandleDeepCompare("b-" + chainIdx, ("Beautiful-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
                offsetsMap.put("A", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10, 20 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
                offsetsMap.put("B", new OperatorStateHandle.StateMetaInfo(new long[] { 30, 40, 50 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
                offsetsMap.put("C", new OperatorStateHandle.StateMetaInfo(new long[] { 60, 70, 80 }, OperatorStateHandle.Mode.BROADCAST));
                if (chainIdx != noNonPartitionableStateAtIndex) {
                    nonPartitionableStates.add(nonPartitionableState);
                }
                if (chainIdx != noOperatorStateBackendAtIndex) {
                    OperatorStateHandle operatorStateHandleBackend = new OperatorStateHandle(offsetsMap, operatorStateBackend);
                    operatorStatesBackend.add(operatorStateHandleBackend);
                }
                if (chainIdx != noOperatorStateStreamAtIndex) {
                    OperatorStateHandle operatorStateHandleStream = new OperatorStateHandle(offsetsMap, operatorStateStream);
                    operatorStatesStream.add(operatorStateHandleStream);
                }
            }
            KeyGroupsStateHandle keyedStateBackend = null;
            KeyGroupsStateHandle keyedStateStream = null;
            if (hasKeyedBackend) {
                keyedStateBackend = new KeyGroupsStateHandle(new KeyGroupRangeOffsets(1, 1, new long[] { 42 }), new TestByteStreamStateHandleDeepCompare("c", "Hello".getBytes(ConfigConstants.DEFAULT_CHARSET)));
            }
            if (hasKeyedStream) {
                keyedStateStream = new KeyGroupsStateHandle(new KeyGroupRangeOffsets(1, 1, new long[] { 23 }), new TestByteStreamStateHandleDeepCompare("d", "World".getBytes(ConfigConstants.DEFAULT_CHARSET)));
            }
            taskState.putState(subtaskIdx, new SubtaskState(new ChainedStateHandle<>(nonPartitionableStates), new ChainedStateHandle<>(operatorStatesBackend), new ChainedStateHandle<>(operatorStatesStream), keyedStateStream, keyedStateBackend));
        }
        taskStates.add(taskState);
    }
    return taskStates;
}
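Note the constructor used for the keyed handles above: KeyGroupRangeOffsets(startKeyGroup, endKeyGroup, offsets) carries one offset per key group in the range, the same invariant that generateKeyGroupState in the next snippet checks with Preconditions.checkArgument. A short illustrative fragment of that invariant, with made-up values:

// The range [0, 2] contains three key groups, so three offsets are supplied.
KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(new KeyGroupRange(0, 2), new long[] { 0L, 10L, 20L });
long offsetOfKeyGroup1 = offsets.getKeyGroupOffset(1); // == 10L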
Use of org.apache.flink.runtime.state.KeyGroupRangeOffsets in the Apache Flink project.
From the class CheckpointCoordinatorTest, method generateKeyGroupState().
public static KeyGroupsStateHandle generateKeyGroupState(KeyGroupRange keyGroupRange, List<? extends Serializable> states) throws IOException {
    Preconditions.checkArgument(keyGroupRange.getNumberOfKeyGroups() == states.size());
    Tuple2<byte[], List<long[]>> serializedDataWithOffsets = serializeTogetherAndTrackOffsets(Collections.<List<? extends Serializable>>singletonList(states));
    KeyGroupRangeOffsets keyGroupRangeOffsets = new KeyGroupRangeOffsets(keyGroupRange, serializedDataWithOffsets.f1.get(0));
    ByteStreamStateHandle allSerializedStatesHandle = new TestByteStreamStateHandleDeepCompare(String.valueOf(UUID.randomUUID()), serializedDataWithOffsets.f0);
    KeyGroupsStateHandle keyGroupsStateHandle = new KeyGroupsStateHandle(keyGroupRangeOffsets, allSerializedStatesHandle);
    return keyGroupsStateHandle;
}
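A hypothetical call site for this helper follows (inside the same test class, so the static method is in scope; java.util.Arrays and java.util.List imports are assumed). The range [0, 3] contains four key groups, so four serializable values are passed, one per key group, which satisfies the Preconditions check above.

// Four key groups in [0, 3], four per-key-group state values.
KeyGroupRange range = new KeyGroupRange(0, 3);
List<Integer> perKeyGroupStates = Arrays.asList(11, 22, 33, 44);
KeyGroupsStateHandle handle = generateKeyGroupState(range, perKeyGroupStates);
// The resulting handle carries one serialized blob plus one offset per key group.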
Use of org.apache.flink.runtime.state.KeyGroupRangeOffsets in the Apache Flink project.
From the class HeapKeyedStateBackend, method snapshot().
@Override
@SuppressWarnings("unchecked")
public RunnableFuture<KeyGroupsStateHandle> snapshot(final long checkpointId, final long timestamp, final CheckpointStreamFactory streamFactory, CheckpointOptions checkpointOptions) throws Exception {
    if (!hasRegisteredState()) {
        return DoneFuture.nullValue();
    }
    long syncStartTime = System.currentTimeMillis();
    Preconditions.checkState(stateTables.size() <= Short.MAX_VALUE, "Too many KV-States: " + stateTables.size() + ". Currently at most " + Short.MAX_VALUE + " states are supported");
    List<KeyedBackendSerializationProxy.StateMetaInfo<?, ?>> metaInfoProxyList = new ArrayList<>(stateTables.size());
    final Map<String, Integer> kVStateToId = new HashMap<>(stateTables.size());
    final Map<StateTable<K, ?, ?>, StateTableSnapshot> cowStateStableSnapshots = new HashedMap(stateTables.size());
    for (Map.Entry<String, StateTable<K, ?, ?>> kvState : stateTables.entrySet()) {
        RegisteredBackendStateMetaInfo<?, ?> metaInfo = kvState.getValue().getMetaInfo();
        KeyedBackendSerializationProxy.StateMetaInfo<?, ?> metaInfoProxy = new KeyedBackendSerializationProxy.StateMetaInfo(metaInfo.getStateType(), metaInfo.getName(), metaInfo.getNamespaceSerializer(), metaInfo.getStateSerializer());
        metaInfoProxyList.add(metaInfoProxy);
        kVStateToId.put(kvState.getKey(), kVStateToId.size());
        StateTable<K, ?, ?> stateTable = kvState.getValue();
        if (null != stateTable) {
            cowStateStableSnapshots.put(stateTable, stateTable.createSnapshot());
        }
    }
    final KeyedBackendSerializationProxy serializationProxy = new KeyedBackendSerializationProxy(keySerializer, metaInfoProxyList);
    //--------------------------------------------------- this becomes the end of sync part
    // implementation of the async IO operation, based on FutureTask
    final AbstractAsyncIOCallable<KeyGroupsStateHandle, CheckpointStreamFactory.CheckpointStateOutputStream> ioCallable = new AbstractAsyncIOCallable<KeyGroupsStateHandle, CheckpointStreamFactory.CheckpointStateOutputStream>() {

        AtomicBoolean open = new AtomicBoolean(false);

        @Override
        public CheckpointStreamFactory.CheckpointStateOutputStream openIOHandle() throws Exception {
            if (open.compareAndSet(false, true)) {
                CheckpointStreamFactory.CheckpointStateOutputStream stream = streamFactory.createCheckpointStateOutputStream(checkpointId, timestamp);
                try {
                    cancelStreamRegistry.registerClosable(stream);
                    return stream;
                } catch (Exception ex) {
                    open.set(false);
                    throw ex;
                }
            } else {
                throw new IOException("Operation already opened.");
            }
        }

        @Override
        public KeyGroupsStateHandle performOperation() throws Exception {
            long asyncStartTime = System.currentTimeMillis();
            CheckpointStreamFactory.CheckpointStateOutputStream stream = getIoHandle();
            DataOutputViewStreamWrapper outView = new DataOutputViewStreamWrapper(stream);
            serializationProxy.write(outView);
            long[] keyGroupRangeOffsets = new long[keyGroupRange.getNumberOfKeyGroups()];
            for (int keyGroupPos = 0; keyGroupPos < keyGroupRange.getNumberOfKeyGroups(); ++keyGroupPos) {
                int keyGroupId = keyGroupRange.getKeyGroupId(keyGroupPos);
                keyGroupRangeOffsets[keyGroupPos] = stream.getPos();
                outView.writeInt(keyGroupId);
                for (Map.Entry<String, StateTable<K, ?, ?>> kvState : stateTables.entrySet()) {
                    outView.writeShort(kVStateToId.get(kvState.getKey()));
                    cowStateStableSnapshots.get(kvState.getValue()).writeMappingsInKeyGroup(outView, keyGroupId);
                }
            }
            if (open.compareAndSet(true, false)) {
                StreamStateHandle streamStateHandle = stream.closeAndGetHandle();
                KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(keyGroupRange, keyGroupRangeOffsets);
                final KeyGroupsStateHandle keyGroupsStateHandle = new KeyGroupsStateHandle(offsets, streamStateHandle);
                if (asynchronousSnapshots) {
                    LOG.info("Heap backend snapshot ({}, asynchronous part) in thread {} took {} ms.", streamFactory, Thread.currentThread(), (System.currentTimeMillis() - asyncStartTime));
                }
                return keyGroupsStateHandle;
            } else {
                throw new IOException("Checkpoint stream already closed.");
            }
        }

        @Override
        public void done(boolean canceled) {
            if (open.compareAndSet(true, false)) {
                CheckpointStreamFactory.CheckpointStateOutputStream stream = getIoHandle();
                if (null != stream) {
                    cancelStreamRegistry.unregisterClosable(stream);
                    IOUtils.closeQuietly(stream);
                }
            }
            for (StateTableSnapshot snapshot : cowStateStableSnapshots.values()) {
                snapshot.release();
            }
        }
    };
    AsyncStoppableTaskWithCallback<KeyGroupsStateHandle> task = AsyncStoppableTaskWithCallback.from(ioCallable);
    if (!asynchronousSnapshots) {
        task.run();
    }
    LOG.info("Heap backend snapshot (" + streamFactory + ", synchronous part) in thread " + Thread.currentThread() + " took " + (System.currentTimeMillis() - syncStartTime) + " ms.");
    return task;
}
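The offsets recorded in performOperation() are what make the restore path cheap: a reader responsible for a single key group can seek straight to that group's start position instead of scanning the whole snapshot. Below is a minimal read-side sketch, not taken from Flink: the helper and class names are made up, and it assumes StreamStateHandle.openInputStream(), FSDataInputStream.seek(), and KeyGroupRangeOffsets.getKeyGroupOffset() behave as in the Flink version these snippets come from.

import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.memory.DataInputViewStreamWrapper;
import org.apache.flink.runtime.state.KeyGroupRangeOffsets;
import org.apache.flink.runtime.state.StreamStateHandle;

class KeyGroupReadSketch {

    // Seeks to the recorded offset of one key group and reads back the key group id
    // that performOperation() wrote first for that group.
    static int readKeyGroupHeader(StreamStateHandle data, KeyGroupRangeOffsets offsets, int keyGroupId) throws Exception {
        try (FSDataInputStream in = data.openInputStream()) {
            in.seek(offsets.getKeyGroupOffset(keyGroupId));
            DataInputViewStreamWrapper inView = new DataInputViewStreamWrapper(in);
            return inView.readInt(); // followed by (state id, mappings) pairs per registered state
        }
    }
}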