Use of org.apache.flink.runtime.state.StreamStateHandle in project flink by apache.
The class MetadataV2V3SerializerBase, method deserializeKeyedStateHandle:
@VisibleForTesting
@Nullable
static KeyedStateHandle deserializeKeyedStateHandle(
        DataInputStream dis, @Nullable DeserializationContext context) throws IOException {
    final int type = dis.readByte();
    if (NULL_HANDLE == type) {
        return null;
    } else if (KEY_GROUPS_HANDLE == type
            || KEY_GROUPS_HANDLE_V2 == type
            || SAVEPOINT_KEY_GROUPS_HANDLE == type) {
        int startKeyGroup = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange =
                KeyGroupRange.of(startKeyGroup, startKeyGroup + numKeyGroups - 1);
        long[] offsets = new long[numKeyGroups];
        for (int i = 0; i < numKeyGroups; ++i) {
            offsets[i] = dis.readLong();
        }
        KeyGroupRangeOffsets keyGroupRangeOffsets =
                new KeyGroupRangeOffsets(keyGroupRange, offsets);
        StreamStateHandle stateHandle = deserializeStreamStateHandle(dis, context);
        if (SAVEPOINT_KEY_GROUPS_HANDLE == type) {
            return new KeyGroupsSavepointStateHandle(keyGroupRangeOffsets, stateHandle);
        } else {
            StateHandleID stateHandleID =
                    KEY_GROUPS_HANDLE_V2 == type
                            ? new StateHandleID(dis.readUTF())
                            : StateHandleID.randomStateHandleId();
            return KeyGroupsStateHandle.restore(keyGroupRangeOffsets, stateHandle, stateHandleID);
        }
    } else if (INCREMENTAL_KEY_GROUPS_HANDLE == type || INCREMENTAL_KEY_GROUPS_HANDLE_V2 == type) {
        return deserializeIncrementalStateHandle(dis, context, type);
    } else if (CHANGELOG_HANDLE == type) {
        int startKeyGroup = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange =
                KeyGroupRange.of(startKeyGroup, startKeyGroup + numKeyGroups - 1);
        long checkpointedSize = dis.readLong();
        int baseSize = dis.readInt();
        List<KeyedStateHandle> base = new ArrayList<>(baseSize);
        for (int i = 0; i < baseSize; i++) {
            KeyedStateHandle handle = deserializeKeyedStateHandle(dis, context);
            if (handle != null) {
                base.add(handle);
            } else {
                LOG.warn(
                        "Unexpected null keyed state handle of materialized part when deserializing changelog state-backend handle");
            }
        }
        int deltaSize = dis.readInt();
        List<ChangelogStateHandle> delta = new ArrayList<>(deltaSize);
        for (int i = 0; i < deltaSize; i++) {
            delta.add((ChangelogStateHandle) deserializeKeyedStateHandle(dis, context));
        }
        long materializationID = dis.readLong();
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return ChangelogStateBackendHandleImpl.restore(
                base, delta, keyGroupRange, materializationID, checkpointedSize, stateHandleId);
    } else if (CHANGELOG_BYTE_INCREMENT_HANDLE == type) {
        int start = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(start, start + numKeyGroups - 1);
        long from = dis.readLong();
        long to = dis.readLong();
        int size = dis.readInt();
        List<StateChange> changes = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            int keyGroup = dis.readInt();
            int bytesSize = dis.readInt();
            byte[] bytes = new byte[bytesSize];
            IOUtils.readFully(dis, bytes, 0, bytesSize);
            changes.add(new StateChange(keyGroup, bytes));
        }
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return InMemoryChangelogStateHandle.restore(
                changes, SequenceNumber.of(from), SequenceNumber.of(to), keyGroupRange, stateHandleId);
    } else if (CHANGELOG_FILE_INCREMENT_HANDLE == type) {
        int start = dis.readInt();
        int numKeyGroups = dis.readInt();
        KeyGroupRange keyGroupRange = KeyGroupRange.of(start, start + numKeyGroups - 1);
        int numHandles = dis.readInt();
        List<Tuple2<StreamStateHandle, Long>> streamHandleAndOffset = new ArrayList<>(numHandles);
        for (int i = 0; i < numHandles; i++) {
            long o = dis.readLong();
            StreamStateHandle h = deserializeStreamStateHandle(dis, context);
            streamHandleAndOffset.add(Tuple2.of(h, o));
        }
        long size = dis.readLong();
        long checkpointedSize = dis.readLong();
        StateHandleID stateHandleId = new StateHandleID(dis.readUTF());
        return ChangelogStateHandleStreamImpl.restore(
                streamHandleAndOffset, keyGroupRange, size, checkpointedSize, stateHandleId);
    } else {
        throw new IllegalStateException("Reading invalid KeyedStateHandle, type: " + type);
    }
}
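The method above implements a tag-dispatch wire format: a single type byte selects the handle variant, and the payload that follows is read field-for-field in the exact order the matching serializer wrote it. A minimal, self-contained sketch of that framing pattern using only java.io; the tag constants here are hypothetical stand-ins for Flink's internal values, not the real ones:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class TagFramingSketch {

    // Hypothetical tag values; Flink's real constants live in MetadataV2V3SerializerBase.
    static final byte NULL_HANDLE = 1;
    static final byte KEY_GROUPS_HANDLE = 2;

    // Writer side: the type byte first, then the type-specific payload.
    static void writeKeyGroups(DataOutputStream out, int startKeyGroup, long[] offsets) throws IOException {
        out.writeByte(KEY_GROUPS_HANDLE);
        out.writeInt(startKeyGroup);
        out.writeInt(offsets.length); // numKeyGroups
        for (long offset : offsets) {
            out.writeLong(offset);
        }
    }

    // Reader side mirrors the writer field-for-field, as in deserializeKeyedStateHandle.
    static long[] readKeyGroups(DataInputStream in) throws IOException {
        int type = in.readByte();
        if (type == NULL_HANDLE) {
            return null;
        } else if (type == KEY_GROUPS_HANDLE) {
            int startKeyGroup = in.readInt(); // read but unused in this sketch
            int numKeyGroups = in.readInt();
            long[] offsets = new long[numKeyGroups];
            for (int i = 0; i < numKeyGroups; i++) {
                offsets[i] = in.readLong();
            }
            return offsets;
        }
        throw new IllegalStateException("Reading invalid handle, type: " + type);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        writeKeyGroups(new DataOutputStream(buffer), 0, new long[] {0L, 10L, 20L});
        long[] offsets = readKeyGroups(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println("read " + offsets.length + " offsets"); // read 3 offsets
    }
}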
Use of org.apache.flink.runtime.state.StreamStateHandle in project flink by apache.
The class FsCheckpointStateToolset, method duplicate:
@Override
public List<StreamStateHandle> duplicate(List<StreamStateHandle> stateHandles) throws IOException {
    final List<CopyRequest> requests = new ArrayList<>();
    for (StreamStateHandle handle : stateHandles) {
        if (!(handle instanceof FileStateHandle)) {
            throw new IllegalArgumentException("We can duplicate only FileStateHandles.");
        }
        final Path srcPath = ((FileStateHandle) handle).getFilePath();
        requests.add(CopyRequest.of(srcPath, getNewDstPath(srcPath.getName())));
    }
    fs.duplicate(requests);
    return IntStream.range(0, stateHandles.size())
            .mapToObj(idx -> {
                final StreamStateHandle originalHandle = stateHandles.get(idx);
                final Path dst = requests.get(idx).getDestination();
                if (originalHandle instanceof RelativeFileStateHandle) {
                    return new RelativeFileStateHandle(dst, dst.getName(), originalHandle.getStateSize());
                } else {
                    return new FileStateHandle(dst, originalHandle.getStateSize());
                }
            })
            .collect(Collectors.toList());
}
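The duplication above runs in two index-aligned phases: one CopyRequest is built per input handle, the whole batch is issued in a single fs.duplicate call, and the result handles are then rebuilt by index so that requests.get(idx) corresponds to stateHandles.get(idx). A small sketch of that pairing pattern; the Src and Copied types are hypothetical stand-ins for the state handles:

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class IndexPairingSketch {

    record Src(String path) {}               // hypothetical stand-in for FileStateHandle
    record Copied(String src, String dst) {} // hypothetical stand-in for the duplicated handle

    static List<Copied> duplicateAll(List<Src> sources) {
        // Phase 1: one destination per source, preserving input order.
        List<String> destinations = new ArrayList<>(sources.size());
        for (Src s : sources) {
            destinations.add(s.path() + ".copy"); // stand-in for getNewDstPath(...)
        }
        // (A real implementation would issue the batched copy here, like fs.duplicate(requests).)
        // Phase 2: rebuild results by index; destinations.get(idx) lines up with sources.get(idx).
        return IntStream.range(0, sources.size())
                .mapToObj(idx -> new Copied(sources.get(idx).path(), destinations.get(idx)))
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        System.out.println(duplicateAll(List.of(new Src("a"), new Src("b"))));
        // [Copied[src=a, dst=a.copy], Copied[src=b, dst=b.copy]]
    }
}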
Use of org.apache.flink.runtime.state.StreamStateHandle in project flink by apache.
The class CheckpointTestUtils, method randomlySetSubtaskState:
private static void randomlySetSubtaskState(
        OperatorState taskState, int[] subtasksToSet, Random random, String basePath) {
    boolean hasOperatorStateBackend = random.nextBoolean();
    boolean hasOperatorStateStream = random.nextBoolean();
    boolean hasKeyedBackend = random.nextInt(4) != 0;
    boolean hasKeyedStream = random.nextInt(4) != 0;
    boolean isIncremental = random.nextInt(3) == 0;
    for (int subtaskIdx : subtasksToSet) {
        StreamStateHandle operatorStateBackend =
                new ByteStreamStateHandle("b", "Beautiful".getBytes(ConfigConstants.DEFAULT_CHARSET));
        StreamStateHandle operatorStateStream =
                new ByteStreamStateHandle("b", "Beautiful".getBytes(ConfigConstants.DEFAULT_CHARSET));
        Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
        offsetsMap.put("A", new OperatorStateHandle.StateMetaInfo(new long[] {0, 10, 20}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
        offsetsMap.put("B", new OperatorStateHandle.StateMetaInfo(new long[] {30, 40, 50}, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
        offsetsMap.put("C", new OperatorStateHandle.StateMetaInfo(new long[] {60, 70, 80}, OperatorStateHandle.Mode.UNION));
        final OperatorSubtaskState.Builder state = OperatorSubtaskState.builder();
        if (hasOperatorStateBackend) {
            state.setManagedOperatorState(new OperatorStreamStateHandle(offsetsMap, operatorStateBackend));
        }
        if (hasOperatorStateStream) {
            state.setRawOperatorState(new OperatorStreamStateHandle(offsetsMap, operatorStateStream));
        }
        if (hasKeyedBackend) {
            final KeyedStateHandle stateHandle;
            if (isSavepoint(basePath)) {
                stateHandle = createDummyKeyGroupSavepointStateHandle(random, basePath);
            } else if (isIncremental) {
                stateHandle = createDummyIncrementalKeyedStateHandle(random);
            } else {
                stateHandle = createDummyKeyGroupStateHandle(random, null);
            }
            state.setRawKeyedState(stateHandle);
        }
        if (hasKeyedStream) {
            final KeyedStateHandle stateHandle;
            if (isSavepoint(basePath)) {
                stateHandle = createDummyKeyGroupSavepointStateHandle(random, basePath);
            } else {
                stateHandle = createDummyKeyGroupStateHandle(random, null);
            }
            state.setManagedKeyedState(stateHandle);
        }
        state.setInputChannelState(
                (random.nextBoolean() && !isSavepoint(basePath))
                        ? singleton(createNewInputChannelStateHandle(random.nextInt(5), random))
                        : empty());
        state.setResultSubpartitionState(
                (random.nextBoolean() && !isSavepoint(basePath))
                        ? singleton(createNewResultSubpartitionStateHandle(random.nextInt(5), random))
                        : empty());
        taskState.putState(subtaskIdx, state.build());
    }
}
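All randomness in the utility above flows through the single Random parameter, which is what makes the generated state reproducible: a caller that passes a seeded instance gets the same sequence of decisions on every run. A tiny sketch of that idea with the same probabilities as the flags above (the seed value 42 is arbitrary):

import java.util.Random;

public class SeededRandomSketch {
    public static void main(String[] args) {
        // A fixed seed makes every nextBoolean()/nextInt() call deterministic,
        // so a failing randomized test can be replayed exactly.
        Random random = new Random(42L);
        boolean hasOperatorStateBackend = random.nextBoolean();
        boolean hasKeyedBackend = random.nextInt(4) != 0; // true with probability 3/4
        boolean isIncremental = random.nextInt(3) == 0;   // true with probability 1/3
        System.out.printf("backend=%b keyed=%b incremental=%b%n",
                hasOperatorStateBackend, hasKeyedBackend, isIncremental);
    }
}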
Use of org.apache.flink.runtime.state.StreamStateHandle in project flink by apache.
The class MetadataV3SerializerTest, method testSerializeKeyGroupsStateHandle:
@Test
public void testSerializeKeyGroupsStateHandle() throws IOException {
    KeyGroupRangeOffsets offsets = new KeyGroupRangeOffsets(0, 123);
    byte[] data = {1, 2, 3, 4};
    try (ByteArrayOutputStreamWithPos out = new ByteArrayOutputStreamWithPos()) {
        MetadataV2V3SerializerBase.serializeStreamStateHandle(
                new KeyGroupsStateHandle(offsets, new ByteStreamStateHandle("test", data)),
                new DataOutputStream(out));
        try (ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray())) {
            StreamStateHandle handle =
                    MetadataV2V3SerializerBase.deserializeStreamStateHandle(new DataInputStream(in), null);
            assertTrue(handle instanceof KeyGroupsStateHandle);
            assertEquals(offsets, ((KeyGroupsStateHandle) handle).getGroupRangeOffsets());
            byte[] deserialized = new byte[data.length];
            try (FSDataInputStream dataStream = handle.openInputStream()) {
                dataStream.read(deserialized);
                assertArrayEquals(data, deserialized);
            }
        }
    }
}
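The same round-trip pattern (serialize to an in-memory byte array, deserialize, assert equality) can be sketched with plain java.io. One caveat worth noting: the test above uses dataStream.read(deserialized), which is not guaranteed to fill the buffer in a single call; DataInputStream.readFully avoids that, as shown here:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

public class RoundTripSketch {
    public static void main(String[] args) throws IOException {
        byte[] data = {1, 2, 3, 4};
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(out)) {
            dos.writeInt(data.length); // length prefix, then payload
            dos.write(data);
        }
        try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(out.toByteArray()))) {
            byte[] deserialized = new byte[dis.readInt()];
            dis.readFully(deserialized); // guarantees the whole buffer is filled
            if (!Arrays.equals(data, deserialized)) {
                throw new AssertionError("round trip failed");
            }
        }
        System.out.println("round trip ok");
    }
}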
Use of org.apache.flink.runtime.state.StreamStateHandle in project flink by apache.
The class GenericWriteAheadSink, method notifyCheckpointComplete:
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
    super.notifyCheckpointComplete(checkpointId);
    synchronized (pendingCheckpoints) {
        Iterator<PendingCheckpoint> pendingCheckpointIt = pendingCheckpoints.iterator();
        while (pendingCheckpointIt.hasNext()) {
            PendingCheckpoint pendingCheckpoint = pendingCheckpointIt.next();
            long pastCheckpointId = pendingCheckpoint.checkpointId;
            int subtaskId = pendingCheckpoint.subtaskId;
            long timestamp = pendingCheckpoint.timestamp;
            StreamStateHandle streamHandle = pendingCheckpoint.stateHandle;
            if (pastCheckpointId <= checkpointId) {
                try {
                    if (!committer.isCheckpointCommitted(subtaskId, pastCheckpointId)) {
                        try (FSDataInputStream in = streamHandle.openInputStream()) {
                            boolean success =
                                    sendValues(
                                            new ReusingMutableToRegularIteratorWrapper<>(
                                                    new InputViewIterator<>(
                                                            new DataInputViewStreamWrapper(in), serializer),
                                                    serializer),
                                            pastCheckpointId,
                                            timestamp);
                            if (success) {
                                // in case the checkpoint was successfully committed,
                                // discard its state from the backend and mark it for removal
                                // in case it failed, we retry on the next checkpoint
                                committer.commitCheckpoint(subtaskId, pastCheckpointId);
                                streamHandle.discardState();
                                pendingCheckpointIt.remove();
                            }
                        }
                    } else {
                        streamHandle.discardState();
                        pendingCheckpointIt.remove();
                    }
                } catch (Exception e) {
                    // we have to break here to prevent a new (later) checkpoint
                    // from being committed before this one
                    LOG.error("Could not commit checkpoint.", e);
                    break;
                }
            }
        }
    }
}
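The loop above preserves commit order: it walks the pending checkpoints oldest-first, commits every one confirmed by checkpointId, and breaks on the first failure so a later checkpoint can never be committed before an earlier one. A minimal sketch of that pattern; the Pending record and commit call are hypothetical stand-ins, not the sink's actual types:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;

public class OrderedCommitSketch {

    record Pending(long checkpointId) {} // hypothetical stand-in for PendingCheckpoint

    // Commits every pending entry up to completedId, oldest first; stops at the
    // first failure so a later checkpoint is never committed before an earlier one.
    static void onCheckpointComplete(Deque<Pending> pending, long completedId) {
        Iterator<Pending> it = pending.iterator();
        while (it.hasNext()) {
            Pending p = it.next();
            if (p.checkpointId() > completedId) {
                continue; // not yet confirmed by this notification
            }
            try {
                commit(p);     // hypothetical commit call
                it.remove();   // only drop entries that committed successfully
            } catch (Exception e) {
                break;         // keep this and all later entries; retry next time
            }
        }
    }

    static void commit(Pending p) {
        System.out.println("committed " + p.checkpointId());
    }

    public static void main(String[] args) {
        Deque<Pending> pending = new ArrayDeque<>();
        pending.add(new Pending(1));
        pending.add(new Pending(2));
        pending.add(new Pending(3));
        onCheckpointComplete(pending, 2); // commits 1 and 2, leaves 3 queued
        System.out.println("remaining: " + pending.size());
    }
}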