Use of org.apache.flink.migration.runtime.state.KvStateSnapshot in project flink by apache.
The class HeapKeyedStateBackend, method restoreOldSavepointKeyedState:
@SuppressWarnings({ "unchecked", "rawtypes", "DeprecatedIsStillUsed" })
@Deprecated
private void restoreOldSavepointKeyedState(Collection<KeyGroupsStateHandle> stateHandles) throws IOException, ClassNotFoundException {
    if (stateHandles.isEmpty()) {
        return;
    }
    Preconditions.checkState(1 == stateHandles.size(), "Only one element expected here.");

    // deserialize the map of state names to legacy KvStateSnapshots with the user-code class loader
    HashMap<String, KvStateSnapshot<K, ?, ?, ?>> namedStates;
    try (FSDataInputStream inputStream = stateHandles.iterator().next().openInputStream()) {
        namedStates = InstantiationUtil.deserializeObject(inputStream, userCodeClassLoader);
    }

    for (Map.Entry<String, KvStateSnapshot<K, ?, ?, ?>> nameToState : namedStates.entrySet()) {
        final String stateName = nameToState.getKey();
        final KvStateSnapshot<K, ?, ?, ?> genericSnapshot = nameToState.getValue();

        if (genericSnapshot instanceof MigrationRestoreSnapshot) {
            MigrationRestoreSnapshot<K, ?, ?> stateSnapshot = (MigrationRestoreSnapshot<K, ?, ?>) genericSnapshot;
            final StateTable rawResultMap = stateSnapshot.deserialize(stateName, this);
            // add named state to the backend
            stateTables.put(stateName, rawResultMap);
        } else {
            throw new IllegalStateException("Unknown state: " + genericSnapshot);
        }
    }
}
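The legacy restore path above hinges on InstantiationUtil: the single KeyGroupsStateHandle is opened as a stream and the whole map of state names to snapshots is rebuilt with the user-code class loader. Below is a minimal, self-contained sketch of that serialize/deserialize round trip using plain byte arrays as stand-in payloads; the class name NamedStateRoundTrip, the map contents, and the state name are illustrative only, not part of Flink.

import java.io.ByteArrayInputStream;
import java.util.HashMap;

import org.apache.flink.util.InstantiationUtil;

public class NamedStateRoundTrip {

    public static void main(String[] args) throws Exception {
        // a map of state names to serializable payloads (stand-in for KvStateSnapshot values)
        HashMap<String, byte[]> namedStates = new HashMap<>();
        namedStates.put("word-counts", new byte[] { 1, 2, 3 });

        // serialize with Java serialization, as the legacy savepoint code does
        byte[] serialized = InstantiationUtil.serializeObject(namedStates);

        // deserialize from a stream with an explicit class loader,
        // mirroring restoreOldSavepointKeyedState(...)
        ClassLoader userCodeClassLoader = Thread.currentThread().getContextClassLoader();
        HashMap<String, byte[]> restored = InstantiationUtil.deserializeObject(
                new ByteArrayInputStream(serialized), userCodeClassLoader);

        System.out.println("restored states: " + restored.keySet());
    }
}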
Use of org.apache.flink.migration.runtime.state.KvStateSnapshot in project flink by apache.
The class SavepointV0Serializer, method convertSubtaskState:
private org.apache.flink.runtime.checkpoint.SubtaskState convertSubtaskState(SubtaskState subtaskState, int parallelInstanceIdx, ClassLoader userClassLoader, long checkpointID) throws Exception {
    SerializedValue<StateHandle<?>> serializedValue = subtaskState.getState();
    StreamTaskStateList stateList = (StreamTaskStateList) serializedValue.deserializeValue(userClassLoader);
    StreamTaskState[] streamTaskStates = stateList.getState(userClassLoader);

    // fixed-size list, one (possibly null) entry per operator in the chain
    List<StreamStateHandle> newChainStateList = Arrays.asList(new StreamStateHandle[streamTaskStates.length]);
    KeyGroupsStateHandle newKeyedState = null;

    for (int chainIdx = 0; chainIdx < streamTaskStates.length; ++chainIdx) {
        StreamTaskState streamTaskState = streamTaskStates[chainIdx];
        if (streamTaskState == null) {
            continue;
        }

        newChainStateList.set(chainIdx, convertOperatorAndFunctionState(streamTaskState));

        // at most one operator in the chain may carry keyed state
        HashMap<String, KvStateSnapshot<?, ?, ?, ?>> oldKeyedState = streamTaskState.getKvStates();
        if (null != oldKeyedState) {
            Preconditions.checkState(null == newKeyedState, "Found more than one keyed state in chain");
            newKeyedState = convertKeyedBackendState(oldKeyedState, parallelInstanceIdx, checkpointID);
        }
    }

    ChainedStateHandle<StreamStateHandle> newChainedState = new ChainedStateHandle<>(newChainStateList);
    ChainedStateHandle<OperatorStateHandle> nopChain = new ChainedStateHandle<>(Arrays.asList(new OperatorStateHandle[newChainedState.getLength()]));
    return new org.apache.flink.runtime.checkpoint.SubtaskState(newChainedState, nopChain, nopChain, newKeyedState, null);
}
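A small detail that is easy to miss in the conversion above: Arrays.asList(new StreamStateHandle[streamTaskStates.length]) yields a fixed-size, null-initialized list backed by that array, so entries can be replaced by index but the list cannot grow. A short sketch of the idiom with plain strings (all names illustrative):

import java.util.Arrays;
import java.util.List;

public class FixedSizeListSketch {

    public static void main(String[] args) {
        // fixed-size list backed by the array: set(i, ...) is allowed, add(...) is not
        List<String> chainState = Arrays.asList(new String[3]);

        chainState.set(0, "head-operator-state");
        chainState.set(2, "tail-operator-state");
        // index 1 stays null, like a chained operator that carries no state

        System.out.println(chainState);  // [head-operator-state, null, tail-operator-state]
        // chainState.add("x");          // would throw UnsupportedOperationException
    }
}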
Use of org.apache.flink.migration.runtime.state.KvStateSnapshot in project flink by apache.
The class MigrationV0ToV1Test, method testSavepointMigrationV0ToV1:
/**
 * Simple test of savepoint methods.
 */
@Test
public void testSavepointMigrationV0ToV1() throws Exception {
    String target = tmp.getRoot().getAbsolutePath();
    assertEquals(0, tmp.getRoot().listFiles().length);

    long checkpointId = ThreadLocalRandom.current().nextLong(Integer.MAX_VALUE);
    int numTaskStates = 4;
    int numSubtaskStates = 16;

    Collection<org.apache.flink.migration.runtime.checkpoint.TaskState> expected = createTaskStatesOld(numTaskStates, numSubtaskStates);
    SavepointV0 savepoint = new SavepointV0(checkpointId, expected);

    assertEquals(SavepointV0.VERSION, savepoint.getVersion());
    assertEquals(checkpointId, savepoint.getCheckpointId());
    assertEquals(expected, savepoint.getOldTaskStates());
    assertFalse(savepoint.getOldTaskStates().isEmpty());

    Exception latestException = null;
    Path path = null;
    FSDataOutputStream fdos = null;
    FileSystem fs = null;

    try {
        // Try to create a FS output stream
        for (int attempt = 0; attempt < 10; attempt++) {
            path = new Path(target, FileUtils.getRandomFilename("savepoint-"));
            if (fs == null) {
                fs = FileSystem.get(path.toUri());
            }
            try {
                fdos = fs.create(path, false);
                break;
            } catch (Exception e) {
                latestException = e;
            }
        }

        if (fdos == null) {
            throw new IOException("Failed to create file output stream at " + path, latestException);
        }

        try (DataOutputStream dos = new DataOutputStream(fdos)) {
            dos.writeInt(SavepointStore.MAGIC_NUMBER);
            dos.writeInt(savepoint.getVersion());
            SavepointV0Serializer.INSTANCE.serializeOld(savepoint, dos);
        }

        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        Savepoint sp = SavepointStore.loadSavepoint(path.toString(), cl);

        int t = 0;
        for (TaskState taskState : sp.getTaskStates()) {
            for (int p = 0; p < taskState.getParallelism(); ++p) {
                SubtaskState subtaskState = taskState.getState(p);
                ChainedStateHandle<StreamStateHandle> legacyOperatorState = subtaskState.getLegacyOperatorState();

                for (int c = 0; c < legacyOperatorState.getLength(); ++c) {
                    StreamStateHandle stateHandle = legacyOperatorState.get(c);
                    try (InputStream is = stateHandle.openInputStream()) {
                        Tuple4<Integer, Integer, Integer, Integer> expTestState = new Tuple4<>(0, t, p, c);
                        Tuple4<Integer, Integer, Integer, Integer> actTestState;

                        // check function state
                        if (p % 4 != 0) {
                            assertEquals(1, is.read());
                            actTestState = InstantiationUtil.deserializeObject(is, cl);
                            assertEquals(expTestState, actTestState);
                        } else {
                            assertEquals(0, is.read());
                        }

                        // check operator state
                        expTestState.f0 = 1;
                        actTestState = InstantiationUtil.deserializeObject(is, cl);
                        assertEquals(expTestState, actTestState);
                    }
                }

                // check keyed state
                KeyGroupsStateHandle keyGroupsStateHandle = subtaskState.getManagedKeyedState();
                if (t % 3 != 0) {
                    assertEquals(1, keyGroupsStateHandle.getNumberOfKeyGroups());
                    assertEquals(p, keyGroupsStateHandle.getGroupRangeOffsets().getKeyGroupRange().getStartKeyGroup());

                    ByteStreamStateHandle stateHandle = (ByteStreamStateHandle) keyGroupsStateHandle.getDelegateStateHandle();
                    HashMap<String, KvStateSnapshot<?, ?, ?, ?>> testKeyedState = MigrationInstantiationUtil.deserializeObject(stateHandle.getData(), cl);

                    assertEquals(2, testKeyedState.size());
                    for (KvStateSnapshot<?, ?, ?, ?> snapshot : testKeyedState.values()) {
                        MemValueState.Snapshot<?, ?, ?> castedSnapshot = (MemValueState.Snapshot<?, ?, ?>) snapshot;
                        byte[] data = castedSnapshot.getData();
                        assertEquals(t, data[0]);
                        assertEquals(p, data[1]);
                    }
                } else {
                    assertEquals(null, keyGroupsStateHandle);
                }
            }
            ++t;
        }

        savepoint.dispose();
    } finally {
        // Dispose
        SavepointStore.removeSavepointFile(path.toString());
    }
}
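The bytes written in the test follow a simple layout: a four-byte magic number, a four-byte format version, and then the version-specific payload produced by the savepoint serializer, which is what SavepointStore.loadSavepoint expects to find. Below is a hedged sketch of reading that header back from disk; the class name and the command-line argument are hypothetical, and the magic and version values are only printed, not asserted:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class SavepointHeaderPeek {

    public static void main(String[] args) throws IOException {
        // path to a savepoint file written as in the test: magic number, version, then payload
        String savepointFile = args[0];

        try (DataInputStream dis = new DataInputStream(new FileInputStream(savepointFile))) {
            int magic = dis.readInt();   // corresponds to SavepointStore.MAGIC_NUMBER
            int version = dis.readInt(); // e.g. SavepointV0.VERSION for the legacy format

            System.out.println("magic=0x" + Integer.toHexString(magic) + ", version=" + version);
            // the remaining bytes are the version-specific serializer payload
        }
    }
}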
Use of org.apache.flink.migration.runtime.state.KvStateSnapshot in project flink by apache.
The class MigrationV0ToV1Test, method createTaskStatesOld:
private static Collection<org.apache.flink.migration.runtime.checkpoint.TaskState> createTaskStatesOld(int numTaskStates, int numSubtaskStates) throws Exception {
    List<org.apache.flink.migration.runtime.checkpoint.TaskState> taskStates = new ArrayList<>(numTaskStates);
    for (int i = 0; i < numTaskStates; i++) {
        org.apache.flink.migration.runtime.checkpoint.TaskState taskState = new org.apache.flink.migration.runtime.checkpoint.TaskState(new JobVertexID(), numSubtaskStates);
        for (int j = 0; j < numSubtaskStates; j++) {
            StreamTaskState[] streamTaskStates = new StreamTaskState[2];
            for (int k = 0; k < streamTaskStates.length; k++) {
                StreamTaskState state = new StreamTaskState();

                // test payload encodes (stateKind, taskIdx, subtaskIdx, chainIdx)
                Tuple4<Integer, Integer, Integer, Integer> testState = new Tuple4<>(0, i, j, k);
                if (j % 4 != 0) {
                    state.setFunctionState(new SerializedStateHandle<Serializable>(testState));
                }
                testState = new Tuple4<>(1, i, j, k);
                state.setOperatorState(new SerializedStateHandle<>(testState));

                if ((0 == k) && (i % 3 != 0)) {
                    HashMap<String, KvStateSnapshot<?, ?, ?, ?>> testKeyedState = new HashMap<>(2);
                    for (int l = 0; l < 2; ++l) {
                        String name = "keyed-" + l;
                        KvStateSnapshot<?, ?, ?, ?> testKeyedSnapshot = new MemValueState.Snapshot<>(IntSerializer.INSTANCE, VoidNamespaceSerializer.INSTANCE, IntSerializer.INSTANCE, new ValueStateDescriptor<>(name, Integer.class, 0), new byte[] { (byte) i, (byte) j });
                        testKeyedState.put(name, testKeyedSnapshot);
                    }
                    state.setKvStates(testKeyedState);
                }
                streamTaskStates[k] = state;
            }

            StreamTaskStateList streamTaskStateList = new StreamTaskStateList(streamTaskStates);
            org.apache.flink.migration.util.SerializedValue<org.apache.flink.migration.runtime.state.StateHandle<?>> handle = new org.apache.flink.migration.util.SerializedValue<org.apache.flink.migration.runtime.state.StateHandle<?>>(streamTaskStateList);
            taskState.putState(j, new org.apache.flink.migration.runtime.checkpoint.SubtaskState(handle, 0, 0));
        }
        taskStates.add(taskState);
    }
    return taskStates;
}
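Reading the generator and the assertions in testSavepointMigrationV0ToV1 side by side: every serialized Tuple4 encodes (stateKind, taskIndex, subtaskIndex, chainIndex), with kind 0 for function state and kind 1 for operator state, and the two-byte keyed payload encodes (taskIndex, subtaskIndex). A tiny, hypothetical helper that spells out this encoding (not part of the Flink test code):

import org.apache.flink.api.java.tuple.Tuple4;

/** Hypothetical helper documenting the test-state encoding used by createTaskStatesOld. */
public class TestStateEncoding {

    public static final int FUNCTION_STATE = 0;
    public static final int OPERATOR_STATE = 1;

    /** (stateKind, taskIndex, subtaskIndex, chainIndex), as written by createTaskStatesOld. */
    public static Tuple4<Integer, Integer, Integer, Integer> encode(
            int stateKind, int taskIndex, int subtaskIndex, int chainIndex) {
        return new Tuple4<>(stateKind, taskIndex, subtaskIndex, chainIndex);
    }

    /** The two-byte keyed-state payload: (taskIndex, subtaskIndex). */
    public static byte[] encodeKeyedPayload(int taskIndex, int subtaskIndex) {
        return new byte[] { (byte) taskIndex, (byte) subtaskIndex };
    }
}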