Use of org.apache.flink.runtime.state.StateHandleID in project flink by apache.
The class RocksDBStateDownloaderTest, method testMultiThreadRestoreThreadPoolExceptionRethrow.
/**
 * Tests that an exception raised in the thread pool is rethrown to the main thread.
 */
@Test
public void testMultiThreadRestoreThreadPoolExceptionRethrow() {
    SpecifiedException expectedException =
            new SpecifiedException("throw exception while multi thread restore.");
    StreamStateHandle stateHandle = new ThrowingStateHandle(expectedException);
    Map<StateHandleID, StreamStateHandle> stateHandles = new HashMap<>(1);
    stateHandles.put(new StateHandleID("state1"), stateHandle);
    IncrementalRemoteKeyedStateHandle incrementalKeyedStateHandle =
            new IncrementalRemoteKeyedStateHandle(
                    UUID.randomUUID(), KeyGroupRange.EMPTY_KEY_GROUP_RANGE, 1,
                    stateHandles, stateHandles, stateHandle);
    try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(5)) {
        rocksDBStateDownloader.transferAllStateDataToDirectory(
                incrementalKeyedStateHandle,
                temporaryFolder.newFolder().toPath(),
                new CloseableRegistry());
        fail();
    } catch (Exception e) {
        assertEquals(expectedException, e);
    }
}
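The snippet relies on two test-local helpers that are not shown here: SpecifiedException and ThrowingStateHandle. A minimal sketch of how they could look (the exact shape is an assumption, not the actual Flink test code; extending ByteStreamStateHandle avoids implementing the full StreamStateHandle interface, and it needs java.io.IOException, org.apache.flink.core.fs.FSDataInputStream and org.apache.flink.runtime.state.memory.ByteStreamStateHandle on the import list):

// Hypothetical reconstruction of the test-local exception type.
private static class SpecifiedException extends IOException {
    SpecifiedException(String message) {
        super(message);
    }
}

// Hypothetical reconstruction: only the read path needs to fail. The downloader's
// worker threads call openInputStream() first, so the exception originates inside
// the transfer thread pool and must be rethrown to the calling thread.
private static class ThrowingStateHandle extends ByteStreamStateHandle {
    private final IOException expectedException;

    ThrowingStateHandle(IOException expectedException) {
        super("throwing-handle", new byte[0]);
        this.expectedException = expectedException;
    }

    @Override
    public FSDataInputStream openInputStream() throws IOException {
        throw expectedException;
    }
}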
Use of org.apache.flink.runtime.state.StateHandleID in project flink by apache.
The class RocksDBStateUploaderTest, method testMultiThreadUploadThreadPoolExceptionRethrow.
/**
 * Tests that an exception raised in the thread pool is rethrown to the main thread.
 */
@Test
public void testMultiThreadUploadThreadPoolExceptionRethrow() throws IOException {
    SpecifiedException expectedException =
            new SpecifiedException("throw exception while multi thread upload states.");
    CheckpointStateOutputStream outputStream =
            createFailingCheckpointStateOutputStream(expectedException);
    CheckpointStreamFactory checkpointStreamFactory =
            new CheckpointStreamFactory() {
                @Override
                public CheckpointStateOutputStream createCheckpointStateOutputStream(
                        CheckpointedStateScope scope) throws IOException {
                    return outputStream;
                }

                @Override
                public boolean canFastDuplicate(
                        StreamStateHandle stateHandle, CheckpointedStateScope scope)
                        throws IOException {
                    return false;
                }

                @Override
                public List<StreamStateHandle> duplicate(
                        List<StreamStateHandle> stateHandles, CheckpointedStateScope scope)
                        throws IOException {
                    return null;
                }
            };

    File file = temporaryFolder.newFile(String.valueOf(UUID.randomUUID()));
    generateRandomFileContent(file.getPath(), 20);
    Map<StateHandleID, Path> filePaths = new HashMap<>(1);
    filePaths.put(new StateHandleID("mockHandleID"), file.toPath());

    try (RocksDBStateUploader rocksDBStateUploader = new RocksDBStateUploader(5)) {
        rocksDBStateUploader.uploadFilesToCheckpointFs(
                filePaths, checkpointStreamFactory, CheckpointedStateScope.SHARED,
                new CloseableRegistry());
        fail();
    } catch (Exception e) {
        assertEquals(expectedException, e);
    }
}
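Here, createFailingCheckpointStateOutputStream and generateRandomFileContent are test-local helpers that are not shown; generateRandomFileContent can be assumed to simply write the requested number of random bytes to the given path. A sketch of the failing stream follows, assuming the abstract methods inherited from CheckpointStateOutputStream/FSDataOutputStream shown below (treat the exact override set as an assumption for the Flink version in use):

private CheckpointStateOutputStream createFailingCheckpointStateOutputStream(
        IOException failureException) {
    return new CheckpointStateOutputStream() {
        @Override
        public long getPos() {
            return 0;
        }

        @Override
        public void write(int b) throws IOException {
            // Every write issued by an upload thread fails immediately, so the
            // exception surfaces inside the upload thread pool.
            throw failureException;
        }

        @Override
        public void flush() {}

        @Override
        public void sync() {}

        @Override
        public StreamStateHandle closeAndGetHandle() {
            return new ByteStreamStateHandle("testHandle", new byte[0]);
        }

        @Override
        public void close() {}
    };
}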
Use of org.apache.flink.runtime.state.StateHandleID in project flink by apache.
The class SchedulerUtilsTest, method buildIncrementalHandle.
private IncrementalRemoteKeyedStateHandle buildIncrementalHandle(
        StateHandleID key, StreamStateHandle shared, UUID backendIdentifier) {
    StreamStateHandle meta = new ByteStreamStateHandle("meta", new byte[] {1, 2, 3});
    Map<StateHandleID, StreamStateHandle> sharedStateMap = new HashMap<>();
    sharedStateMap.put(key, shared);
    return new IncrementalRemoteKeyedStateHandle(
            backendIdentifier, KeyGroupRange.EMPTY_KEY_GROUP_RANGE, 1,
            sharedStateMap, emptyMap(), meta);
}
Use of org.apache.flink.runtime.state.StateHandleID in project flink by apache.
The class SchedulerUtilsTest, method testSharedStateRegistration.
/**
* Check that a {@link SharedStateRegistryFactory} used by {@link SchedulerUtils} registers
* shared checkpoint state on restore.
*/
@Test
public void testSharedStateRegistration() throws Exception {
    UUID backendId = UUID.randomUUID();
    StateHandleID key = new StateHandleID("k0");
    StreamStateHandle handle = new ByteStreamStateHandle("h0", new byte[] {1, 2, 3});

    CheckpointRecoveryFactory recoveryFactory =
            buildRecoveryFactory(buildCheckpoint(buildIncrementalHandle(key, handle, backendId)));
    CompletedCheckpointStore checkpointStore =
            SchedulerUtils.createCompletedCheckpointStore(
                    new Configuration(), recoveryFactory, Executors.directExecutor(), log, new JobID());
    SharedStateRegistry sharedStateRegistry = checkpointStore.getSharedStateRegistry();

    IncrementalRemoteKeyedStateHandle newHandle =
            buildIncrementalHandle(key, new PlaceholderStreamStateHandle(1L), backendId);
    newHandle.registerSharedStates(sharedStateRegistry, 1L);

    assertSame(handle, newHandle.getSharedState().get(key));
}
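The final assertSame relies on the placeholder-resolution behavior of the shared state registry: re-registering the same key with a PlaceholderStreamStateHandle returns the concrete handle registered earlier. A standalone illustration of that behavior, assuming the SharedStateRegistryImpl implementation and the registerReference(key, handle, checkpointId) signature of recent Flink versions:

// Illustrative sketch only; the registry implementation and method signature are
// assumptions about the Flink version at hand, not code from SchedulerUtilsTest.
SharedStateRegistry registry = new SharedStateRegistryImpl();
SharedStateRegistryKey registryKey = new SharedStateRegistryKey("k0");
StreamStateHandle concrete = new ByteStreamStateHandle("h0", new byte[] {1, 2, 3});

// First registration stores the concrete handle for checkpoint 1.
registry.registerReference(registryKey, concrete, 1L);

// Registering a placeholder for the same key resolves to the concrete handle,
// which is what assertSame(handle, newHandle.getSharedState().get(key)) verifies above.
StreamStateHandle resolved =
        registry.registerReference(registryKey, new PlaceholderStreamStateHandle(1L), 1L);
// resolved == concrete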
Use of org.apache.flink.runtime.state.StateHandleID in project flink by apache.
The class MetadataV2V3SerializerBase, method deserializeIncrementalStateHandle.
private static IncrementalRemoteKeyedStateHandle deserializeIncrementalStateHandle(
        DataInputStream dis, @Nullable DeserializationContext context, int stateHandleType)
        throws IOException {
    boolean isV2Format = INCREMENTAL_KEY_GROUPS_HANDLE_V2 == stateHandleType;
    long checkpointId = dis.readLong();
    String backendId = dis.readUTF();
    int startKeyGroup = dis.readInt();
    int numKeyGroups = dis.readInt();
    long checkpointedSize = isV2Format ? dis.readLong() : UNKNOWN_CHECKPOINTED_SIZE;
    KeyGroupRange keyGroupRange =
            KeyGroupRange.of(startKeyGroup, startKeyGroup + numKeyGroups - 1);
    StreamStateHandle metaDataStateHandle = deserializeStreamStateHandle(dis, context);
    Map<StateHandleID, StreamStateHandle> sharedStates =
            deserializeStreamStateHandleMap(dis, context);
    Map<StateHandleID, StreamStateHandle> privateStates =
            deserializeStreamStateHandleMap(dis, context);

    UUID uuid;
    try {
        uuid = UUID.fromString(backendId);
    } catch (Exception ex) {
        // compatibility with old format pre FLINK-6964:
        uuid = UUID.nameUUIDFromBytes(backendId.getBytes(StandardCharsets.UTF_8));
    }

    StateHandleID stateHandleId =
            isV2Format ? new StateHandleID(dis.readUTF()) : StateHandleID.randomStateHandleId();
    return IncrementalRemoteKeyedStateHandle.restore(
            uuid, keyGroupRange, checkpointId, sharedStates, privateStates,
            metaDataStateHandle, checkpointedSize, stateHandleId);
}
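For orientation, the byte layout this deserializer expects can be sketched by mirroring the reads as writes. The following is only an illustrative sketch of the field order, assuming the usual getters on IncrementalRemoteKeyedStateHandle and a java.io.DataOutputStream; it is not the actual Flink serializer, and the handle and map encodings are elided because they are produced by the write-side counterparts of deserializeStreamStateHandle and deserializeStreamStateHandleMap:

// Hypothetical mirror of the read path above (field order only).
private static void sketchIncrementalStateHandleLayout(
        DataOutputStream dos, IncrementalRemoteKeyedStateHandle handle, boolean isV2Format)
        throws IOException {
    dos.writeLong(handle.getCheckpointId());                         // checkpointId
    dos.writeUTF(handle.getBackendIdentifier().toString());          // backendId (UUID string)
    dos.writeInt(handle.getKeyGroupRange().getStartKeyGroup());      // startKeyGroup
    dos.writeInt(handle.getKeyGroupRange().getNumberOfKeyGroups());  // numKeyGroups
    if (isV2Format) {
        dos.writeLong(handle.getCheckpointedSize());                 // checkpointedSize (V2 only)
    }
    // meta data handle, shared-state map, and private-state map follow here,
    // written by the serializer-side counterparts of the deserialize* helpers above
    if (isV2Format) {
        dos.writeUTF(handle.getStateHandleId().getKeyString());      // stateHandleId (V2 only)
    }
}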