Use of org.apache.flink.runtime.state.CheckpointedStateScope in project flink by apache.
In class RocksDBStateUploaderTest, method testMultiThreadUploadThreadPoolExceptionRethrow.
/**
 * Tests that an exception raised in the upload thread pool is rethrown to the main thread.
 */
@Test
public void testMultiThreadUploadThreadPoolExceptionRethrow() throws IOException {
    SpecifiedException expectedException =
            new SpecifiedException("throw exception while multi thread upload states.");
    CheckpointStateOutputStream outputStream =
            createFailingCheckpointStateOutputStream(expectedException);
    CheckpointStreamFactory checkpointStreamFactory =
            new CheckpointStreamFactory() {
                @Override
                public CheckpointStateOutputStream createCheckpointStateOutputStream(
                        CheckpointedStateScope scope) throws IOException {
                    return outputStream;
                }

                @Override
                public boolean canFastDuplicate(
                        StreamStateHandle stateHandle, CheckpointedStateScope scope)
                        throws IOException {
                    return false;
                }

                @Override
                public List<StreamStateHandle> duplicate(
                        List<StreamStateHandle> stateHandles, CheckpointedStateScope scope)
                        throws IOException {
                    return null;
                }
            };

    File file = temporaryFolder.newFile(String.valueOf(UUID.randomUUID()));
    generateRandomFileContent(file.getPath(), 20);

    Map<StateHandleID, Path> filePaths = new HashMap<>(1);
    filePaths.put(new StateHandleID("mockHandleID"), file.toPath());

    try (RocksDBStateUploader rocksDBStateUploader = new RocksDBStateUploader(5)) {
        rocksDBStateUploader.uploadFilesToCheckpointFs(
                filePaths,
                checkpointStreamFactory,
                CheckpointedStateScope.SHARED,
                new CloseableRegistry());
        fail();
    } catch (Exception e) {
        assertEquals(expectedException, e);
    }
}
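The helper createFailingCheckpointStateOutputStream is referenced above but not shown on this page. A minimal sketch of what it might look like, assuming SpecifiedException is a test-local IOException subclass and that the helper simply returns a CheckpointStateOutputStream whose writes throw the given exception (the actual body in the Flink test may differ):

// illustrative sketch only: a stream that fails on every write, so each upload
// thread in RocksDBStateUploader runs into the injected exception
private CheckpointStateOutputStream createFailingCheckpointStateOutputStream(
        IOException failureException) {
    return new CheckpointStateOutputStream() {
        @Override
        public long getPos() {
            return 0;
        }

        @Override
        public void write(int b) throws IOException {
            // every write fails with the exception the test expects to be rethrown
            throw failureException;
        }

        @Override
        public void flush() {}

        @Override
        public void sync() {}

        @Override
        public StreamStateHandle closeAndGetHandle() {
            return null;
        }

        @Override
        public void close() {}
    };
}

With such a stream in place, uploadFilesToCheckpointFs fails inside the worker threads of the five-thread pool, and the assertEquals at the end verifies that exactly the injected exception surfaces on the calling thread.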
Use of org.apache.flink.runtime.state.CheckpointedStateScope in project flink by apache.
In class RocksDBAsyncSnapshotTest, method testCancelFullyAsyncCheckpoints.
/**
 * This test ensures that canceling asynchronous snapshots works as expected and does not block.
 */
@Test
public void testCancelFullyAsyncCheckpoints() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    OneInputStreamTask::new,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.configureForKeyedStream(value -> value, BasicTypeInfo.STRING_TYPE_INFO);

    StreamConfig streamConfig = testHarness.getStreamConfig();
    File dbDir = temporaryFolder.newFolder();

    final EmbeddedRocksDBStateBackend.PriorityQueueStateType timerServicePriorityQueueType =
            RocksDBOptions.TIMER_SERVICE_FACTORY.defaultValue();

    final int skipStreams;
    if (timerServicePriorityQueueType == EmbeddedRocksDBStateBackend.PriorityQueueStateType.HEAP) {
        // we skip the first created stream, because it is used to checkpoint the timer service,
        // which is currently not asynchronous.
        skipStreams = 1;
    } else if (timerServicePriorityQueueType
            == EmbeddedRocksDBStateBackend.PriorityQueueStateType.ROCKSDB) {
        skipStreams = 0;
    } else {
        throw new AssertionError(
                String.format(
                        "Unknown timer service priority queue type %s.",
                        timerServicePriorityQueueType));
    }

    // this is the proper instance that we need to call.
    BlockerCheckpointStreamFactory blockerCheckpointStreamFactory =
            new BlockerCheckpointStreamFactory(4 * 1024 * 1024) {

                int count = skipStreams;

                @Override
                public CheckpointStateOutputStream createCheckpointStateOutputStream(
                        CheckpointedStateScope scope) throws IOException {
                    if (count > 0) {
                        --count;
                        return new BlockingCheckpointOutputStream(
                                new MemCheckpointStreamFactory.MemoryCheckpointOutputStream(maxSize),
                                null,
                                null,
                                Integer.MAX_VALUE);
                    } else {
                        return super.createCheckpointStateOutputStream(scope);
                    }
                }
            };

    // to avoid serialization of the above factory instance, we need to pass it in
    // through a static variable
    StateBackend stateBackend =
            new BackendForTestStream(new StaticForwardFactory(blockerCheckpointStreamFactory));

    RocksDBStateBackend backend = new RocksDBStateBackend(stateBackend);
    backend.setDbStoragePath(dbDir.getAbsolutePath());

    streamConfig.setStateBackend(backend);
    streamConfig.setStreamOperator(new AsyncCheckpointOperator());
    streamConfig.setOperatorID(new OperatorID());

    TestTaskStateManager taskStateManagerTestMock = new TestTaskStateManager();
    StreamMockEnvironment mockEnv =
            new StreamMockEnvironment(
                    testHarness.jobConfig,
                    testHarness.taskConfig,
                    testHarness.memorySize,
                    new MockInputSplitProvider(),
                    testHarness.bufferSize,
                    taskStateManagerTestMock);

    blockerCheckpointStreamFactory.setBlockerLatch(new OneShotLatch());
    blockerCheckpointStreamFactory.setWaiterLatch(new OneShotLatch());

    testHarness.invoke(mockEnv);
    testHarness.waitForTaskRunning();

    final OneInputStreamTask<String, String> task = testHarness.getTask();

    task.triggerCheckpointAsync(
                    new CheckpointMetaData(42, 17),
                    CheckpointOptions.forCheckpointWithDefaultLocation())
            .get();

    testHarness.processElement(new StreamRecord<>("Wohoo", 0));
    blockerCheckpointStreamFactory.getWaiterLatch().await();

    task.cancel();

    blockerCheckpointStreamFactory.getBlockerLatch().trigger();
    testHarness.endInput();

    ExecutorService threadPool = task.getAsyncOperationsThreadPool();
    threadPool.shutdown();
    Assert.assertTrue(threadPool.awaitTermination(60_000, TimeUnit.MILLISECONDS));

    Set<BlockingCheckpointOutputStream> createdStreams =
            blockerCheckpointStreamFactory.getAllCreatedStreams();
    for (BlockingCheckpointOutputStream stream : createdStreams) {
        Assert.assertTrue(
                "Not all of the " + createdStreams.size() + " created streams have been closed.",
                stream.isClosed());
    }

    try {
        testHarness.waitForTaskCompletion();
        fail("Operation completed. Cancel failed.");
    } catch (Exception expected) {
        Throwable cause = expected.getCause();
        if (!(cause instanceof CancelTaskException)) {
            fail("Unexpected exception: " + expected);
        }
    }
}
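The comment about avoiding serialization of the blocking factory points at a common pattern in these harness tests: the state backend is serialized into the StreamConfig, so a non-serializable CheckpointStreamFactory cannot travel with it and is instead parked in a static field of a small serializable forwarder. The StaticForwardFactory used above is such a forwarder but is not shown on this page; the sketch below is illustrative only, with a hypothetical name and shape, and is not the Flink test's actual class.

// illustrative sketch (hypothetical class, not the StaticForwardFactory from the test):
// a serializable wrapper whose serialized form carries no state; the real factory is
// kept in a static field and is simply looked up again after deserialization, which
// works because the test harness runs everything in a single JVM.
static class StaticFactoryForwarder implements java.io.Serializable {

    static CheckpointStreamFactory factory;

    StaticFactoryForwarder(CheckpointStreamFactory f) {
        factory = f;
    }

    CheckpointStreamFactory get() {
        return factory;
    }
}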