Use of org.apache.flink.runtime.util.BlockingCheckpointOutputStream in project flink by apache.
Class OperatorStateBackendTest, method testSnapshotAsyncCancel.
@Test
public void testSnapshotAsyncCancel() throws Exception {
    DefaultOperatorStateBackend operatorStateBackend =
            new DefaultOperatorStateBackendBuilder(
                            OperatorStateBackendTest.class.getClassLoader(),
                            new ExecutionConfig(),
                            true,
                            emptyStateHandles,
                            new CloseableRegistry())
                    .build();

    ListStateDescriptor<MutableType> stateDescriptor1 =
            new ListStateDescriptor<>("test1", new JavaSerializer<MutableType>());
    ListState<MutableType> listState1 = operatorStateBackend.getListState(stateDescriptor1);
    listState1.add(MutableType.of(42));
    listState1.add(MutableType.of(4711));

    BlockerCheckpointStreamFactory streamFactory = new BlockerCheckpointStreamFactory(1024 * 1024);
    OneShotLatch waiterLatch = new OneShotLatch();
    OneShotLatch blockerLatch = new OneShotLatch();
    streamFactory.setWaiterLatch(waiterLatch);
    streamFactory.setBlockerLatch(blockerLatch);

    RunnableFuture<SnapshotResult<OperatorStateHandle>> runnableFuture =
            operatorStateBackend.snapshot(
                    1, 1, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation());

    ExecutorService executorService = Executors.newFixedThreadPool(1);
    executorService.submit(runnableFuture);

    // wait until the async checkpoint is in the stream's write code, then continue
    waiterLatch.await();

    // cancel the future, which should close the underlying stream
    runnableFuture.cancel(true);
    for (BlockingCheckpointOutputStream stream : streamFactory.getAllCreatedStreams()) {
        Assert.assertTrue(stream.isClosed());
    }

    // we allow the stream under test to proceed
    blockerLatch.trigger();

    try {
        runnableFuture.get(60, TimeUnit.SECONDS);
        Assert.fail();
    } catch (CancellationException ignore) {
    }
}
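The final try/catch relies on standard java.util.concurrent semantics rather than anything Flink-specific: once a RunnableFuture has been cancelled, get() throws CancellationException. A minimal, JDK-only sketch of that behaviour (the class name CancelledFutureSketch is illustrative, not part of Flink's test code):

import java.util.concurrent.CancellationException;
import java.util.concurrent.FutureTask;

public class CancelledFutureSketch {

    public static void main(String[] args) throws Exception {
        // A FutureTask is a RunnableFuture, just like the snapshot future in the test above.
        FutureTask<String> future = new FutureTask<>(() -> "done");

        // Cancel before the task ever runs; mayInterruptIfRunning = true.
        future.cancel(true);

        try {
            future.get();
            throw new AssertionError("get() must not return normally after cancel()");
        } catch (CancellationException expected) {
            // this is the branch the test above reaches instead of Assert.fail()
        }
    }
}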
Use of org.apache.flink.runtime.util.BlockingCheckpointOutputStream in project flink by apache.
Class RocksDBAsyncSnapshotTest, method testCancelFullyAsyncCheckpoints.
/**
 * This test ensures that canceling asynchronous snapshots works as expected and does not
 * block.
 */
@Test
public void testCancelFullyAsyncCheckpoints() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    OneInputStreamTask::new,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);

    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.configureForKeyedStream(value -> value, BasicTypeInfo.STRING_TYPE_INFO);

    StreamConfig streamConfig = testHarness.getStreamConfig();
    File dbDir = temporaryFolder.newFolder();

    final EmbeddedRocksDBStateBackend.PriorityQueueStateType timerServicePriorityQueueType =
            RocksDBOptions.TIMER_SERVICE_FACTORY.defaultValue();

    final int skipStreams;
    if (timerServicePriorityQueueType == EmbeddedRocksDBStateBackend.PriorityQueueStateType.HEAP) {
        // we skip the first created stream, because it is used to checkpoint the timer service,
        // which is currently not asynchronous.
        skipStreams = 1;
    } else if (timerServicePriorityQueueType
            == EmbeddedRocksDBStateBackend.PriorityQueueStateType.ROCKSDB) {
        skipStreams = 0;
    } else {
        throw new AssertionError(
                String.format(
                        "Unknown timer service priority queue type %s.",
                        timerServicePriorityQueueType));
    }

    // this is the proper instance that we need to call.
    BlockerCheckpointStreamFactory blockerCheckpointStreamFactory =
            new BlockerCheckpointStreamFactory(4 * 1024 * 1024) {

                int count = skipStreams;

                @Override
                public CheckpointStateOutputStream createCheckpointStateOutputStream(
                        CheckpointedStateScope scope) throws IOException {
                    if (count > 0) {
                        --count;
                        return new BlockingCheckpointOutputStream(
                                new MemCheckpointStreamFactory.MemoryCheckpointOutputStream(maxSize),
                                null,
                                null,
                                Integer.MAX_VALUE);
                    } else {
                        return super.createCheckpointStateOutputStream(scope);
                    }
                }
            };

    // to avoid serialization of the above factory instance, we need to pass it in
    // through a static variable
    StateBackend stateBackend =
            new BackendForTestStream(new StaticForwardFactory(blockerCheckpointStreamFactory));

    RocksDBStateBackend backend = new RocksDBStateBackend(stateBackend);
    backend.setDbStoragePath(dbDir.getAbsolutePath());

    streamConfig.setStateBackend(backend);
    streamConfig.setStreamOperator(new AsyncCheckpointOperator());
    streamConfig.setOperatorID(new OperatorID());

    TestTaskStateManager taskStateManagerTestMock = new TestTaskStateManager();
    StreamMockEnvironment mockEnv =
            new StreamMockEnvironment(
                    testHarness.jobConfig,
                    testHarness.taskConfig,
                    testHarness.memorySize,
                    new MockInputSplitProvider(),
                    testHarness.bufferSize,
                    taskStateManagerTestMock);

    blockerCheckpointStreamFactory.setBlockerLatch(new OneShotLatch());
    blockerCheckpointStreamFactory.setWaiterLatch(new OneShotLatch());

    testHarness.invoke(mockEnv);
    testHarness.waitForTaskRunning();

    final OneInputStreamTask<String, String> task = testHarness.getTask();

    task.triggerCheckpointAsync(
                    new CheckpointMetaData(42, 17),
                    CheckpointOptions.forCheckpointWithDefaultLocation())
            .get();

    testHarness.processElement(new StreamRecord<>("Wohoo", 0));
    blockerCheckpointStreamFactory.getWaiterLatch().await();

    task.cancel();

    blockerCheckpointStreamFactory.getBlockerLatch().trigger();
    testHarness.endInput();

    ExecutorService threadPool = task.getAsyncOperationsThreadPool();
    threadPool.shutdown();
    Assert.assertTrue(threadPool.awaitTermination(60_000, TimeUnit.MILLISECONDS));

    Set<BlockingCheckpointOutputStream> createdStreams =
            blockerCheckpointStreamFactory.getAllCreatedStreams();
    for (BlockingCheckpointOutputStream stream : createdStreams) {
        Assert.assertTrue(
                "Not all of the " + createdStreams.size() + " created streams have been closed.",
                stream.isClosed());
    }

    try {
        testHarness.waitForTaskCompletion();
        fail("Operation completed. Cancel failed.");
    } catch (Exception expected) {
        Throwable cause = expected.getCause();
        if (!(cause instanceof CancelTaskException)) {
            fail("Unexpected exception: " + expected);
        }
    }
}
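The serialization comment in the middle of this test points at a common trick in these harness tests: the blocking stream factory holds latches and cannot travel inside the serialized task configuration, so a serializable forwarder is shipped instead and resolves the real factory from a static field at runtime. A minimal sketch of that pattern with hypothetical names (this is not the actual BackendForTestStream/StaticForwardFactory code from Flink's test sources):

import java.io.IOException;
import java.io.OutputStream;
import java.io.Serializable;

public class StaticForwardingSketch {

    /** Stand-in for the real, non-serializable factory interface. */
    interface StreamFactorySketch {
        OutputStream createStream() throws IOException;
    }

    /** Set by the test thread before the task is started; never serialized. */
    static StreamFactorySketch currentFactory;

    /** Serializable forwarder that travels with the task configuration. */
    static class StaticForwardFactorySketch implements StreamFactorySketch, Serializable {
        private static final long serialVersionUID = 1L;

        @Override
        public OutputStream createStream() throws IOException {
            // Delegate to whatever factory the test registered statically.
            return currentFactory.createStream();
        }
    }
}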
Use of org.apache.flink.runtime.util.BlockingCheckpointOutputStream in project flink by apache.
Class EmbeddedRocksDBStateBackendTest, method testCancelRunningSnapshot.
@Test
public void testCancelRunningSnapshot() throws Exception {
    setupRocksKeyedStateBackend();
    try {
        RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot =
                keyedStateBackend.snapshot(
                        0L, 0L, testStreamFactory, CheckpointOptions.forCheckpointWithDefaultLocation());

        Thread asyncSnapshotThread = new Thread(snapshot);
        asyncSnapshotThread.start();

        // wait for snapshot to run
        waiter.await();
        waiter.reset();

        runStateUpdates();

        snapshot.cancel(true);

        // allow checkpointing to start writing
        blocker.trigger();

        for (BlockingCheckpointOutputStream stream : testStreamFactory.getAllCreatedStreams()) {
            assertTrue(stream.isClosed());
        }

        // wait for snapshot stream writing to run
        waiter.await();

        try {
            snapshot.get();
            fail();
        } catch (Exception ignored) {
        }

        asyncSnapshotThread.join();
        verifyRocksObjectsReleased();
    } finally {
        this.keyedStateBackend.dispose();
        this.keyedStateBackend = null;
    }
    verifyRocksDBStateUploaderClosed();
}
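The two waiter.await() calls in this test only work because Flink's OneShotLatch can be re-armed with reset(); the second rendezvous cannot fire early, since the snapshot thread is held back by the blocker latch until after the reset. A small sketch of that ordering using only OneShotLatch (the thread body is an illustrative stand-in for the snapshot thread, not the actual backend code):

import org.apache.flink.core.testutils.OneShotLatch;

public class LatchResetSketch {

    public static void main(String[] args) throws Exception {
        final OneShotLatch waiter = new OneShotLatch();
        final OneShotLatch blocker = new OneShotLatch();

        // Illustrative stand-in for the snapshot thread in the test above.
        Thread snapshotThread = new Thread(() -> {
            try {
                waiter.trigger();   // first signal: "snapshot has started"
                blocker.await();    // held back until the test releases it
                waiter.trigger();   // second signal: "stream writing has started"
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        snapshotThread.start();

        waiter.await();     // first rendezvous
        waiter.reset();     // re-arm before the snapshot thread can trigger again
        blocker.trigger();  // let the snapshot thread continue
        waiter.await();     // second rendezvous
        snapshotThread.join();
    }
}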
Use of org.apache.flink.runtime.util.BlockingCheckpointOutputStream in project flink by apache.
Class EmbeddedRocksDBStateBackendTest, method testCompletingSnapshot.
@Test
public void testCompletingSnapshot() throws Exception {
    setupRocksKeyedStateBackend();
    try {
        RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot =
                keyedStateBackend.snapshot(
                        0L, 0L, testStreamFactory, CheckpointOptions.forCheckpointWithDefaultLocation());

        Thread asyncSnapshotThread = new Thread(snapshot);
        asyncSnapshotThread.start();

        // wait for snapshot to run
        waiter.await();
        waiter.reset();

        runStateUpdates();

        // allow checkpointing to start writing
        blocker.trigger();

        // wait for snapshot stream writing to run
        waiter.await();

        SnapshotResult<KeyedStateHandle> snapshotResult = snapshot.get();
        KeyedStateHandle keyedStateHandle = snapshotResult.getJobManagerOwnedSnapshot();
        assertNotNull(keyedStateHandle);
        assertTrue(keyedStateHandle.getStateSize() > 0);
        assertEquals(2, keyedStateHandle.getKeyGroupRange().getNumberOfKeyGroups());

        for (BlockingCheckpointOutputStream stream : testStreamFactory.getAllCreatedStreams()) {
            assertTrue(stream.isClosed());
        }

        asyncSnapshotThread.join();
        verifyRocksObjectsReleased();
    } finally {
        this.keyedStateBackend.dispose();
        this.keyedStateBackend = null;
    }
    verifyRocksDBStateUploaderClosed();
}