Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
The class CheckpointStateOutputStreamTest, method testCloseDoesNotLock.
/**
* This test validates that a close operation can happen even while a 'closeAndGetHandle()' call
* is in progress.
*
* <p>That behavior is essential for fast cancellation (concurrent cleanup).
*/
@Test
public void testCloseDoesNotLock() throws Exception {
    final Path folder = new Path(tmp.newFolder().toURI());
    final String fileName = "this-is-ignored-anyways.file";

    final FileSystem fileSystem =
            spy(new FsWithoutRecoverableWriter((path) -> new BlockerStream()));
    final FSDataOutputStream checkpointStream = createTestStream(fileSystem, folder, fileName);

    final OneShotLatch sync = new OneShotLatch();

    final CheckedThread thread =
            new CheckedThread() {
                @Override
                public void go() throws Exception {
                    sync.trigger();
                    // that call should now block, because it accesses the position
                    closeAndGetResult(checkpointStream);
                }
            };
    thread.start();

    sync.await();
    checkpointStream.close();

    // the outcome of that call is not important for this test;
    // what matters is that the thread does not freeze/lock up
    try {
        thread.sync();
    } catch (IOException ignored) {
    }
}
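Every example on this page follows the same pattern: the work under test runs inside CheckedThread#go(), the main thread coordinates via an OneShotLatch, and a later sync() call joins the worker and rethrows anything that go() threw, so a failure on the worker thread fails the test. Below is a minimal, self-contained usage sketch; the class name and the placeholder method are illustrative only and not taken from the Flink tests on this page.

import org.apache.flink.core.testutils.CheckedThread;
import org.apache.flink.core.testutils.OneShotLatch;

public class CheckedThreadUsageSketch {

    public static void main(String[] args) throws Exception {
        final OneShotLatch started = new OneShotLatch();

        final CheckedThread worker =
                new CheckedThread("sketch-worker") {
                    @Override
                    public void go() throws Exception {
                        started.trigger();
                        // anything thrown here is captured and surfaced again
                        // when the main thread calls sync()
                        doSomethingThatMayThrow();
                    }
                };

        worker.start();
        started.await(); // wait until go() is actually running
        worker.sync();   // joins the worker and rethrows a captured failure, if any
    }

    private static void doSomethingThatMayThrow() throws Exception {
        // placeholder for the concurrent operation a real test would exercise
    }
}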
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
The class StateBackendTestBase, method testValueStateRace.
/**
* Tests {@link ValueState#value()} and {@link InternalKvState#getSerializedValue(byte[],
* TypeSerializer, TypeSerializer, TypeSerializer)} accessing the state concurrently. They
* should not get in the way of each other.
*/
@Test
@SuppressWarnings("unchecked")
public void testValueStateRace() throws Exception {
    final Integer namespace = 1;

    final ValueStateDescriptor<String> kvId = new ValueStateDescriptor<>("id", String.class);

    final TypeSerializer<Integer> keySerializer = IntSerializer.INSTANCE;
    final TypeSerializer<Integer> namespaceSerializer = IntSerializer.INSTANCE;

    final CheckpointableKeyedStateBackend<Integer> backend =
            createKeyedBackend(IntSerializer.INSTANCE);
    try {
        final ValueState<String> state =
                backend.getPartitionedState(namespace, IntSerializer.INSTANCE, kvId);

        // this is only available after the backend initialized the serializer
        final TypeSerializer<String> valueSerializer = kvId.getSerializer();

        @SuppressWarnings("unchecked")
        final InternalKvState<Integer, Integer, String> kvState =
                (InternalKvState<Integer, Integer, String>) state;

        /*
         * 1) Test that ValueState#value() before and after KvState#getSerializedValue(byte[])
         * return the same value.
         */

        // set some key and namespace
        final int key1 = 1;
        backend.setCurrentKey(key1);
        kvState.setCurrentNamespace(2);
        state.update("2");
        assertEquals("2", state.value());

        // query another key and namespace
        assertNull(
                getSerializedValue(
                        kvState, 3, keySerializer, namespace, IntSerializer.INSTANCE, valueSerializer));

        // the state should not have changed!
        assertEquals("2", state.value());

        // re-set values
        kvState.setCurrentNamespace(namespace);

        /*
         * 2) Test two threads concurrently using ValueState#value() and
         * KvState#getSerializedValue(byte[]).
         */

        // some modifications to the state
        final int key2 = 10;
        backend.setCurrentKey(key2);
        assertNull(state.value());
        assertNull(
                getSerializedValue(
                        kvState, key2, keySerializer, namespace, namespaceSerializer, valueSerializer));
        state.update("1");

        final CheckedThread getter =
                new CheckedThread("State getter") {
                    @Override
                    public void go() throws Exception {
                        while (!isInterrupted()) {
                            assertEquals("1", state.value());
                        }
                    }
                };

        final CheckedThread serializedGetter =
                new CheckedThread("Serialized state getter") {
                    @Override
                    public void go() throws Exception {
                        while (!isInterrupted() && getter.isAlive()) {
                            final String serializedValue =
                                    getSerializedValue(
                                            kvState,
                                            key2,
                                            keySerializer,
                                            namespace,
                                            namespaceSerializer,
                                            valueSerializer);
                            assertEquals("1", serializedValue);
                        }
                    }
                };

        getter.start();
        serializedGetter.start();

        // run both threads for max 100ms
        Timer t = new Timer("stopper");
        t.schedule(
                new TimerTask() {
                    @Override
                    public void run() {
                        getter.interrupt();
                        serializedGetter.interrupt();
                        this.cancel();
                    }
                },
                100);

        // wait for both threads to finish
        // serializedGetter will finish if its assertion fails or if getter is not alive any more
        serializedGetter.sync();
        // if serializedGetter crashed, getter will not know -> interrupt just in case
        getter.interrupt();
        getter.sync();

        // if not executed yet
        t.cancel();
    } finally {
        // clean up
        IOUtils.closeQuietly(backend);
        backend.dispose();
    }
}
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
The class NetworkBufferPoolTest, method testRequestMemorySegmentsInterruptable.
/**
* Tests {@link NetworkBufferPool#requestUnpooledMemorySegments(int)}, verifying it may be
* aborted in case of a concurrent {@link NetworkBufferPool#destroy()} call.
*/
@Test
public void testRequestMemorySegmentsInterruptable() throws Exception {
    final int numBuffers = 10;

    NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128);
    MemorySegment segment = globalPool.requestPooledMemorySegment();
    assertNotNull(segment);

    final OneShotLatch isRunning = new OneShotLatch();
    CheckedThread asyncRequest =
            new CheckedThread() {
                @Override
                public void go() throws IOException {
                    isRunning.trigger();
                    globalPool.requestUnpooledMemorySegments(10);
                }
            };
    asyncRequest.start();

    // We want the destroy call to happen while the globalPool.requestUnpooledMemorySegments()
    // call above is blocked. We cannot guarantee this, but we make it highly probable:
    isRunning.await();
    Thread.sleep(10);

    globalPool.destroy();

    segment.free();

    try {
        Exception ex = assertThrows(IllegalStateException.class, asyncRequest::sync);
        assertTrue(ex.getMessage().contains("destroyed"));
    } finally {
        globalPool.destroy();
    }
}
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
The class CheckpointedInputGateTest, method testPriorityBeforeClose.
/**
* Tests a priority notification happening right before cancellation. The mail would be
* processed while draining mailbox but can't pull any data anymore.
*/
@Test
public void testPriorityBeforeClose() throws IOException, InterruptedException {
    NetworkBufferPool bufferPool = new NetworkBufferPool(10, 1024);
    try (Closer closer = Closer.create()) {
        closer.register(bufferPool::destroy);

        for (int repeat = 0; repeat < 100; repeat++) {
            setUp();

            SingleInputGate singleInputGate =
                    new SingleInputGateBuilder()
                            .setNumberOfChannels(2)
                            .setBufferPoolFactory(bufferPool.createBufferPool(2, Integer.MAX_VALUE))
                            .setSegmentProvider(bufferPool)
                            .setChannelFactory(InputChannelBuilder::buildRemoteChannel)
                            .build();
            singleInputGate.setup();
            ((RemoteInputChannel) singleInputGate.getChannel(0)).requestSubpartition();

            final TaskMailboxImpl mailbox = new TaskMailboxImpl();
            MailboxExecutorImpl mailboxExecutor =
                    new MailboxExecutorImpl(mailbox, 0, StreamTaskActionExecutor.IMMEDIATE);
            ValidatingCheckpointHandler validatingHandler = new ValidatingCheckpointHandler(1);
            SingleCheckpointBarrierHandler barrierHandler =
                    TestBarrierHandlerFactory.forTarget(validatingHandler)
                            .create(singleInputGate, new MockChannelStateWriter());
            CheckpointedInputGate checkpointedInputGate =
                    new CheckpointedInputGate(
                            singleInputGate,
                            barrierHandler,
                            mailboxExecutor,
                            UpstreamRecoveryTracker.forInputGate(singleInputGate));

            final int oldSize = mailbox.size();
            enqueue(checkpointedInputGate, 0, barrier(1));

            // wait for the priority mail to be enqueued
            Deadline deadline = Deadline.fromNow(Duration.ofMinutes(1));
            while (deadline.hasTimeLeft() && oldSize >= mailbox.size()) {
                Thread.sleep(1);
            }

            // test the race condition: either the priority event is handled first, in which
            // case we expect a checkpoint to be triggered, or closing comes first, in which
            // case we expect a CancelTaskException
            CountDownLatch beforeLatch = new CountDownLatch(2);
            final CheckedThread canceler =
                    new CheckedThread("Canceler") {
                        @Override
                        public void go() throws IOException {
                            beforeLatch.countDown();
                            singleInputGate.close();
                        }
                    };
            canceler.start();
            beforeLatch.countDown();

            try {
                while (mailboxExecutor.tryYield()) {
                }
                assertEquals(1L, validatingHandler.triggeredCheckpointCounter);
            } catch (CancelTaskException e) {
            }

            canceler.join();
        }
    }
}
Use of org.apache.flink.core.testutils.CheckedThread in project flink by apache.
The class StateDescriptorTest, method testSerializerLazyInitializeInParallel.
@Test
public void testSerializerLazyInitializeInParallel() throws Exception {
    final String name = "testSerializerLazyInitializeInParallel";
    // use PojoTypeInfo which will create a new serializer when createSerializer is invoked
    final TestStateDescriptor<String> desc =
            new TestStateDescriptor<>(name, new PojoTypeInfo<>(String.class, new ArrayList<>()));

    final int threadNumber = 20;
    final ArrayList<CheckedThread> threads = new ArrayList<>(threadNumber);
    final ExecutionConfig executionConfig = new ExecutionConfig();
    final ConcurrentHashMap<Integer, TypeSerializer<String>> serializers = new ConcurrentHashMap<>();

    for (int i = 0; i < threadNumber; i++) {
        threads.add(
                new CheckedThread() {
                    @Override
                    public void go() {
                        desc.initializeSerializerUnlessSet(executionConfig);
                        TypeSerializer<String> serializer = desc.getOriginalSerializer();
                        serializers.put(System.identityHashCode(serializer), serializer);
                    }
                });
    }

    threads.forEach(Thread::start);
    for (CheckedThread t : threads) {
        t.sync();
    }

    assertEquals(
            "Should use only one serializer but actually: " + serializers, 1, serializers.size());
    threads.clear();
}
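The reason these tests call sync() instead of a plain Thread#join() is that an assertion failure or exception thrown on the spawned thread would otherwise go unnoticed by the JUnit runner. Conceptually, CheckedThread behaves like the simplified sketch below; this is an approximation for illustration, not the actual Flink implementation, whose class in org.apache.flink.core.testutils offers a richer API (such as the named constructor used in the examples above).

// Simplified approximation of the CheckedThread idea, not the actual Flink class.
abstract class CheckedThreadSketch extends Thread {

    private volatile Throwable error;

    /** The test work; any Throwable thrown here is captured. */
    public abstract void go() throws Exception;

    @Override
    public final void run() {
        try {
            go();
        } catch (Throwable t) {
            error = t;
        }
    }

    /** Waits for the thread to finish and rethrows on the caller whatever go() threw. */
    public void sync() throws Exception {
        join();
        final Throwable t = error;
        if (t instanceof Error) {
            throw (Error) t;
        } else if (t instanceof Exception) {
            throw (Exception) t;
        }
    }
}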