Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.
From the class Kafka010FetcherTest, the method testCommitDoesNotBlock:
@Test
public void testCommitDoesNotBlock() throws Exception {
    // test data
    final KafkaTopicPartition testPartition = new KafkaTopicPartition("test", 42);
    final Map<KafkaTopicPartition, Long> testCommitData = new HashMap<>();
    testCommitData.put(testPartition, 11L);

    // to synchronize when the consumer is in its blocking method
    final OneShotLatch sync = new OneShotLatch();

    // ----- the mock consumer with blocking poll calls -----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            sync.trigger();
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();

    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);

    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked")
    SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka010Fetcher<String> fetcher = new Kafka010Fetcher<>(
        sourceContext,
        partitionsWithInitialOffsets,
        null, /* periodic assigner */
        null, /* punctuated assigner */
        new TestProcessingTimeService(),
        10,
        getClass().getClassLoader(),
        "taskname-with-subtask",
        new UnregisteredMetricsGroup(),
        schema,
        new Properties(),
        0L,
        false);

    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();

    // wait until the fetcher has reached the method of interest
    sync.await();

    // ----- trigger the offset commit -----
    final AtomicReference<Throwable> commitError = new AtomicReference<>();
    final Thread committer = new Thread("committer runner") {

        @Override
        public void run() {
            try {
                fetcher.commitInternalOffsetsToKafka(testCommitData);
            } catch (Throwable t) {
                commitError.set(t);
            }
        }
    };
    committer.start();

    // ----- ensure that the committer finishes in time -----
    committer.join(30000);
    assertFalse("The committer did not finish in time", committer.isAlive());

    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();

    // check that there were no errors in the fetcher
    final Throwable fetcherError = error.get();
    if (fetcherError != null && !(fetcherError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", fetcherError);
    }
    final Throwable committerError = commitError.get();
    if (committerError != null) {
        throw new Exception("Exception in the committer", committerError);
    }
}
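The essential idiom above is a two-latch handshake: the consumer thread triggers the OneShotLatch sync once it is inside the blocking poll(), the test waits on sync before committing, and the MultiShotLatch blockerLatch (released by wakeup()) holds the consumer in place until the test lets it go. The following is a minimal standalone sketch of that handshake, using java.util.concurrent.CountDownLatch as a stand-in for Flink's latches so it runs without any Flink or Mockito dependency (class and thread names are illustrative):

import java.util.concurrent.CountDownLatch;

public class LatchHandshakeSketch {

    public static void main(String[] args) throws InterruptedException {
        // stand-in for OneShotLatch: signals that the worker reached its blocking call
        final CountDownLatch sync = new CountDownLatch(1);
        // stand-in for the blocker latch: holds the worker until the test releases it
        final CountDownLatch blocker = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
            sync.countDown(); // "I am now inside the blocking method"
            try {
                blocker.await(); // plays the role of the blocking KafkaConsumer.poll()
            } catch (InterruptedException ignored) {
            }
        }, "worker");
        worker.start();

        sync.await(); // proceed only once the worker is provably blocked
        // ... this is where the real test calls commitInternalOffsetsToKafka(...) ...
        blocker.countDown(); // plays the role of wakeup() triggering the blocker latch
        worker.join();
    }
}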
Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.
From the class Kafka09FetcherTest, the method testCommitDoesNotBlock:
@Test
public void testCommitDoesNotBlock() throws Exception {
    // test data
    final KafkaTopicPartition testPartition = new KafkaTopicPartition("test", 42);
    final Map<KafkaTopicPartition, Long> testCommitData = new HashMap<>();
    testCommitData.put(testPartition, 11L);

    // to synchronize when the consumer is in its blocking method
    final OneShotLatch sync = new OneShotLatch();

    // ----- the mock consumer with blocking poll calls -----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            sync.trigger();
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();

    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);

    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked")
    SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
        sourceContext,
        partitionsWithInitialOffsets,
        null, /* periodic watermark extractor */
        null, /* punctuated watermark extractor */
        new TestProcessingTimeService(),
        10, /* watermark interval */
        this.getClass().getClassLoader(),
        "task_name",
        new UnregisteredMetricsGroup(),
        schema,
        new Properties(),
        0L,
        false);

    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();

    // wait until the fetcher has reached the method of interest
    sync.await();

    // ----- trigger the offset commit -----
    final AtomicReference<Throwable> commitError = new AtomicReference<>();
    final Thread committer = new Thread("committer runner") {

        @Override
        public void run() {
            try {
                fetcher.commitInternalOffsetsToKafka(testCommitData);
            } catch (Throwable t) {
                commitError.set(t);
            }
        }
    };
    committer.start();

    // ----- ensure that the committer finishes in time -----
    committer.join(30000);
    assertFalse("The committer did not finish in time", committer.isAlive());

    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();

    // check that there were no errors in the fetcher
    final Throwable fetcherError = error.get();
    if (fetcherError != null && !(fetcherError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", fetcherError);
    }
    final Throwable committerError = commitError.get();
    if (committerError != null) {
        throw new Exception("Exception in the committer", committerError);
    }
}
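The Kafka 0.9 variant is structurally identical to the 0.10 test, and both verify liveness the same way: run the commit on its own thread, join with a bound, and assert that the thread has died. That check can be factored into a small helper; the sketch below is hypothetical (assertFinishesWithin is not part of Flink's test utilities):

import static org.junit.Assert.assertFalse;

public final class ThreadAssertions {

    private ThreadAssertions() {
    }

    // Fails the test if the thread is still alive after timeoutMillis,
    // mirroring the committer.join(30000) / assertFalse(committer.isAlive()) pattern above.
    public static void assertFinishesWithin(Thread thread, long timeoutMillis) throws InterruptedException {
        thread.join(timeoutMillis);
        assertFalse("Thread '" + thread.getName() + "' did not finish in time", thread.isAlive());
    }
}

With such a helper, the tail of both tests reduces to a single call like assertFinishesWithin(committer, 30_000).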
Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.
From the class RocksDBAsyncSnapshotTest, the method testFullyAsyncSnapshot:
/**
 * This ensures that asynchronous state handles are actually materialized asynchronously.
 *
 * <p>We use latches to block at various stages and see if the code still continues through
 * the parts that are not asynchronous. If the checkpoint is not done asynchronously the
 * test will simply lock forever.
 */
@Test
public void testFullyAsyncSnapshot() throws Exception {
    LocalFileSystem localFS = new LocalFileSystem();
    localFS.initialize(new URI("file:///"), new Configuration());
    PowerMockito.stub(PowerMockito.method(FileSystem.class, "get", URI.class, Configuration.class)).toReturn(localFS);
    final OneInputStreamTask<String, String> task = new OneInputStreamTask<>();
    final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(task, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();
    testHarness.configureForKeyedStream(new KeySelector<String, String>() {

        @Override
        public String getKey(String value) throws Exception {
            return value;
        }
    }, BasicTypeInfo.STRING_TYPE_INFO);
    StreamConfig streamConfig = testHarness.getStreamConfig();
    File dbDir = new File(new File(ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH, UUID.randomUUID().toString()), "state");
    RocksDBStateBackend backend = new RocksDBStateBackend(new MemoryStateBackend());
    backend.setDbStoragePath(dbDir.getAbsolutePath());
    streamConfig.setStateBackend(backend);
    streamConfig.setStreamOperator(new AsyncCheckpointOperator());
    final OneShotLatch delayCheckpointLatch = new OneShotLatch();
    final OneShotLatch ensureCheckpointLatch = new OneShotLatch();
    StreamMockEnvironment mockEnv = new StreamMockEnvironment(testHarness.jobConfig, testHarness.taskConfig, testHarness.memorySize, new MockInputSplitProvider(), testHarness.bufferSize) {

        @Override
        public void acknowledgeCheckpoint(long checkpointId, CheckpointMetrics checkpointMetrics, SubtaskState checkpointStateHandles) {
            super.acknowledgeCheckpoint(checkpointId, checkpointMetrics);
            // block on the latch to verify that triggerCheckpoint returns below,
            // even though the async checkpoint has not finished yet
            try {
                delayCheckpointLatch.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            // should be one k/v state
            assertNotNull(checkpointStateHandles.getManagedKeyedState());
            // we now know that the checkpoint went through
            ensureCheckpointLatch.trigger();
        }
    };
    testHarness.invoke(mockEnv);
    // wait for the task to be running
    for (Field field : StreamTask.class.getDeclaredFields()) {
        if (field.getName().equals("isRunning")) {
            field.setAccessible(true);
            while (!field.getBoolean(task)) {
                Thread.sleep(10);
            }
        }
    }
    task.triggerCheckpoint(new CheckpointMetaData(42, 17), CheckpointOptions.forFullCheckpoint());
    testHarness.processElement(new StreamRecord<>("Wohoo", 0));
    // now we allow the checkpoint
    delayCheckpointLatch.trigger();
    // wait for the checkpoint to go through
    ensureCheckpointLatch.await();
    testHarness.endInput();
    ExecutorService threadPool = task.getAsyncOperationsThreadPool();
    threadPool.shutdown();
    Assert.assertTrue(threadPool.awaitTermination(60_000, TimeUnit.MILLISECONDS));
    testHarness.waitForTaskCompletion();
    if (mockEnv.wasFailedExternally()) {
        fail("Unexpected exception during execution.");
    }
}
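The test proves asynchrony with two OneShotLatch instances: the acknowledge callback parks on delayCheckpointLatch, so the test can only get past triggerCheckpoint() if the snapshot was handed off to another thread, and ensureCheckpointLatch then confirms the acknowledgement actually ran. Below is a stripped-down sketch of the same proof, with CountDownLatch standing in for OneShotLatch and a plain single-thread executor standing in for the task's async-operations pool:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AsyncProofSketch {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch delayLatch = new CountDownLatch(1);  // holds the async part
        final CountDownLatch ensureLatch = new CountDownLatch(1); // confirms it completed
        ExecutorService asyncOps = Executors.newSingleThreadExecutor();

        // "triggerCheckpoint": must return immediately, handing the slow part off
        asyncOps.execute(() -> {
            try {
                delayLatch.await(); // would deadlock here if run on the caller's thread
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            ensureLatch.countDown(); // "acknowledge": the checkpoint went through
        });

        // if the snapshot were synchronous, we would never reach this line
        delayLatch.countDown(); // now we allow the checkpoint
        ensureLatch.await();    // wait for the checkpoint to go through

        asyncOps.shutdown();
        if (!asyncOps.awaitTermination(60_000, TimeUnit.MILLISECONDS)) {
            throw new AssertionError("async operations did not terminate");
        }
    }
}

Run synchronously on the caller's thread, the body would deadlock on delayLatch.await(); reaching delayLatch.countDown() is therefore itself the evidence of asynchrony.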
Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.
From the class RocksDBStateBackendTest, the method setupRocksKeyedStateBackend:
public void setupRocksKeyedStateBackend() throws Exception {
    blocker = new OneShotLatch();
    waiter = new OneShotLatch();
    testStreamFactory = new BlockerCheckpointStreamFactory(1024 * 1024);
    testStreamFactory.setBlockerLatch(blocker);
    testStreamFactory.setWaiterLatch(waiter);
    testStreamFactory.setAfterNumberInvocations(100);
    RocksDBStateBackend backend = getStateBackend();
    Environment env = new DummyEnvironment("TestTask", 1, 0);
    keyedStateBackend = (RocksDBKeyedStateBackend<Integer>) backend.createKeyedStateBackend(env, new JobID(), "Test", IntSerializer.INSTANCE, 2, new KeyGroupRange(0, 1), mock(TaskKvStateRegistry.class));
    testState1 = keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, new ValueStateDescriptor<>("TestState-1", Integer.class, 0));
    testState2 = keyedStateBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, new ValueStateDescriptor<>("TestState-2", String.class, ""));
    allCreatedCloseables = new ArrayList<>();
    keyedStateBackend.db = spy(keyedStateBackend.db);
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            RocksIterator rocksIterator = spy((RocksIterator) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(rocksIterator);
            return rocksIterator;
        }
    }).when(keyedStateBackend.db).newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class));
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            Snapshot snapshot = spy((Snapshot) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(snapshot);
            return snapshot;
        }
    }).when(keyedStateBackend.db).getSnapshot();
    doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            ColumnFamilyHandle columnFamily = spy((ColumnFamilyHandle) invocationOnMock.callRealMethod());
            allCreatedCloseables.add(columnFamily);
            return columnFamily;
        }
    }).when(keyedStateBackend.db).createColumnFamily(any(ColumnFamilyDescriptor.class));
    for (int i = 0; i < 100; ++i) {
        keyedStateBackend.setCurrentKey(i);
        testState1.update(4200 + i);
        testState2.update("S-" + (4200 + i));
    }
}
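The recurring trick in this setup is wrapping every native resource the spied RocksDB instance hands out (iterators, snapshots, column families) in a Mockito spy and recording it in allCreatedCloseables, so a later teardown check can verify that each one was closed. A generic sketch of that idiom, with java.io.Closeable in place of the RocksDB types and a hypothetical spyAndTrack helper:

import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import java.io.Closeable;
import java.util.ArrayList;
import java.util.List;

public class CloseableTracker {

    private final List<Closeable> created = new ArrayList<>();

    // Wrap a freshly created resource in a spy and remember it for later verification.
    public <T extends Closeable> T spyAndTrack(T resource) {
        T spied = spy(resource);
        created.add(spied);
        return spied;
    }

    // Teardown check: every tracked resource must have been closed.
    public void verifyAllClosed() throws Exception {
        for (Closeable c : created) {
            verify(c).close();
        }
    }
}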
Use of org.apache.flink.core.testutils.OneShotLatch in project flink by apache.
From the class ContinuousFileProcessingMigrationTest, the method testReaderSnapshotRestore:
@Test
public void testReaderSnapshotRestore() throws Exception {
    /*
    The following commented-out block was run against Flink 1.1 to generate the
    "reader-migration-test-flink1.1-snapshot" resource that is restored below:

    FileInputSplit split1 =
        new FileInputSplit(3, new Path("test/test1"), 0, 100, null);
    FileInputSplit split2 =
        new FileInputSplit(2, new Path("test/test2"), 101, 200, null);
    FileInputSplit split3 =
        new FileInputSplit(1, new Path("test/test2"), 0, 100, null);
    FileInputSplit split4 =
        new FileInputSplit(0, new Path("test/test3"), 0, 100, null);
    final OneShotLatch latch = new OneShotLatch();
    BlockingFileInputFormat format = new BlockingFileInputFormat(latch, new Path(hdfsURI));
    TypeInformation<FileInputSplit> typeInfo = TypeExtractor.getInputFormatTypes(format);
    ContinuousFileReaderOperator<FileInputSplit, ?> initReader = new ContinuousFileReaderOperator<>(format);
    initReader.setOutputType(typeInfo, new ExecutionConfig());
    OneInputStreamOperatorTestHarness<FileInputSplit, FileInputSplit> initTestInstance =
        new OneInputStreamOperatorTestHarness<>(initReader);
    initTestInstance.setTimeCharacteristic(TimeCharacteristic.EventTime);
    initTestInstance.open();
    // create some state in the reader
    initTestInstance.processElement(new StreamRecord<>(split1));
    initTestInstance.processElement(new StreamRecord<>(split2));
    initTestInstance.processElement(new StreamRecord<>(split3));
    initTestInstance.processElement(new StreamRecord<>(split4));
    // take a snapshot of the operator's state. This will be used
    // to initialize another reader and compare the results of the
    // two operators.
    final StreamTaskState snapshot;
    synchronized (initTestInstance.getCheckpointLock()) {
        snapshot = initTestInstance.snapshot(0L, 0L);
    }
    initTestInstance.snapshotToFile(snapshot, "src/test/resources/reader-migration-test-flink1.1-snapshot");
    */
    TimestampedFileInputSplit split1 = new TimestampedFileInputSplit(0, 3, new Path("test/test1"), 0, 100, null);
    TimestampedFileInputSplit split2 = new TimestampedFileInputSplit(10, 2, new Path("test/test2"), 101, 200, null);
    TimestampedFileInputSplit split3 = new TimestampedFileInputSplit(10, 1, new Path("test/test2"), 0, 100, null);
    TimestampedFileInputSplit split4 = new TimestampedFileInputSplit(11, 0, new Path("test/test3"), 0, 100, null);
    final OneShotLatch latch = new OneShotLatch();
    BlockingFileInputFormat format = new BlockingFileInputFormat(latch, new Path(hdfsURI));
    TypeInformation<FileInputSplit> typeInfo = TypeExtractor.getInputFormatTypes(format);
    ContinuousFileReaderOperator<FileInputSplit> initReader = new ContinuousFileReaderOperator<>(format);
    initReader.setOutputType(typeInfo, new ExecutionConfig());
    OneInputStreamOperatorTestHarness<TimestampedFileInputSplit, FileInputSplit> initTestInstance = new OneInputStreamOperatorTestHarness<>(initReader);
    initTestInstance.setTimeCharacteristic(TimeCharacteristic.EventTime);
    initTestInstance.setup();
    initTestInstance.initializeStateFromLegacyCheckpoint(getResourceFilename("reader-migration-test-flink1.1-snapshot"));
    initTestInstance.open();
    latch.trigger();
    synchronized (initTestInstance.getCheckpointLock()) {
        initTestInstance.close();
    }
    FileInputSplit fsSplit1 = createSplitFromTimestampedSplit(split1);
    FileInputSplit fsSplit2 = createSplitFromTimestampedSplit(split2);
    FileInputSplit fsSplit3 = createSplitFromTimestampedSplit(split3);
    FileInputSplit fsSplit4 = createSplitFromTimestampedSplit(split4);
    // verify that the restored reader emitted all of the expected splits
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit1)));
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit2)));
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit3)));
    Assert.assertTrue(initTestInstance.getOutput().contains(new StreamRecord<>(fsSplit4)));
}
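BlockingFileInputFormat, defined elsewhere in the test class, gates the reader on the latch so that the operator still holds unprocessed splits as state when the snapshot is taken, and only drains them after latch.trigger(). The sketch below shows one way such a format can be built; it is illustrative only, not Flink's actual implementation, and assumes the standard FileInputFormat open/reachedEnd/nextRecord contract:

import java.io.IOException;

import org.apache.flink.api.common.io.FileInputFormat;
import org.apache.flink.core.fs.FileInputSplit;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.testutils.OneShotLatch;

// Illustrative latch-gated input format: every call to reachedEnd() parks on the
// latch, so splits accumulate in the reader operator until the test releases them.
public class LatchGatedFileInputFormat extends FileInputFormat<FileInputSplit> {

    private final OneShotLatch latch;

    private FileInputSplit currentSplit;

    private boolean emitted;

    public LatchGatedFileInputFormat(OneShotLatch latch, Path filePath) {
        super(filePath);
        this.latch = latch;
    }

    @Override
    public void open(FileInputSplit split) {
        // deliberately skip the file-opening logic of the superclass: the split
        // itself is the "record" this format emits
        this.currentSplit = split;
        this.emitted = false;
    }

    @Override
    public boolean reachedEnd() throws IOException {
        try {
            // hold the reader here until the test calls latch.trigger()
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException(e);
        }
        return emitted;
    }

    @Override
    public FileInputSplit nextRecord(FileInputSplit reuse) {
        emitted = true;
        return currentSplit;
    }

    @Override
    public void close() {
    }
}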