Use of org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue in project flink by apache.
The class SplitFetcherManagerTest, method testExceptionPropagation.

// the final modifier is important so that '@SafeVarargs' is accepted on Java 8
@SuppressWarnings("FinalPrivateMethod")
@SafeVarargs
private final void testExceptionPropagation(
        final RecordsWithSplitIds<Integer>... fetchesBeforeError) throws Exception {
    final IOException testingException = new IOException("test");
    final FutureCompletingBlockingQueue<RecordsWithSplitIds<Integer>> queue =
            new FutureCompletingBlockingQueue<>(10);
    final AwaitingReader<Integer, TestingSourceSplit> reader =
            new AwaitingReader<>(testingException, fetchesBeforeError);
    final SplitFetcherManager<Integer, TestingSourceSplit> fetcher =
            createFetcher("testSplit", queue, reader);

    reader.awaitAllRecordsReturned();
    drainQueue(queue);
    assertFalse(queue.getAvailabilityFuture().isDone());

    reader.triggerThrowException();

    // await the error propagation
    queue.getAvailabilityFuture().get();

    try {
        fetcher.checkErrors();
        fail("expected exception");
    } catch (Exception e) {
        assertSame(testingException, e.getCause().getCause());
    } finally {
        fetcher.close(20_000L);
    }
}
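The hand-off this test exercises rests on the queue's availability contract: the future returned by getAvailabilityFuture() stays pending while the queue is empty and completes once an element is put (or, as here, once a fetcher propagates an error by completing it). A minimal sketch of that contract using only the queue's public API; the String element type and the thread index 0 passed to put are illustrative choices for a single-producer setup, not anything the test above prescribes:

import java.util.concurrent.CompletableFuture;

import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;

public class AvailabilityContractSketch {
    public static void main(String[] args) throws Exception {
        FutureCompletingBlockingQueue<String> queue = new FutureCompletingBlockingQueue<>(10);

        // Nothing has been enqueued yet, so the availability future is pending.
        CompletableFuture<Void> availability = queue.getAvailabilityFuture();
        System.out.println("available before put: " + availability.isDone()); // false

        // put(threadIndex, element): the index identifies the producing fetcher thread.
        queue.put(0, "record");

        // The future obtained earlier is now complete; consumers blocked on it wake up.
        System.out.println("available after put: " + availability.isDone()); // true
        System.out.println("polled: " + queue.poll()); // "record"
    }
}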
Use of org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue in project flink by apache.
The class SourceReaderBaseTest, method testMultipleSplitsWithDifferentFinishingMoments.

@Test
void testMultipleSplitsWithDifferentFinishingMoments() throws Exception {
    FutureCompletingBlockingQueue<RecordsWithSplitIds<int[]>> elementsQueue =
            new FutureCompletingBlockingQueue<>();
    MockSplitReader mockSplitReader =
            MockSplitReader.newBuilder()
                    .setNumRecordsPerSplitPerFetch(2)
                    .setSeparatedFinishedRecord(false)
                    .setBlockingFetch(false)
                    .build();
    MockSourceReader reader =
            new MockSourceReader(
                    elementsQueue, () -> mockSplitReader, getConfig(), new TestingReaderContext());
    reader.start();

    List<MockSourceSplit> splits =
            Arrays.asList(getSplit(0, 10, Boundedness.BOUNDED), getSplit(1, 12, Boundedness.BOUNDED));
    reader.addSplits(splits);
    reader.notifyNoMoreSplits();

    while (true) {
        InputStatus status = reader.pollNext(new TestingReaderOutput<>());
        if (status == InputStatus.END_OF_INPUT) {
            break;
        }
        if (status == InputStatus.NOTHING_AVAILABLE) {
            reader.isAvailable().get();
        }
    }
}
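The while-loop above is the canonical way to drive a SourceReader to completion in a test: poll until END_OF_INPUT, and park on isAvailable() whenever the reader reports NOTHING_AVAILABLE so the loop does not spin. A reusable sketch of that pattern; the class and method names are hypothetical test helpers, not part of the Flink API:

import org.apache.flink.api.connector.source.ReaderOutput;
import org.apache.flink.api.connector.source.SourceReader;
import org.apache.flink.core.io.InputStatus;

// Hypothetical test helper: drain a reader until it reports END_OF_INPUT.
final class ReaderDrainer {
    static <T> void drainUntilEndOfInput(SourceReader<T, ?> reader, ReaderOutput<T> output)
            throws Exception {
        while (true) {
            InputStatus status = reader.pollNext(output);
            if (status == InputStatus.END_OF_INPUT) {
                return; // all splits finished and no more splits will arrive
            }
            if (status == InputStatus.NOTHING_AVAILABLE) {
                // Park until the elements queue signals availability again,
                // instead of busy-spinning on pollNext().
                reader.isAvailable().get();
            }
            // MORE_AVAILABLE: loop and poll again immediately.
        }
    }
}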
Use of org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue in project flink by apache.
The class SourceReaderBaseTest, method testPollNextReturnMoreAvailableWhenAllSplitFetcherCloseWithLeftoverElementInQueue.

@Test
void testPollNextReturnMoreAvailableWhenAllSplitFetcherCloseWithLeftoverElementInQueue()
        throws Exception {
    FutureCompletingBlockingQueue<RecordsWithSplitIds<int[]>> elementsQueue =
            new FutureCompletingBlockingQueue<>();
    MockSplitReader mockSplitReader =
            MockSplitReader.newBuilder()
                    .setNumRecordsPerSplitPerFetch(1)
                    .setBlockingFetch(true)
                    .build();
    BlockingShutdownSplitFetcherManager<int[], MockSourceSplit> splitFetcherManager =
            new BlockingShutdownSplitFetcherManager<>(elementsQueue, () -> mockSplitReader);
    final MockSourceReader sourceReader =
            new MockSourceReader(
                    elementsQueue, splitFetcherManager, getConfig(), new TestingReaderContext());

    // Create and add a split that only contains one record.
    final MockSourceSplit split = new MockSourceSplit(0, 0, 1);
    sourceReader.addSplits(Collections.singletonList(split));
    sourceReader.notifyNoMoreSplits();

    // Add the last record to the split while the splitFetcherManager is shutting
    // down the SplitFetchers, so a leftover element is still in the queue.
    splitFetcherManager.getInShutdownSplitFetcherFuture().thenRun(() -> split.addRecord(1));
    assertThat(sourceReader.pollNext(new TestingReaderOutput<>()))
            .isEqualTo(InputStatus.MORE_AVAILABLE);
}
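The race in this test hinges on CompletableFuture.thenRun: the callback that adds the last record is registered up front and only fires once the manager completes its in-shutdown future, so the record is guaranteed to arrive exactly while the fetchers are closing. A standalone sketch of the same ordering trick using only the JDK; all names here are illustrative stand-ins, not Flink API:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;

public class ShutdownRaceSketch {
    public static void main(String[] args) {
        List<Integer> split = new CopyOnWriteArrayList<>();
        split.add(0);

        // Stand-in for getInShutdownSplitFetcherFuture(): completed by the
        // component at the moment it begins shutting down.
        CompletableFuture<Void> inShutdown = new CompletableFuture<>();

        // Register the late record before shutdown starts, exactly as the test does.
        inShutdown.thenRun(() -> split.add(1));

        // ... later, the shutdown path fires the hook:
        inShutdown.complete(null);

        System.out.println(split); // [0, 1] -- the record landed during shutdown
    }
}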
Use of org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue in project flink by apache.
The class SourceReaderBaseTest, method testExceptionInSplitReader.

@Test
void testExceptionInSplitReader() {
    assertThatThrownBy(() -> {
        final String errMsg = "Testing Exception";
        FutureCompletingBlockingQueue<RecordsWithSplitIds<int[]>> elementsQueue =
                new FutureCompletingBlockingQueue<>();
        // A split reader whose fetch() always throws; the failure should surface
        // when pollNext() is called.
        try (MockSourceReader reader =
                new MockSourceReader(
                        elementsQueue,
                        () -> new SplitReader<int[], MockSourceSplit>() {
                            @Override
                            public RecordsWithSplitIds<int[]> fetch() {
                                throw new RuntimeException(errMsg);
                            }

                            @Override
                            public void handleSplitsChanges(SplitsChange<MockSourceSplit> splitsChanges) {}

                            @Override
                            public void wakeUp() {}

                            @Override
                            public void close() {}
                        },
                        getConfig(),
                        new TestingReaderContext())) {
            ValidatingSourceOutput output = new ValidatingSourceOutput();
            reader.addSplits(
                    Collections.singletonList(
                            getSplit(0, NUM_RECORDS_PER_SPLIT, Boundedness.CONTINUOUS_UNBOUNDED)));
            reader.notifyNoMoreSplits();
            // Not a real infinite loop: pollNext() is expected to throw after a
            // couple of polls, once the fetcher error has propagated.
            while (true) {
                InputStatus inputStatus = reader.pollNext(output);
                assertThat(inputStatus).isNotEqualTo(InputStatus.END_OF_INPUT);
                // Add a sleep to avoid a tight loop.
                Thread.sleep(1);
            }
        }
    }).isInstanceOf(RuntimeException.class)
      .hasMessage("One or more fetchers have encountered exception");
}
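As in the first snippet, the fetcher wraps the original failure before rethrowing it, which is why that test had to assert against e.getCause().getCause(). A small hypothetical test utility for walking such cause chains, so assertions do not hard-code the wrapping depth; nothing here is Flink API:

// Hypothetical test utility: walk a wrapped failure down to its root cause.
final class Causes {
    static Throwable rootCause(Throwable t) {
        Throwable current = t;
        // Stop at the innermost cause, guarding against self-referential chains.
        while (current.getCause() != null && current.getCause() != current) {
            current = current.getCause();
        }
        return current;
    }
}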
Use of org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue in project flink by apache.
The class KafkaSource, method createReader.

@VisibleForTesting
SourceReader<OUT, KafkaPartitionSplit> createReader(
        SourceReaderContext readerContext, Consumer<Collection<String>> splitFinishedHook)
        throws Exception {
    FutureCompletingBlockingQueue<RecordsWithSplitIds<ConsumerRecord<byte[], byte[]>>> elementsQueue =
            new FutureCompletingBlockingQueue<>();
    deserializationSchema.open(
            new DeserializationSchema.InitializationContext() {
                @Override
                public MetricGroup getMetricGroup() {
                    return readerContext.metricGroup().addGroup("deserializer");
                }

                @Override
                public UserCodeClassLoader getUserCodeClassLoader() {
                    return readerContext.getUserCodeClassLoader();
                }
            });
    final KafkaSourceReaderMetrics kafkaSourceReaderMetrics =
            new KafkaSourceReaderMetrics(readerContext.metricGroup());
    Supplier<KafkaPartitionSplitReader> splitReaderSupplier =
            () -> new KafkaPartitionSplitReader(props, readerContext, kafkaSourceReaderMetrics);
    KafkaRecordEmitter<OUT> recordEmitter = new KafkaRecordEmitter<>(deserializationSchema);
    return new KafkaSourceReader<>(
            elementsQueue,
            new KafkaSourceFetcherManager(elementsQueue, splitReaderSupplier::get, splitFinishedHook),
            recordEmitter,
            toConfiguration(props),
            readerContext,
            kafkaSourceReaderMetrics);
}
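The wiring above shows the recurring pattern in all of these call sites: a single FutureCompletingBlockingQueue instance is shared between the fetcher manager (producer side) and the source reader (consumer side). A schematic sketch of that hand-off with the Kafka-specific types simplified away; the thread setup and String element type are illustrative assumptions, and put(threadIndex, element) is used as in the first sketch:

import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue;

// Sketch: one queue shared by a producing "fetcher" thread and a consuming
// "reader" thread, mirroring how KafkaSourceFetcherManager and
// KafkaSourceReader share elementsQueue above.
public class SharedQueueSketch {
    public static void main(String[] args) throws Exception {
        FutureCompletingBlockingQueue<String> elementsQueue = new FutureCompletingBlockingQueue<>();

        Thread fetcher = new Thread(() -> {
            try {
                // threadIndex 0: this sketch has a single producer.
                elementsQueue.put(0, "fetched-record");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        fetcher.start();

        // Consumer side: wait for availability, then take the element without blocking.
        elementsQueue.getAvailabilityFuture().get();
        System.out.println(elementsQueue.poll()); // "fetched-record"
        fetcher.join();
    }
}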