Use of org.apache.flink.connector.testutils.source.reader.TestingReaderContext in project flink by apache.
From the class SourceReaderBaseTest, method testExceptionInSplitReader.
@Test
void testExceptionInSplitReader() {
    assertThatThrownBy(() -> {
        final String errMsg = "Testing Exception";
        FutureCompletingBlockingQueue<RecordsWithSplitIds<int[]>> elementsQueue =
                new FutureCompletingBlockingQueue<>();
        // We have to handle split changes first, otherwise fetch will not be called.
        try (MockSourceReader reader = new MockSourceReader(
                elementsQueue,
                () -> new SplitReader<int[], MockSourceSplit>() {
                    @Override
                    public RecordsWithSplitIds<int[]> fetch() {
                        throw new RuntimeException(errMsg);
                    }

                    @Override
                    public void handleSplitsChanges(SplitsChange<MockSourceSplit> splitsChanges) {}

                    @Override
                    public void wakeUp() {}

                    @Override
                    public void close() {}
                },
                getConfig(),
                new TestingReaderContext())) {
            ValidatingSourceOutput output = new ValidatingSourceOutput();
            reader.addSplits(
                    Collections.singletonList(
                            getSplit(0, NUM_RECORDS_PER_SPLIT, Boundedness.CONTINUOUS_UNBOUNDED)));
            reader.notifyNoMoreSplits();
            // This is not a real infinite loop; the failing fetcher is expected to
            // surface its exception after two polls.
            while (true) {
                InputStatus inputStatus = reader.pollNext(output);
                assertThat(inputStatus).isNotEqualTo(InputStatus.END_OF_INPUT);
                // Add a sleep to avoid a tight loop.
                Thread.sleep(1);
            }
        }
    }).isInstanceOf(RuntimeException.class)
            .hasMessage("One or more fetchers have encountered exception");
}
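In the test above the context is only plumbing for the reader. For orientation, a minimal sketch of using TestingReaderContext on its own follows; it assumes the no-arg constructor seen above plus the standard SourceReaderContext accessors, and that this Flink version types metricGroup() as SourceReaderMetricGroup (as the Kafka example below suggests).

import org.apache.flink.api.connector.source.SourceReaderContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.testutils.source.reader.TestingReaderContext;
import org.apache.flink.metrics.groups.SourceReaderMetricGroup;

class TestingReaderContextSketch {
    static void sketch() {
        // No-arg constructor: defaults for configuration and metrics, as in the test above.
        SourceReaderContext context = new TestingReaderContext();
        // Standard SourceReaderContext accessors available to the component under test.
        Configuration config = context.getConfiguration();
        SourceReaderMetricGroup metrics = context.metricGroup();
        // No coordinator is involved; the testing context is assumed to simply record this call.
        context.sendSplitRequest();
    }
}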
Use of org.apache.flink.connector.testutils.source.reader.TestingReaderContext in project flink by apache.
From the class KafkaPartitionSplitReaderTest, method createReader.
private KafkaPartitionSplitReader createReader(
        Properties additionalProperties, SourceReaderMetricGroup sourceReaderMetricGroup) {
    Properties props = new Properties();
    props.putAll(KafkaSourceTestEnv.getConsumerProperties(ByteArrayDeserializer.class));
    props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
    if (!additionalProperties.isEmpty()) {
        props.putAll(additionalProperties);
    }
    KafkaSourceReaderMetrics kafkaSourceReaderMetrics =
            new KafkaSourceReaderMetrics(sourceReaderMetricGroup);
    return new KafkaPartitionSplitReader(
            props,
            new TestingReaderContext(new Configuration(), sourceReaderMetricGroup),
            kafkaSourceReaderMetrics);
}
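A hedged sketch of how this helper might be called; the UnregisteredMetricsGroup factory used for the metric group argument is an assumption based on common Flink test practice, not part of the snippet above.

// Hypothetical call site: no extra consumer properties, unregistered (no-op) metrics.
KafkaPartitionSplitReader reader =
        createReader(new Properties(), UnregisteredMetricsGroup.createSourceReaderMetricGroup());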
Use of org.apache.flink.connector.testutils.source.reader.TestingReaderContext in project flink by apache.
From the class KafkaSourceReaderTest, method testDisableOffsetCommit.
@Test
void testDisableOffsetCommit() throws Exception {
    final Properties properties = new Properties();
    properties.setProperty(KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT.key(), "false");
    try (KafkaSourceReader<Integer> reader =
            (KafkaSourceReader<Integer>)
                    createReader(
                            Boundedness.CONTINUOUS_UNBOUNDED,
                            new TestingReaderContext(),
                            (ignore) -> {},
                            properties)) {
        reader.addSplits(getSplits(numSplits, NUM_RECORDS_PER_SPLIT, Boundedness.CONTINUOUS_UNBOUNDED));
        ValidatingSourceOutput output = new ValidatingSourceOutput();
        long checkpointId = 0;
        do {
            checkpointId++;
            reader.pollNext(output);
            // Create a checkpoint for each message consumption, but do not complete it.
            reader.snapshotState(checkpointId);
            // The offsets to commit should always be empty because offset commit is disabled.
            assertThat(reader.getOffsetsToCommit()).isEmpty();
        } while (output.count() < totalNumRecords);
    }
}
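For contrast, a sketch of the same scaffolding with offset commit left at its default (enabled); the assertions describe the expected behavior rather than an actual Flink test, and the helpers are assumed to be the same ones used above.

// Sketch only: offsets-on-checkpoint commit enabled (the default), same helpers as above.
try (KafkaSourceReader<Integer> reader =
        (KafkaSourceReader<Integer>)
                createReader(
                        Boundedness.CONTINUOUS_UNBOUNDED,
                        new TestingReaderContext(),
                        (ignore) -> {},
                        new Properties())) {
    reader.addSplits(getSplits(numSplits, NUM_RECORDS_PER_SPLIT, Boundedness.CONTINUOUS_UNBOUNDED));
    ValidatingSourceOutput output = new ValidatingSourceOutput();
    do {
        reader.pollNext(output);
    } while (output.count() < totalNumRecords);
    reader.snapshotState(1L);
    // With commits enabled, the per-checkpoint offset map should now have an entry.
    assertThat(reader.getOffsetsToCommit()).isNotEmpty();
    // Completing the checkpoint is what triggers the asynchronous commit to Kafka.
    reader.notifyCheckpointComplete(1L);
}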
Use of org.apache.flink.connector.testutils.source.reader.TestingReaderContext in project flink by apache.
From the class KafkaSourceReaderTest, method testAssigningEmptySplits.
@Test
void testAssigningEmptySplits() throws Exception {
    // Normal split with NUM_RECORDS_PER_SPLIT records
    final KafkaPartitionSplit normalSplit =
            new KafkaPartitionSplit(new TopicPartition(TOPIC, 0), 0, KafkaPartitionSplit.LATEST_OFFSET);
    // Empty split with no records
    final KafkaPartitionSplit emptySplit =
            new KafkaPartitionSplit(
                    new TopicPartition(TOPIC, 1), NUM_RECORDS_PER_SPLIT, NUM_RECORDS_PER_SPLIT);
    // Split-finished hook for listening to finished splits
    final Set<String> finishedSplits = new HashSet<>();
    final Consumer<Collection<String>> splitFinishedHook = finishedSplits::addAll;
    try (final KafkaSourceReader<Integer> reader =
            (KafkaSourceReader<Integer>)
                    createReader(
                            Boundedness.BOUNDED,
                            "KafkaSourceReaderTestGroup",
                            new TestingReaderContext(),
                            splitFinishedHook)) {
        reader.addSplits(Arrays.asList(normalSplit, emptySplit));
        pollUntil(
                reader,
                new TestingReaderOutput<>(),
                () -> reader.getNumAliveFetchers() == 0,
                "The split fetcher did not exit before timeout.");
        MatcherAssert.assertThat(
                finishedSplits,
                Matchers.containsInAnyOrder(
                        KafkaPartitionSplit.toSplitId(normalSplit.getTopicPartition()),
                        KafkaPartitionSplit.toSplitId(emptySplit.getTopicPartition())));
    }
}
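pollUntil is a private helper of the test class and is not included in this aggregation; a hypothetical reconstruction is sketched below, and the real helper's signature and timeout handling may differ.

// Hypothetical polling helper: drives the reader until the condition holds or a timeout expires.
private void pollUntil(
        KafkaSourceReader<Integer> reader,
        ReaderOutput<Integer> output,
        Supplier<Boolean> condition,
        String errorMessage)
        throws Exception {
    long deadline = System.currentTimeMillis() + 60_000L;
    while (!condition.get()) {
        if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(errorMessage);
        }
        reader.pollNext(output);
        Thread.sleep(10);
    }
}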
Use of org.apache.flink.connector.testutils.source.reader.TestingReaderContext in project flink by apache.
From the class PulsarSourceReaderTestBase, method sourceReader.
private PulsarSourceReaderBase<Integer> sourceReader(
        boolean autoAcknowledgementEnabled, SubscriptionType subscriptionType) {
    Configuration configuration = operator().config();
    configuration.set(PULSAR_MAX_FETCH_RECORDS, 1);
    configuration.set(PULSAR_MAX_FETCH_TIME, 1000L);
    configuration.set(PULSAR_SUBSCRIPTION_NAME, randomAlphabetic(10));
    configuration.set(PULSAR_SUBSCRIPTION_TYPE, subscriptionType);
    if (autoAcknowledgementEnabled
            || configuration.get(PULSAR_SUBSCRIPTION_TYPE) == SubscriptionType.Shared) {
        configuration.set(PULSAR_ENABLE_AUTO_ACKNOWLEDGE_MESSAGE, true);
    }
    PulsarDeserializationSchema<Integer> deserializationSchema = pulsarSchema(Schema.INT32);
    SourceReaderContext context = new TestingReaderContext();
    try {
        deserializationSchema.open(
                new PulsarDeserializationSchemaInitializationContext(context),
                mock(SourceConfiguration.class));
    } catch (Exception e) {
        fail("Error while opening deserializationSchema");
    }
    SourceConfiguration sourceConfiguration = new SourceConfiguration(configuration);
    return (PulsarSourceReaderBase<Integer>)
            PulsarSourceReaderFactory.create(context, deserializationSchema, sourceConfiguration);
}
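A hedged usage sketch for this factory method; the subscription type and the close() call are illustrative choices, not taken from the test base.

// Hypothetical call site: reader with auto-acknowledgement on a Shared subscription.
PulsarSourceReaderBase<Integer> reader = sourceReader(true, SubscriptionType.Shared);
// ... exercise the reader, then release its resources.
reader.close();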