Usage of org.apache.flink.connector.base.source.reader.splitreader.SplitsAddition in the Apache Flink project.
From class PulsarPartitionSplitReaderTestBase, method seekStartPositionAndHandleSplit:
/**
 * Seeks the given start position on a short-lived consumer, then registers the split
 * with the reader.
 *
 * <p>The consumer created here exists only to perform the seek; it is closed (via
 * try-with-resources) before the split change is handed to the reader. Seek failures
 * are rethrown unchecked through {@code sneakyThrow}.
 *
 * @param reader the Pulsar split reader under test
 * @param topicName the topic to read from
 * @param partitionId the partition index within the topic
 * @param startPosition the message id to seek to (inclusive)
 */
private void seekStartPositionAndHandleSplit(PulsarPartitionSplitReaderBase<String> reader, String topicName, int partitionId, MessageId startPosition) {
    TopicPartition topicPartition = new TopicPartition(topicName, partitionId, createFullRange());
    PulsarPartitionSplit partitionSplit = new PulsarPartitionSplit(topicPartition, StopCursor.never(), null, null);
    // Seek with a temporary consumer before the split is registered.
    try (Consumer<byte[]> seekConsumer = reader.createPulsarConsumer(topicPartition)) {
        // fromMessageId is inclusive of startPosition.
        StartCursor.fromMessageId(startPosition)
                .seekPosition(topicPartition.getTopic(), topicPartition.getPartitionId(), seekConsumer);
    } catch (PulsarClientException e) {
        sneakyThrow(e);
    }
    reader.handleSplitsChanges(new SplitsAddition<>(singletonList(partitionSplit)));
}
Usage of org.apache.flink.connector.base.source.reader.splitreader.SplitsAddition in the Apache Flink project.
From class PulsarPartitionSplitReaderTestBase, method handleSplit:
/**
 * Builds a never-stopping split over the full key range of the given partition and
 * registers it with the reader.
 *
 * @param reader the Pulsar split reader under test
 * @param topicName the topic to read from
 * @param partitionId the partition index within the topic
 * @param startPosition the message id the split should start from (may be null)
 */
protected void handleSplit(PulsarPartitionSplitReaderBase<String> reader, String topicName, int partitionId, MessageId startPosition) {
    TopicPartition topicPartition = new TopicPartition(topicName, partitionId, createFullRange());
    PulsarPartitionSplit partitionSplit =
            new PulsarPartitionSplit(topicPartition, StopCursor.never(), startPosition, null);
    reader.handleSplitsChanges(new SplitsAddition<>(singletonList(partitionSplit)));
}
Usage of org.apache.flink.connector.base.source.reader.splitreader.SplitsAddition in the Apache Flink project.
From class KafkaPartitionSplitReaderTest, method testUsingCommittedOffsetsWithNoneOffsetResetStrategy:
@Test
public void testUsingCommittedOffsetsWithNoneOffsetResetStrategy() {
    final Properties props = new Properties();
    props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "using-committed-offset-with-none-offset-reset");
    KafkaPartitionSplitReader reader = createReader(props, UnregisteredMetricsGroup.createSourceReaderMetricGroup());
    // The split asks for the committed offset, but this consumer group has never
    // committed one, and the offset reset strategy is "none" (the consumer throws
    // when no previous offset exists for its group). Registering the split must
    // therefore fail with a KafkaException about the missing committed offset.
    KafkaPartitionSplit committedOffsetSplit =
            new KafkaPartitionSplit(new TopicPartition(TOPIC1, 0), KafkaPartitionSplit.COMMITTED_OFFSET);
    final KafkaException undefinedOffsetException =
            Assertions.assertThrows(
                    KafkaException.class,
                    () -> reader.handleSplitsChanges(
                            new SplitsAddition<>(Collections.singletonList(committedOffsetSplit))));
    MatcherAssert.assertThat(
            undefinedOffsetException.getMessage(),
            CoreMatchers.containsString("Undefined offset with no reset policy for partition"));
}
Usage of org.apache.flink.connector.base.source.reader.splitreader.SplitsAddition in the Apache Flink project.
From class PulsarPartitionSplitReaderBase, method handleSplitsChanges:
/**
 * Registers exactly one newly added split with this reader.
 *
 * <p>Only {@link SplitsAddition} changes are supported, the reader accepts at most one
 * split over its lifetime, and each change must contain exactly one split. On success a
 * Pulsar consumer is created for the split, the split's start and stop cursors are
 * opened, and consumption is started.
 *
 * @param splitsChanges the split change to apply; must be a {@code SplitsAddition}
 *     carrying a single {@code PulsarPartitionSplit}
 * @throws UnsupportedOperationException if the change is not a {@code SplitsAddition}
 * @throws IllegalStateException if a split has already been assigned to this reader
 * @throws IllegalArgumentException if the change does not contain exactly one split
 */
@Override
public void handleSplitsChanges(SplitsChange<PulsarPartitionSplit> splitsChanges) {
    LOG.debug("Handle split changes {}", splitsChanges);
    // Get all the partition assignments and stopping offsets.
    if (!(splitsChanges instanceof SplitsAddition)) {
        throw new UnsupportedOperationException(String.format("The SplitChange type of %s is not supported.", splitsChanges.getClass()));
    }
    if (registeredSplit != null) {
        // Fixed message grammar: was "This split reader have assigned split."
        throw new IllegalStateException("This split reader has already been assigned a split.");
    }
    List<PulsarPartitionSplit> newSplits = splitsChanges.splits();
    // Fixed message grammar: was "only support one split."
    Preconditions.checkArgument(newSplits.size() == 1, "This pulsar split reader only supports one split.");
    PulsarPartitionSplit newSplit = newSplits.get(0);
    // Create pulsar consumer.
    Consumer<byte[]> consumer = createPulsarConsumer(newSplit);
    // Open start & stop cursor.
    newSplit.open(pulsarAdmin);
    // Start Consumer.
    startConsumer(newSplit, consumer);
    LOG.info("Register split {} consumer for current reader.", newSplit);
    this.registeredSplit = newSplit;
    this.pulsarConsumer = consumer;
}
Aggregations