Use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
In the class PulsarSourceEnumStateSerializerTest, the method serializeAndDeserializePulsarSourceEnumState:
@Test
void serializeAndDeserializePulsarSourceEnumState() throws Exception {
    Set<TopicPartition> partitions =
            Sets.newHashSet(
                    new TopicPartition(randomAlphabetic(10), 2, new TopicRange(1, 30)),
                    new TopicPartition(randomAlphabetic(10), 1, createFullRange()));
    Set<PulsarPartitionSplit> splits =
            Collections.singleton(
                    new PulsarPartitionSplit(
                            new TopicPartition(randomAlphabetic(10), 10, createFullRange()),
                            StopCursor.defaultStopCursor()));
    Map<Integer, Set<PulsarPartitionSplit>> shared = Collections.singletonMap(5, splits);
    Map<Integer, Set<String>> mapping =
            ImmutableMap.of(
                    1, Sets.newHashSet(randomAlphabetic(10), randomAlphabetic(10)),
                    2, Sets.newHashSet(randomAlphabetic(10), randomAlphabetic(10)));

    PulsarSourceEnumState state = new PulsarSourceEnumState(partitions, splits, shared, mapping, true);

    byte[] bytes = INSTANCE.serialize(state);
    PulsarSourceEnumState state1 = INSTANCE.deserialize(INSTANCE.getVersion(), bytes);

    assertEquals(state.getAppendedPartitions(), state1.getAppendedPartitions());
    assertEquals(state.getPendingPartitionSplits(), state1.getPendingPartitionSplits());
    assertEquals(state.getReaderAssignedSplits(), state1.getReaderAssignedSplits());
    assertEquals(state.isInitialized(), state1.isInitialized());
    assertNotSame(state, state1);
}
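The test exercises the serializer through Flink's SimpleVersionedSerializer contract: serialize the state, then deserialize it with the serializer's current version. Below is a minimal sketch of a reusable round-trip helper for such tests; the class name SerializerRoundTrip and its placement are illustrative assumptions, not part of the Flink sources.

import java.io.IOException;

import org.apache.flink.core.io.SimpleVersionedSerializer;

public final class SerializerRoundTrip {
    private SerializerRoundTrip() {}

    // Serializes the value and deserializes it again with the serializer's current version,
    // mirroring the serialize/deserialize pair used in the test above.
    public static <T> T roundTrip(SimpleVersionedSerializer<T> serializer, T value) throws IOException {
        byte[] bytes = serializer.serialize(value);
        return serializer.deserialize(serializer.getVersion(), bytes);
    }
}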
Use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
In the class PulsarPartitionSplitReaderTestBase, the method handleSplit:
protected void handleSplit(
        PulsarPartitionSplitReaderBase<String> reader,
        String topicName,
        int partitionId,
        MessageId startPosition) {
    TopicPartition partition = new TopicPartition(topicName, partitionId, createFullRange());
    PulsarPartitionSplit split =
            new PulsarPartitionSplit(partition, StopCursor.never(), startPosition, null);
    SplitsAddition<PulsarPartitionSplit> addition = new SplitsAddition<>(singletonList(split));
    reader.handleSplitsChanges(addition);
}
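A hedged sketch of how this helper might be called from a test in a subclass of PulsarPartitionSplitReaderTestBase; the splitReader() factory and the topic name are illustrative assumptions, not taken from the Flink sources.

@Test
void assignsSplitStartingFromEarliestMessage() {
    // splitReader() is a hypothetical factory producing the reader under test.
    PulsarPartitionSplitReaderBase<String> reader = splitReader();
    // MessageId.earliest is the Pulsar client constant for the earliest available message.
    handleSplit(reader, "sample-topic", 0, MessageId.earliest);
}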
Use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
In the class PulsarTestCommonUtils, the method createPartitionSplit:
public static PulsarPartitionSplit createPartitionSplit(
        String topic, int partitionId, Boundedness boundedness, MessageId latestConsumedId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId, TopicRange.createFullRange());
    StopCursor stopCursor =
            boundedness == Boundedness.BOUNDED ? StopCursor.latest() : StopCursor.never();
    return new PulsarPartitionSplit(topicPartition, stopCursor, latestConsumedId, null);
}
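A minimal usage sketch of the utility above (the topic name is illustrative): Boundedness.BOUNDED produces a split that stops at the latest available message, while CONTINUOUS_UNBOUNDED produces one that never stops.

// Bounded split that will stop at the latest available message.
PulsarPartitionSplit bounded =
        createPartitionSplit("sample-topic", 0, Boundedness.BOUNDED, MessageId.earliest);
// Unbounded split that keeps consuming; no message has been consumed yet, so the cursor is null.
PulsarPartitionSplit unbounded =
        createPartitionSplit("sample-topic", 1, Boundedness.CONTINUOUS_UNBOUNDED, null);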
Use of org.apache.flink.connector.pulsar.source.split.PulsarPartitionSplit in project flink by apache.
In the class PulsarPartitionSplitReaderBase, the method handleSplitsChanges:
@Override
public void handleSplitsChanges(SplitsChange<PulsarPartitionSplit> splitsChanges) {
    LOG.debug("Handle split changes {}", splitsChanges);

    // Get all the partition assignments and stopping offsets.
    if (!(splitsChanges instanceof SplitsAddition)) {
        throw new UnsupportedOperationException(
                String.format(
                        "The SplitChange type of %s is not supported.", splitsChanges.getClass()));
    }
    if (registeredSplit != null) {
        throw new IllegalStateException("This split reader has already been assigned a split.");
    }

    List<PulsarPartitionSplit> newSplits = splitsChanges.splits();
    Preconditions.checkArgument(
            newSplits.size() == 1, "This pulsar split reader only supports one split.");
    PulsarPartitionSplit newSplit = newSplits.get(0);

    // Create the Pulsar consumer.
    Consumer<byte[]> consumer = createPulsarConsumer(newSplit);
    // Open the start & stop cursors.
    newSplit.open(pulsarAdmin);
    // Start the consumer.
    startConsumer(newSplit, consumer);

    LOG.info("Register split {} consumer for current reader.", newSplit);
    this.registeredSplit = newSplit;
    this.pulsarConsumer = consumer;
}
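The method enforces a simple contract: the reader only accepts a SplitsAddition carrying exactly one split, and only once. A minimal sketch of a caller honoring that contract, assuming an illustrative topic name and a reader instance named reader (the two-argument split constructor is the one shown in the serializer test above):

PulsarPartitionSplit split =
        new PulsarPartitionSplit(
                new TopicPartition("sample-topic", 0, createFullRange()),
                StopCursor.never());
// Exactly one split per addition; a larger list would fail the Preconditions check above.
reader.handleSplitsChanges(new SplitsAddition<>(singletonList(split)));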