Usage of org.apache.flink.connector.kafka.source.enumerator.subscriber.KafkaSubscriber in the Apache Flink project.
From the class KafkaEnumeratorTest, method createEnumerator.
/**
 * Creates the enumerator under test. The tests in this class do not depend on the
 * subscriber or the offsets initializers, so arbitrary settings are used for both.
 */
private KafkaSourceEnumerator createEnumerator(
        MockSplitEnumeratorContext<KafkaPartitionSplit> enumContext,
        boolean enablePeriodicPartitionDiscovery,
        Collection<String> topicsToSubscribe,
        Set<TopicPartition> assignedPartitions,
        Properties overrideProperties) {
    // Subscribe via a topic pattern so that no exception is thrown if a
    // subscribed topic has not been created yet.
    Pattern topicPattern = Pattern.compile(String.join("|", topicsToSubscribe));
    KafkaSubscriber subscriber = KafkaSubscriber.getTopicPatternSubscriber(topicPattern);

    OffsetsInitializer startingOffsets = OffsetsInitializer.earliest();
    OffsetsInitializer stoppingOffsets = new NoStoppingOffsetsInitializer();

    // Base consumer properties act as defaults; overrides are deep-copied on top.
    Properties props =
            new Properties(KafkaSourceTestEnv.getConsumerProperties(StringDeserializer.class));
    KafkaSourceEnumerator.deepCopyProperties(overrideProperties, props);
    // A 1 ms interval enables periodic partition discovery; -1 disables it.
    props.setProperty(
            KafkaSourceOptions.PARTITION_DISCOVERY_INTERVAL_MS.key(),
            enablePeriodicPartitionDiscovery ? "1" : "-1");

    return new KafkaSourceEnumerator(
            subscriber,
            startingOffsets,
            stoppingOffsets,
            props,
            enumContext,
            Boundedness.CONTINUOUS_UNBOUNDED,
            assignedPartitions);
}
Aggregations