Use of org.apache.flink.streaming.connectors.kafka.internals.AbstractPartitionDiscoverer in project flink by apache.
From the class FlinkKafkaConsumerBaseTest, the method checkFilterRestoredPartitionsWithDisovered:
private void checkFilterRestoredPartitionsWithDisovered(
        List<String> restoredKafkaTopics,
        List<String> initKafkaTopics,
        List<String> expectedSubscribedPartitions,
        Boolean disableFiltering) throws Exception {
    // Discoverer that reports exactly one partition (partition 0) per configured topic.
    final AbstractPartitionDiscoverer discoverer = new TestPartitionDiscoverer(
            new KafkaTopicsDescriptor(initKafkaTopics, null), 0, 1,
            TestPartitionDiscoverer.createMockGetAllTopicsSequenceFromFixedReturn(initKafkaTopics),
            TestPartitionDiscoverer.createMockGetAllPartitionsFromTopicsSequenceFromFixedReturn(
                    initKafkaTopics.stream().map(topic -> new KafkaTopicPartition(topic, 0)).collect(Collectors.toList())));
    final FlinkKafkaConsumerBase<String> consumer = new DummyFlinkKafkaConsumer<>(initKafkaTopics, discoverer);
    if (disableFiltering) {
        consumer.disableFilterRestoredPartitionsWithSubscribedTopics();
    }
    // Restored checkpoint state: one partition per restored topic, with an arbitrary offset.
    final TestingListState<Tuple2<KafkaTopicPartition, Long>> listState = new TestingListState<>();
    for (int i = 0; i < restoredKafkaTopics.size(); i++) {
        listState.add(new Tuple2<>(new KafkaTopicPartition(restoredKafkaTopics.get(i), 0), 12345L));
    }
    setupConsumer(consumer, true, listState, true, 0, 1);
    // After restore, the consumer should be subscribed to exactly the expected topics.
    Map<KafkaTopicPartition, Long> subscribedPartitionsToStartOffsets = consumer.getSubscribedPartitionsToStartOffsets();
    assertEquals(
            new HashSet<>(expectedSubscribedPartitions),
            subscribedPartitionsToStartOffsets.keySet().stream().map(partition -> partition.getTopic()).collect(Collectors.toSet()));
}
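
For illustration, a minimal sketch of how such a helper could be invoked from a test method in the same class. The test name and topic values below are hypothetical, not taken from the Flink test suite, and assume the usual java.util.Arrays and org.junit.Test imports; it exercises the case where restored-partition filtering is disabled, so partitions restored for a topic that is no longer subscribed are expected to be kept.

@Test
public void testRestoredPartitionsKeptWhenFilteringDisabled() throws Exception {
    // Hypothetical scenario: state was restored for "topic-a" and "topic-removed",
    // but only "topic-a" is still configured. With filtering disabled, both
    // restored topics are expected to remain among the subscribed partitions.
    checkFilterRestoredPartitionsWithDisovered(
            Arrays.asList("topic-a", "topic-removed"), // restored topics from state
            Arrays.asList("topic-a"),                  // currently configured topics
            Arrays.asList("topic-a", "topic-removed"), // expected subscribed topics
            true);                                     // disable filtering
}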