Example usage of org.apache.kafka.common.TopicPartitionInfo from the project "flink" (by apache): the method createSinkDataReader of the class KafkaSinkExternalContext.
@Override
public ExternalSystemDataReader<String> createSinkDataReader(TestingSinkSettings sinkSettings) {
    LOG.info("Fetching information for topic: {}", topicName);
    // Resolve every partition of the sink topic so the reader subscribes to all of them.
    final Map<String, TopicDescription> topicMetadata = getTopicMetadata(Arrays.asList(topicName));
    final Set<TopicPartition> subscribedPartitions = new HashSet<>();
    for (TopicDescription topic : topicMetadata.values()) {
        for (TopicPartitionInfo partition : topic.partitions()) {
            subscribedPartitions.add(new TopicPartition(topic.name(), partition.partition()));
        }
    }
    final Properties properties = new Properties();
    // Derive a per-subscription group id so concurrent readers do not share offsets.
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "flink-kafka-test" + subscribedPartitions.hashCode());
    properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
    if (EXACTLY_ONCE.equals(sinkSettings.getCheckpointingMode())) {
        // Exactly-once sinks write transactionally; the verifying consumer must not see
        // uncommitted records (Kafka's default isolation level is read_uncommitted).
        properties.setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    }
    properties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    // Fix: keep a direct reference to the newly created reader instead of re-reading it
    // back out of the list by index (readers.get(readers.size() - 1)), which returns the
    // wrong element if the list is mutated between the add and the get.
    KafkaDataReader reader = new KafkaDataReader(properties, subscribedPartitions);
    readers.add(reader);
    return reader;
}
Example usage of org.apache.kafka.common.TopicPartitionInfo from the project "eventapis" (by kloiasoft): the method runInternal of the class ListTopicSchedule.
@Override
// Refreshes the shared topics map from the Kafka cluster: lists topics, drops entries for
// topics that no longer exist, and records each surviving topic's partition ids.
// Returns true on success; cluster-call failures surface as ExecutionException.
boolean runInternal(StopWatch stopWatch) throws InterruptedException, ExecutionException {
// NOTE(review): this start() has no matching stop() in the visible block — confirm the
// caller (or a later task) stops the watch before timings are read.
stopWatch.start("adminClient.listTopics()");
// Blocking fetch of all topic names, keeping only those shouldCollectEvent accepts.
Collection<String> topicNames = adminClient.listTopics().listings().get().stream().map(TopicListing::name).filter(this::shouldCollectEvent).collect(Collectors.toList());
// Evict map entries whose topics are no longer present (RemoveTopicPredicate matches
// keys absent from topicNames — presumably a Hazelcast-style predicate; verify).
topicsMap.removeAll(new RemoveTopicPredicate(topicNames));
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicNames);
// For each described topic, store its partition ids via an entry processor keyed by topic name.
describeTopicsResult.all().get().forEach((topic, topicDescription) -> topicsMap.executeOnKey(topic, new SetTopicPartitionsProcessor(topicDescription.partitions().stream().map(TopicPartitionInfo::partition).collect(Collectors.toList()))));
// Record the last successful run timestamp for this scheduler under a well-known key.
metaMap.set(this.getName() + TopicServiceScheduler.LAST_SUCCESS_PREFIX, System.currentTimeMillis());
log.debug("Topics:" + topicsMap.entrySet());
log.debug(stopWatch.prettyPrint());
return true;
}
Aggregations