Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.
The class KafkaExactlyOnceDemo, method recreateTopics.
private static void recreateTopics(final int numPartitions) throws ExecutionException, InterruptedException {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaProperties.KAFKA_SERVER_URL + ":" + KafkaProperties.KAFKA_SERVER_PORT);
    Admin adminClient = Admin.create(props);
    List<String> topicsToDelete = Arrays.asList(INPUT_TOPIC, OUTPUT_TOPIC);
    deleteTopic(adminClient, topicsToDelete);
    // Check topic existence in a retry loop
    while (true) {
        System.out.println("Making sure the topics are deleted successfully: " + topicsToDelete);
        Set<String> listedTopics = adminClient.listTopics().names().get();
        System.out.println("Current list of topics: " + listedTopics);
        boolean hasTopicInfo = false;
        for (String listedTopic : listedTopics) {
            if (topicsToDelete.contains(listedTopic)) {
                hasTopicInfo = true;
                break;
            }
        }
        if (!hasTopicInfo) {
            break;
        }
        Thread.sleep(1000);
    }
    // Create topics in a retry loop
    while (true) {
        final short replicationFactor = 1;
        final List<NewTopic> newTopics = Arrays.asList(new NewTopic(INPUT_TOPIC, numPartitions, replicationFactor), new NewTopic(OUTPUT_TOPIC, numPartitions, replicationFactor));
        try {
            adminClient.createTopics(newTopics).all().get();
            System.out.println("Created new topics: " + newTopics);
            break;
        } catch (ExecutionException e) {
            if (!(e.getCause() instanceof TopicExistsException)) {
                throw e;
            }
            System.out.println("Metadata of the old topics is not cleared yet...");
            deleteTopic(adminClient, topicsToDelete);
            Thread.sleep(1000);
        }
    }
}
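The deleteTopic helper called above is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming it only needs to issue the deletion and tolerate topics that are already gone (the method body below is an assumption for illustration, not the project's actual implementation; it assumes the usual org.apache.kafka.common.errors imports):

private static void deleteTopic(final Admin adminClient, final List<String> topicsToDelete) throws ExecutionException, InterruptedException {
    try {
        // Issue the deletion and wait for the controller to accept it.
        adminClient.deleteTopics(topicsToDelete).all().get();
    } catch (ExecutionException e) {
        // For a "recreate" workflow, topics that are already gone are fine to ignore.
        if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
            throw e;
        }
    }
}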
Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.
The class DeadLetterQueueReporter, method createAndSetup.
public static DeadLetterQueueReporter createAndSetup(Map<String, Object> adminProps, ConnectorTaskId id, SinkConnectorConfig sinkConfig, Map<String, Object> producerProps, ErrorHandlingMetrics errorHandlingMetrics) {
    String topic = sinkConfig.dlqTopicName();
    try (Admin admin = Admin.create(adminProps)) {
        if (!admin.listTopics().names().get().contains(topic)) {
            log.error("Topic {} doesn't exist. Will attempt to create topic.", topic);
            NewTopic schemaTopicRequest = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS, sinkConfig.dlqTopicReplicationFactor());
            admin.createTopics(singleton(schemaTopicRequest)).all().get();
        }
    } catch (InterruptedException e) {
        throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
    } catch (ExecutionException e) {
        if (!(e.getCause() instanceof TopicExistsException)) {
            throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
        }
    }
    KafkaProducer<byte[], byte[]> dlqProducer = new KafkaProducer<>(producerProps);
    return new DeadLetterQueueReporter(dlqProducer, sinkConfig, id, errorHandlingMetrics);
}
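If the dead letter queue topic needed non-default settings, the same NewTopic request could carry per-topic configs before the create call. A small sketch, assuming the standard org.apache.kafka.common.config.TopicConfig constants; the config keys and values below are illustrative assumptions, not what Connect actually sets for the DLQ:

Map<String, String> topicConfigs = new HashMap<>();
// Illustrative settings only; Connect does not apply these to the DLQ by default.
topicConfigs.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE);
topicConfigs.put(TopicConfig.RETENTION_MS_CONFIG, String.valueOf(TimeUnit.DAYS.toMillis(7)));

NewTopic dlqTopic = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS, sinkConfig.dlqTopicReplicationFactor())
        .configs(topicConfigs);
admin.createTopics(singleton(dlqTopic)).all().get();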
Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.
The class EmbeddedKafkaCluster, method describeTopics.
/**
* Get the topic descriptions of the named topics. The value of the map entry will be empty
* if the topic does not exist.
*
* @param topicNames the names of the topics to describe
* @return the map of optional {@link TopicDescription} keyed by the topic name
*/
public Map<String, Optional<TopicDescription>> describeTopics(Set<String> topicNames) {
    Map<String, Optional<TopicDescription>> results = new HashMap<>();
    log.info("Describing topics {}", topicNames);
    try (Admin admin = createAdminClient()) {
        DescribeTopicsResult result = admin.describeTopics(topicNames);
        Map<String, KafkaFuture<TopicDescription>> byName = result.topicNameValues();
        for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : byName.entrySet()) {
            String topicName = entry.getKey();
            try {
                TopicDescription desc = entry.getValue().get();
                results.put(topicName, Optional.of(desc));
                log.info("Found topic {} : {}", topicName, desc);
            } catch (ExecutionException e) {
                Throwable cause = e.getCause();
                if (cause instanceof UnknownTopicOrPartitionException) {
                    results.put(topicName, Optional.empty());
                    log.info("Found non-existent topic {}", topicName);
                    continue;
                }
                throw new AssertionError("Could not describe topic(s) " + topicNames, e);
            }
        }
    } catch (Exception e) {
        throw new AssertionError("Could not describe topic(s) " + topicNames, e);
    }
    log.info("Found topics {}", results);
    return results;
}
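A caller of this helper typically unwraps the Optional per topic. A minimal usage sketch, assuming an EmbeddedKafkaCluster instance named cluster and a topic named "my-topic" (both names are placeholders):

Map<String, Optional<TopicDescription>> descriptions = cluster.describeTopics(Collections.singleton("my-topic"));
Optional<TopicDescription> description = descriptions.get("my-topic");
if (description.isPresent()) {
    // The topic exists; inspect its partition count.
    System.out.println("Partitions: " + description.get().partitions().size());
} else {
    System.out.println("Topic my-topic does not exist");
}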
Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.
The class KafkaStreams, method allLocalStorePartitionLags.
protected Map<String, Map<Integer, LagInfo>> allLocalStorePartitionLags(final List<Task> tasksToCollectLagFor) {
    final Map<String, Map<Integer, LagInfo>> localStorePartitionLags = new TreeMap<>();
    final Collection<TopicPartition> allPartitions = new LinkedList<>();
    final Map<TopicPartition, Long> allChangelogPositions = new HashMap<>();
    // Obtain the current positions of all the active-restoring and standby tasks
    for (final Task task : tasksToCollectLagFor) {
        allPartitions.addAll(task.changelogPartitions());
        // Note that not all changelog partitions will have positions, since some may not have started
        allChangelogPositions.putAll(task.changelogOffsets());
    }
    log.debug("Current changelog positions: {}", allChangelogPositions);
    final Map<TopicPartition, ListOffsetsResultInfo> allEndOffsets;
    allEndOffsets = fetchEndOffsets(allPartitions, adminClient);
    log.debug("Current end offsets: {}", allEndOffsets);
    for (final Map.Entry<TopicPartition, ListOffsetsResultInfo> entry : allEndOffsets.entrySet()) {
        // Avoiding an extra admin API lookup by computing lags for not-yet-started restorations
        // from zero instead of the real "earliest offset" for the changelog.
        // This will yield the correct relative order of lagginess for the tasks in the cluster,
        // but it is an over-estimate of how much work remains to restore the task from scratch.
        final long earliestOffset = 0L;
        final long changelogPosition = allChangelogPositions.getOrDefault(entry.getKey(), earliestOffset);
        final long latestOffset = entry.getValue().offset();
        final LagInfo lagInfo = new LagInfo(changelogPosition == Task.LATEST_OFFSET ? latestOffset : changelogPosition, latestOffset);
        final String storeName = streamsMetadataState.getStoreForChangelogTopic(entry.getKey().topic());
        localStorePartitionLags.computeIfAbsent(storeName, ignored -> new TreeMap<>()).put(entry.getKey().partition(), lagInfo);
    }
    return Collections.unmodifiableMap(localStorePartitionLags);
}
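fetchEndOffsets is an internal Streams helper; its effect is roughly one listOffsets round trip asking for the latest offset of every changelog partition. A minimal sketch of the equivalent Admin call, reusing allPartitions and adminClient from the method above; exception handling for the blocking get() is omitted here:

Map<TopicPartition, OffsetSpec> request = new HashMap<>();
for (TopicPartition partition : allPartitions) {
    // Ask for the log-end offset of every changelog partition in a single request.
    request.put(partition, OffsetSpec.latest());
}
Map<TopicPartition, ListOffsetsResultInfo> endOffsets = adminClient.listOffsets(request).all().get();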
Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.
The class StreamsResetter, method run.
public int run(final String[] args, final Properties config) {
    int exitCode;
    Admin adminClient = null;
    try {
        parseArguments(args);
        final boolean dryRun = options.has(dryRunOption);
        final String groupId = options.valueOf(applicationIdOption);
        final Properties properties = new Properties();
        if (options.has(commandConfigOption)) {
            properties.putAll(Utils.loadProps(options.valueOf(commandConfigOption)));
        }
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, options.valueOf(bootstrapServerOption));
        adminClient = Admin.create(properties);
        maybeDeleteActiveConsumers(groupId, adminClient);
        allTopics.clear();
        allTopics.addAll(adminClient.listTopics().names().get(60, TimeUnit.SECONDS));
        if (dryRun) {
            System.out.println("----Dry run displays the actions which will be performed when running Streams Reset Tool----");
        }
        final HashMap<Object, Object> consumerConfig = new HashMap<>(config);
        consumerConfig.putAll(properties);
        exitCode = maybeResetInputAndSeekToEndIntermediateTopicOffsets(consumerConfig, dryRun);
        exitCode |= maybeDeleteInternalTopics(adminClient, dryRun);
    } catch (final Throwable e) {
        exitCode = EXIT_CODE_ERROR;
        System.err.println("ERROR: " + e);
        e.printStackTrace(System.err);
    } finally {
        if (adminClient != null) {
            adminClient.close(Duration.ofSeconds(60));
        }
    }
    return exitCode;
}
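maybeDeleteInternalTopics ultimately issues a plain Admin.deleteTopics call for the application's internal topics. A rough sketch of that step, reusing groupId, allTopics, dryRun and adminClient from the method above; the name-matching rule below is a simplified assumption, and the real tool performs more validation before deleting anything:

List<String> internalTopics = new ArrayList<>();
for (String topic : allTopics) {
    // Streams internal topics are prefixed with "<application.id>-"; this suffix check is a simplification.
    if (topic.startsWith(groupId + "-") && (topic.endsWith("-changelog") || topic.endsWith("-repartition"))) {
        internalTopics.add(topic);
    }
}
if (!dryRun && !internalTopics.isEmpty()) {
    // Delete the matched internal topics and wait for the operation to complete.
    adminClient.deleteTopics(internalTopics).all().get();
}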