Search in sources :

Example 21 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class KafkaExactlyOnceDemo, method recreateTopics.

/**
 * Deletes the demo input/output topics and re-creates them with the requested
 * partition count, retrying until the broker has fully purged the old metadata.
 *
 * @param numPartitions number of partitions for each re-created topic
 * @throws ExecutionException   if an admin request fails for any reason other
 *                              than {@link TopicExistsException}
 * @throws InterruptedException if interrupted while sleeping between retries
 */
private static void recreateTopics(final int numPartitions) throws ExecutionException, InterruptedException {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaProperties.KAFKA_SERVER_URL + ":" + KafkaProperties.KAFKA_SERVER_PORT);
    List<String> topicsToDelete = Arrays.asList(INPUT_TOPIC, OUTPUT_TOPIC);
    // try-with-resources: the original never closed the Admin client, leaking its
    // network threads and connections.
    try (Admin adminClient = Admin.create(props)) {
        deleteTopic(adminClient, topicsToDelete);
        // Poll until the deletion is reflected in the broker's topic listing.
        while (true) {
            System.out.println("Making sure the topics are deleted successfully: " + topicsToDelete);
            Set<String> listedTopics = adminClient.listTopics().names().get();
            System.out.println("Current list of topics: " + listedTopics);
            // Done once none of the topics slated for deletion are still listed.
            if (listedTopics.stream().noneMatch(topicsToDelete::contains)) {
                break;
            }
            Thread.sleep(1000);
        }
        // Create topics in a retry loop: creation can race with lingering metadata
        // of the just-deleted topics, in which case we delete again and retry.
        while (true) {
            final short replicationFactor = 1;
            final List<NewTopic> newTopics = Arrays.asList(
                new NewTopic(INPUT_TOPIC, numPartitions, replicationFactor),
                new NewTopic(OUTPUT_TOPIC, numPartitions, replicationFactor));
            try {
                adminClient.createTopics(newTopics).all().get();
                System.out.println("Created new topics: " + newTopics);
                break;
            } catch (ExecutionException e) {
                // TopicExistsException means the old metadata is not gone yet;
                // anything else is a genuine failure and is propagated.
                if (!(e.getCause() instanceof TopicExistsException)) {
                    throw e;
                }
                System.out.println("Metadata of the old topics are not cleared yet...");
                deleteTopic(adminClient, topicsToDelete);
                Thread.sleep(1000);
            }
        }
    }
}
Also used : NewTopic(org.apache.kafka.clients.admin.NewTopic) Properties(java.util.Properties) Admin(org.apache.kafka.clients.admin.Admin) ExecutionException(java.util.concurrent.ExecutionException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException)

Example 22 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class DeadLetterQueueReporter, method createAndSetup.

/**
 * Ensures the configured dead letter queue (DLQ) topic exists — creating it
 * with the configured replication factor when missing — and returns a
 * {@link DeadLetterQueueReporter} backed by a freshly created producer.
 *
 * @param adminProps           configuration for the short-lived admin client
 * @param id                   the connector task this reporter serves
 * @param sinkConfig           supplies the DLQ topic name and replication factor
 * @param producerProps        configuration for the DLQ producer
 * @param errorHandlingMetrics metrics sink for error-handling events
 * @return a reporter wired to the DLQ topic
 * @throws ConnectException if the topic cannot be verified or created
 */
public static DeadLetterQueueReporter createAndSetup(Map<String, Object> adminProps, ConnectorTaskId id, SinkConnectorConfig sinkConfig, Map<String, Object> producerProps, ErrorHandlingMetrics errorHandlingMetrics) {
    String topic = sinkConfig.dlqTopicName();
    try (Admin admin = Admin.create(adminProps)) {
        if (!admin.listTopics().names().get().contains(topic)) {
            // NOTE(review): logged at error level although topic auto-creation is
            // the expected recovery path — consider warn/info; message kept as-is.
            log.error("Topic {} doesn't exist. Will attempt to create topic.", topic);
            NewTopic schemaTopicRequest = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS, sinkConfig.dlqTopicReplicationFactor());
            admin.createTopics(singleton(schemaTopicRequest)).all().get();
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers up the stack can still observe
        // the interruption (the original swallowed it).
        Thread.currentThread().interrupt();
        throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
    } catch (ExecutionException e) {
        // A concurrent creation of the same topic is benign; anything else is fatal.
        if (!(e.getCause() instanceof TopicExistsException)) {
            throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
        }
    }
    KafkaProducer<byte[], byte[]> dlqProducer = new KafkaProducer<>(producerProps);
    return new DeadLetterQueueReporter(dlqProducer, sinkConfig, id, errorHandlingMetrics);
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) NewTopic(org.apache.kafka.clients.admin.NewTopic) Admin(org.apache.kafka.clients.admin.Admin) ExecutionException(java.util.concurrent.ExecutionException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) ConnectException(org.apache.kafka.connect.errors.ConnectException)

Example 23 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class EmbeddedKafkaCluster, method describeTopics.

/**
 * Get the topic descriptions of the named topics. The value of the map entry will be empty
 * if the topic does not exist.
 *
 * @param topicNames the names of the topics to describe
 * @return the map of optional {@link TopicDescription} keyed by the topic name
 */
public Map<String, Optional<TopicDescription>> describeTopics(Set<String> topicNames) {
    Map<String, Optional<TopicDescription>> results = new HashMap<>();
    log.info("Describing topics {}", topicNames);
    try (Admin admin = createAdminClient()) {
        DescribeTopicsResult result = admin.describeTopics(topicNames);
        Map<String, KafkaFuture<TopicDescription>> byName = result.topicNameValues();
        for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : byName.entrySet()) {
            String topicName = entry.getKey();
            try {
                TopicDescription desc = entry.getValue().get();
                results.put(topicName, Optional.of(desc));
                log.info("Found topic {} : {}", topicName, desc);
            } catch (ExecutionException e) {
                Throwable cause = e.getCause();
                // An unknown topic is an expected outcome — record it as empty.
                if (cause instanceof UnknownTopicOrPartitionException) {
                    results.put(topicName, Optional.empty());
                    // Fixed typo ("non-existant") in the original log message.
                    log.info("Found non-existent topic {}", topicName);
                    continue;
                }
                // Fixed missing space before the topic names to match the
                // outer catch's message format below.
                throw new AssertionError("Could not describe topic(s) " + topicNames, e);
            }
        }
    } catch (Exception e) {
        throw new AssertionError("Could not describe topic(s) " + topicNames, e);
    }
    log.info("Found topics {}", results);
    return results;
}
Also used : Optional(java.util.Optional) KafkaFuture(org.apache.kafka.common.KafkaFuture) HashMap(java.util.HashMap) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) Admin(org.apache.kafka.clients.admin.Admin) KafkaException(org.apache.kafka.common.KafkaException) InvalidReplicationFactorException(org.apache.kafka.common.errors.InvalidReplicationFactorException) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) ConnectException(org.apache.kafka.connect.errors.ConnectException) DescribeTopicsResult(org.apache.kafka.clients.admin.DescribeTopicsResult) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) ExecutionException(java.util.concurrent.ExecutionException) Map(java.util.Map) HashMap(java.util.HashMap)

Example 24 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class KafkaStreams, method allLocalStorePartitionLags.

/**
 * Computes, for every local state store, the restoration lag of each of its
 * partitions across the given active-restoring and standby tasks.
 *
 * @param tasksToCollectLagFor tasks whose changelog positions are inspected
 * @return an unmodifiable map of store name to (partition, lag) entries
 */
protected Map<String, Map<Integer, LagInfo>> allLocalStorePartitionLags(final List<Task> tasksToCollectLagFor) {
    final Map<String, Map<Integer, LagInfo>> localStorePartitionLags = new TreeMap<>();
    final Collection<TopicPartition> changelogPartitions = new LinkedList<>();
    final Map<TopicPartition, Long> changelogPositions = new HashMap<>();
    // Collect every task's changelog partitions and known positions; a partition
    // whose restoration has not started yet will simply have no position entry.
    for (final Task task : tasksToCollectLagFor) {
        changelogPartitions.addAll(task.changelogPartitions());
        changelogPositions.putAll(task.changelogOffsets());
    }
    log.debug("Current changelog positions: {}", changelogPositions);
    final Map<TopicPartition, ListOffsetsResultInfo> endOffsets =
        fetchEndOffsets(changelogPartitions, adminClient);
    log.debug("Current end offsets :{}", endOffsets);
    for (final Map.Entry<TopicPartition, ListOffsetsResultInfo> endOffsetEntry : endOffsets.entrySet()) {
        final TopicPartition partition = endOffsetEntry.getKey();
        final long endOffset = endOffsetEntry.getValue().offset();
        // Lags for not-yet-started restorations are computed from offset zero rather
        // than the real "earliest offset" of the changelog, saving an extra admin
        // lookup. Relative lagginess ordering across the cluster is preserved, but
        // the absolute amount of remaining restore work is over-estimated.
        final long position = changelogPositions.getOrDefault(partition, 0L);
        final long effectivePosition = position == Task.LATEST_OFFSET ? endOffset : position;
        final LagInfo lagInfo = new LagInfo(effectivePosition, endOffset);
        final String storeName = streamsMetadataState.getStoreForChangelogTopic(partition.topic());
        localStorePartitionLags
            .computeIfAbsent(storeName, unused -> new TreeMap<>())
            .put(partition.partition(), lagInfo);
    }
    return Collections.unmodifiableMap(localStorePartitionLags);
}
Also used : SHUTDOWN_CLIENT(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT) RecordingLevel(org.apache.kafka.common.metrics.Sensor.RecordingLevel) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) Arrays(java.util.Arrays) TopologyMetadata(org.apache.kafka.streams.processor.internals.TopologyMetadata) StreamsException(org.apache.kafka.streams.errors.StreamsException) StateDirectory(org.apache.kafka.streams.processor.internals.StateDirectory) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) Metric(org.apache.kafka.common.Metric) MetricName(org.apache.kafka.common.MetricName) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) CommonClientConfigs(org.apache.kafka.clients.CommonClientConfigs) TopicPartition(org.apache.kafka.common.TopicPartition) Sensor(org.apache.kafka.common.metrics.Sensor) QueryableStoreProvider(org.apache.kafka.streams.state.internals.QueryableStoreProvider) Evolving(org.apache.kafka.common.annotation.InterfaceStability.Evolving) QueryConfig(org.apache.kafka.streams.query.QueryConfig) MemberToRemove(org.apache.kafka.clients.admin.MemberToRemove) StreamsMetadataState(org.apache.kafka.streams.processor.internals.StreamsMetadataState) Time(org.apache.kafka.common.utils.Time) Collection(java.util.Collection) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) FailureReason(org.apache.kafka.streams.query.FailureReason) ProcessorStateException(org.apache.kafka.streams.errors.ProcessorStateException) Set(java.util.Set) UUID(java.util.UUID) StreamsNotStartedException(org.apache.kafka.streams.errors.StreamsNotStartedException) StateRestoreListener(org.apache.kafka.streams.processor.StateRestoreListener) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) 
Objects(java.util.Objects) ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) List(java.util.List) Metrics(org.apache.kafka.common.metrics.Metrics) ClientUtils(org.apache.kafka.streams.processor.internals.ClientUtils) StateQueryResult(org.apache.kafka.streams.query.StateQueryResult) GlobalStreamThread(org.apache.kafka.streams.processor.internals.GlobalStreamThread) StreamThreadStateStoreProvider(org.apache.kafka.streams.state.internals.StreamThreadStateStoreProvider) ApiUtils.validateMillisecondDuration(org.apache.kafka.streams.internals.ApiUtils.validateMillisecondDuration) MetricsReporter(org.apache.kafka.common.metrics.MetricsReporter) Entry(java.util.Map.Entry) Optional(java.util.Optional) InvalidStateStoreException(org.apache.kafka.streams.errors.InvalidStateStoreException) ClientMetrics(org.apache.kafka.streams.internals.metrics.ClientMetrics) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) TaskId(org.apache.kafka.streams.processor.TaskId) KafkaMetricsContext(org.apache.kafka.common.metrics.KafkaMetricsContext) HostInfo(org.apache.kafka.streams.state.HostInfo) PositionBound(org.apache.kafka.streams.query.PositionBound) HashMap(java.util.HashMap) RemoveMembersFromConsumerGroupResult(org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupResult) UnknownStateStoreException(org.apache.kafka.streams.errors.UnknownStateStoreException) ArrayList(java.util.ArrayList) MetricsContext(org.apache.kafka.common.metrics.MetricsContext) HashSet(java.util.HashSet) LinkedHashMap(java.util.LinkedHashMap) StreamsStoppedException(org.apache.kafka.streams.errors.StreamsStoppedException) StateQueryRequest(org.apache.kafka.streams.query.StateQueryRequest) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) State(org.apache.kafka.streams.processor.internals.GlobalStreamThread.State) BiConsumer(java.util.function.BiConsumer) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) 
Admin(org.apache.kafka.clients.admin.Admin) LinkedList(java.util.LinkedList) QueryResult(org.apache.kafka.streams.query.QueryResult) JmxReporter(org.apache.kafka.common.metrics.JmxReporter) TimeoutException(org.apache.kafka.common.errors.TimeoutException) StreamPartitioner(org.apache.kafka.streams.processor.StreamPartitioner) Logger(org.slf4j.Logger) Properties(java.util.Properties) StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) StreamThread(org.apache.kafka.streams.processor.internals.StreamThread) InvalidStateStorePartitionException(org.apache.kafka.streams.errors.InvalidStateStorePartitionException) ThreadStateTransitionValidator(org.apache.kafka.streams.processor.internals.ThreadStateTransitionValidator) RemoveMembersFromConsumerGroupOptions(org.apache.kafka.clients.admin.RemoveMembersFromConsumerGroupOptions) ClientUtils.fetchEndOffsets(org.apache.kafka.streams.processor.internals.ClientUtils.fetchEndOffsets) GlobalStateStoreProvider(org.apache.kafka.streams.state.internals.GlobalStateStoreProvider) Task(org.apache.kafka.streams.processor.internals.Task) AssignorError(org.apache.kafka.streams.processor.internals.assignment.AssignorError) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) ApiUtils.prepareMillisCheckFailMsgPrefix(org.apache.kafka.streams.internals.ApiUtils.prepareMillisCheckFailMsgPrefix) StateStore(org.apache.kafka.streams.processor.StateStore) TreeMap(java.util.TreeMap) Serializer(org.apache.kafka.common.serialization.Serializer) Collections(java.util.Collections) METRICS_RECORDING_LEVEL_CONFIG(org.apache.kafka.streams.StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG) Task(org.apache.kafka.streams.processor.internals.Task) ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TreeMap(java.util.TreeMap) 
LinkedList(java.util.LinkedList) TopicPartition(org.apache.kafka.common.TopicPartition) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TreeMap(java.util.TreeMap)

Example 25 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class StreamsResetter, method run.

/**
 * Entry point of the reset tool: parses the command line, connects an admin
 * client to the target cluster, and runs the reset actions (optionally as a
 * dry run that only prints what would be done).
 *
 * @param args   raw command-line arguments
 * @param config extra consumer configuration merged into the tool's own
 * @return {@code 0} on success, {@code EXIT_CODE_ERROR} on any failure
 */
public int run(final String[] args, final Properties config) {
    int exitCode;
    Admin adminClient = null;
    try {
        parseArguments(args);
        final boolean dryRun = options.has(dryRunOption);
        final String groupId = options.valueOf(applicationIdOption);
        // Build the admin/consumer connection properties: optional command
        // config file first, then the mandatory bootstrap servers.
        final Properties adminProps = new Properties();
        if (options.has(commandConfigOption)) {
            adminProps.putAll(Utils.loadProps(options.valueOf(commandConfigOption)));
        }
        adminProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, options.valueOf(bootstrapServerOption));
        adminClient = Admin.create(adminProps);
        maybeDeleteActiveConsumers(groupId, adminClient);
        // Refresh the cached topic list from the cluster (bounded wait).
        allTopics.clear();
        allTopics.addAll(adminClient.listTopics().names().get(60, TimeUnit.SECONDS));
        if (dryRun) {
            System.out.println("----Dry run displays the actions which will be performed when running Streams Reset Tool----");
        }
        // The tool's own properties override anything supplied via `config`.
        final HashMap<Object, Object> consumerConfig = new HashMap<>(config);
        consumerConfig.putAll(adminProps);
        exitCode = maybeResetInputAndSeekToEndIntermediateTopicOffsets(consumerConfig, dryRun);
        exitCode |= maybeDeleteInternalTopics(adminClient, dryRun);
    } catch (final Throwable e) {
        // Deliberately broad: a CLI tool reports any failure and exits non-zero.
        exitCode = EXIT_CODE_ERROR;
        System.err.println("ERROR: " + e);
        e.printStackTrace(System.err);
    } finally {
        // Close with a generous timeout so in-flight admin requests can finish.
        if (adminClient != null) {
            adminClient.close(Duration.ofSeconds(60));
        }
    }
    return exitCode;
}
Also used : HashMap(java.util.HashMap) Admin(org.apache.kafka.clients.admin.Admin) Properties(java.util.Properties)

Aggregations

Admin (org.apache.kafka.clients.admin.Admin)27 ExecutionException (java.util.concurrent.ExecutionException)12 Map (java.util.Map)11 Properties (java.util.Properties)9 HashMap (java.util.HashMap)8 TopicPartition (org.apache.kafka.common.TopicPartition)8 NewTopic (org.apache.kafka.clients.admin.NewTopic)7 AdminClientConfig (org.apache.kafka.clients.admin.AdminClientConfig)6 Test (org.junit.Test)6 Collection (java.util.Collection)5 ConfigResource (org.apache.kafka.common.config.ConfigResource)5 Arrays (java.util.Arrays)4 Collections (java.util.Collections)4 Optional (java.util.Optional)4 Set (java.util.Set)4 Config (org.apache.kafka.clients.admin.Config)4 ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult)4 MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig)4 Logger (org.slf4j.Logger)4 IOException (java.io.IOException)3