
Example 6 with TopicPartitionInfo

use of org.apache.kafka.common.TopicPartitionInfo in project apache-kafka-on-k8s by banzaicloud.

the class KafkaAdminClient method describeTopics.

@Override
public DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options) {
    final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size());
    final ArrayList<String> topicNamesList = new ArrayList<>();
    for (String topicName : topicNames) {
        if (topicNameIsUnrepresentable(topicName)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<TopicDescription>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" + topicName + "' cannot be represented in a request."));
            topicFutures.put(topicName, future);
        } else if (!topicFutures.containsKey(topicName)) {
            topicFutures.put(topicName, new KafkaFutureImpl<TopicDescription>());
            topicNamesList.add(topicName);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) {

        // Tracks whether the broker accepts a MetadataRequest that disables auto topic creation;
        // handleUnsupportedVersionException() flips this flag and the call is retried as an all-topics request.
        private boolean supportsDisablingTopicCreation = true;

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            if (supportsDisablingTopicCreation)
                return new MetadataRequest.Builder(topicNamesList, false);
            else
                return MetadataRequest.Builder.allTopics();
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.cluster();
            Map<String, Errors> errors = response.errors();
            for (Map.Entry<String, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                String topicName = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                Errors topicError = errors.get(topicName);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                if (!cluster.topics().contains(topicName)) {
                    future.completeExceptionally(new InvalidTopicException("Topic " + topicName + " not found."));
                    continue;
                }
                boolean isInternal = cluster.internalTopics().contains(topicName);
                List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topicName);
                List<TopicPartitionInfo> partitions = new ArrayList<>(partitionInfos.size());
                for (PartitionInfo partitionInfo : partitionInfos) {
                    TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(partitionInfo.partition(), leader(partitionInfo), Arrays.asList(partitionInfo.replicas()), Arrays.asList(partitionInfo.inSyncReplicas()));
                    partitions.add(topicPartitionInfo);
                }
                Collections.sort(partitions, new Comparator<TopicPartitionInfo>() {

                    @Override
                    public int compare(TopicPartitionInfo tp1, TopicPartitionInfo tp2) {
                        return Integer.compare(tp1.partition(), tp2.partition());
                    }
                });
                TopicDescription topicDescription = new TopicDescription(topicName, isInternal, partitions);
                future.complete(topicDescription);
            }
        }

        private Node leader(PartitionInfo partitionInfo) {
            if (partitionInfo.leader() == null || partitionInfo.leader().id() == Node.noNode().id())
                return null;
            return partitionInfo.leader();
        }

        @Override
        boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
            if (supportsDisablingTopicCreation) {
                supportsDisablingTopicCreation = false;
                return true;
            }
            return false;
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicNamesList.isEmpty()) {
        runnable.call(call, now);
    }
    return new DescribeTopicsResult(new HashMap<String, KafkaFuture<TopicDescription>>(topicFutures));
}
Also used : HashMap (java.util.HashMap), AbstractRequest (org.apache.kafka.common.requests.AbstractRequest), ArrayList (java.util.ArrayList), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), PartitionInfo (org.apache.kafka.common.PartitionInfo), TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo), KafkaFuture (org.apache.kafka.common.KafkaFuture), AbstractResponse (org.apache.kafka.common.requests.AbstractResponse), Cluster (org.apache.kafka.common.Cluster), KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl), Errors (org.apache.kafka.common.protocol.Errors), MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException), Map (java.util.Map), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException)
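A caller only sees the public Admin API rather than the Call machinery above. The following is a minimal sketch of invoking describeTopics from application code and reading back the TopicDescription and TopicPartitionInfo values it produces; the bootstrap address and topic name ("localhost:9092", "my-topic") are placeholders, and errors are simply propagated as checked exceptions.

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;

public class DescribeTopicsSketch {

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        Properties props = new Properties();
        // Placeholder bootstrap address; point this at a real cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // all() collapses the per-topic futures built up in describeTopics() above.
            Map<String, TopicDescription> descriptions =
                    admin.describeTopics(Arrays.asList("my-topic")).all().get();
            for (TopicDescription description : descriptions.values()) {
                for (TopicPartitionInfo partition : description.partitions()) {
                    // partition(), leader(), replicas() and isr() mirror the fields populated
                    // from the MetadataResponse in the handleResponse() callback above.
                    System.out.printf("%s-%d leader=%s replicas=%d isr=%d%n",
                            description.name(), partition.partition(),
                            partition.leader(), partition.replicas().size(), partition.isr().size());
                }
            }
        }
    }
}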

Example 7 with TopicPartitionInfo

use of org.apache.kafka.common.TopicPartitionInfo in project strimzi by strimzi.

the class Utils method getTopicMetadata.

public static TopicMetadata getTopicMetadata(Topic kubeTopic) {
    // One broker node per requested replica.
    List<Node> nodes = new ArrayList<>();
    for (int nodeId = 0; nodeId < kubeTopic.getNumReplicas(); nodeId++) {
        nodes.add(new Node(nodeId, "localhost", 9092 + nodeId));
    }
    // Every partition is led by node 0 and fully replicated and in sync on all nodes.
    List<TopicPartitionInfo> partitions = new ArrayList<>();
    for (int partitionId = 0; partitionId < kubeTopic.getNumPartitions(); partitionId++) {
        partitions.add(new TopicPartitionInfo(partitionId, nodes.get(0), nodes, nodes));
    }
    // Copy the topic's config map into admin-client ConfigEntry objects.
    List<ConfigEntry> configs = new ArrayList<>();
    for (Map.Entry<String, String> entry : kubeTopic.getConfig().entrySet()) {
        configs.add(new ConfigEntry(entry.getKey(), entry.getValue()));
    }
    return new TopicMetadata(new TopicDescription(kubeTopic.getTopicName().toString(), false, partitions), new Config(configs));
}
Also used : Config (org.apache.kafka.clients.admin.Config), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), ConfigEntry (org.apache.kafka.clients.admin.ConfigEntry), TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo), TopicDescription (org.apache.kafka.clients.admin.TopicDescription), Map (java.util.Map)
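The TopicPartitionInfo constructor used in this helper takes the partition id, the leader node, the replica list and the in-sync-replica list, in that order. Below is a small self-contained sketch, independent of Strimzi's Topic and TopicMetadata types, that builds the same kind of object but with an ISR that is a strict subset of the replicas; the broker hosts and ports are invented.

import java.util.Arrays;
import java.util.List;

import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;

public class TopicPartitionInfoSketch {

    public static void main(String[] args) {
        Node broker0 = new Node(0, "localhost", 9092);
        Node broker1 = new Node(1, "localhost", 9093);
        Node broker2 = new Node(2, "localhost", 9094);
        List<Node> replicas = Arrays.asList(broker0, broker1, broker2);
        // Unlike the helper above, only two of the three replicas are reported as in sync here.
        List<Node> isr = Arrays.asList(broker0, broker1);
        TopicPartitionInfo partition0 = new TopicPartitionInfo(0, broker0, replicas, isr);
        System.out.println(partition0);
    }
}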

Example 8 with TopicPartitionInfo

use of org.apache.kafka.common.TopicPartitionInfo in project strimzi by strimzi.

the class Utils method getTopicMetadata.

public static TopicMetadata getTopicMetadata(String topicName, Config config) {
    Node node0 = new Node(0, "host0", 1234);
    Node node1 = new Node(1, "host1", 1234);
    Node node2 = new Node(2, "host2", 1234);
    List<Node> nodes02 = asList(node0, node1, node2);
    TopicDescription desc = new TopicDescription(topicName, false, asList(new TopicPartitionInfo(0, node0, nodes02, nodes02), new TopicPartitionInfo(1, node0, nodes02, nodes02)));
    // org.apache.kafka.clients.admin.Config config = new Config(configs);
    return new TopicMetadata(desc, config);
}
Also used : TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo), Node (org.apache.kafka.common.Node), TopicDescription (org.apache.kafka.clients.admin.TopicDescription)
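The config parameter here is the admin-client Config type rather than a plain map. A standalone sketch of constructing one from ConfigEntry instances and reading a value back, which is the shape of object a test would hand to this helper; the retention and cleanup values are only illustrative.

import java.util.Arrays;

import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;

public class TopicConfigSketch {

    public static void main(String[] args) {
        // Build the same kind of Config object that getTopicMetadata(topicName, config) expects.
        Config config = new Config(Arrays.asList(
                new ConfigEntry("retention.ms", "604800000"),
                new ConfigEntry("cleanup.policy", "delete")));
        // get(name) looks an entry up by its config key.
        System.out.println(config.get("retention.ms").value());
    }
}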

Example 9 with TopicPartitionInfo

use of org.apache.kafka.common.TopicPartitionInfo in project flink by apache.

the class TopicListSubscriber method getSubscribedTopicPartitions.

@Override
public Set<TopicPartition> getSubscribedTopicPartitions(AdminClient adminClient) {
    LOG.debug("Fetching descriptions for topics: {}", topics);
    final Map<String, TopicDescription> topicMetadata = getTopicMetadata(adminClient, new HashSet<>(topics));
    Set<TopicPartition> subscribedPartitions = new HashSet<>();
    for (TopicDescription topic : topicMetadata.values()) {
        for (TopicPartitionInfo partition : topic.partitions()) {
            subscribedPartitions.add(new TopicPartition(topic.name(), partition.partition()));
        }
    }
    return subscribedPartitions;
}
Also used : TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo), TopicPartition (org.apache.kafka.common.TopicPartition), TopicDescription (org.apache.kafka.clients.admin.TopicDescription), HashSet (java.util.HashSet)
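getTopicMetadata is a Flink-internal helper, so the sketch below shows roughly equivalent logic written directly against the AdminClient API, on the assumption that the helper is essentially a thin wrapper around describeTopics(...).all(); the method name subscribedPartitions and the exception handling are illustrative only.

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;

public class TopicListSubscriberSketch {

    // Roughly what getSubscribedTopicPartitions() does, but querying the admin client directly.
    static Set<TopicPartition> subscribedPartitions(AdminClient adminClient, List<String> topics)
            throws InterruptedException, ExecutionException {
        Map<String, TopicDescription> metadata = adminClient.describeTopics(topics).all().get();
        Set<TopicPartition> partitions = new HashSet<>();
        for (TopicDescription topic : metadata.values()) {
            for (TopicPartitionInfo partition : topic.partitions()) {
                partitions.add(new TopicPartition(topic.name(), partition.partition()));
            }
        }
        return partitions;
    }
}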

Example 10 with TopicPartitionInfo

use of org.apache.kafka.common.TopicPartitionInfo in project kafka by apache.

the class MockAdminClient method addTopic.

public synchronized void addTopic(boolean internal, String name, List<TopicPartitionInfo> partitions, Map<String, String> configs, boolean usesTopicId) {
    if (allTopics.containsKey(name)) {
        throw new IllegalArgumentException(String.format("Topic %s was already added.", name));
    }
    // Reject partitions whose leader, replicas or ISR refer to brokers this mock does not know about.
    for (TopicPartitionInfo partition : partitions) {
        if (!brokers.contains(partition.leader())) {
            throw new IllegalArgumentException("Leader broker unknown");
        }
        if (!brokers.containsAll(partition.replicas())) {
            throw new IllegalArgumentException("Unknown brokers in replica list");
        }
        if (!brokers.containsAll(partition.isr())) {
            throw new IllegalArgumentException("Unknown brokers in isr list");
        }
    }
    // Record the leader's first log dir for each partition that has a leader.
    ArrayList<String> logDirs = new ArrayList<>();
    for (TopicPartitionInfo partition : partitions) {
        if (partition.leader() != null) {
            logDirs.add(brokerLogDirs.get(partition.leader().id()).get(0));
        }
    }
    // Mint a random topic id only when topic ids are in use; otherwise fall back to the zero UUID.
    Uuid topicId;
    if (usesTopicId) {
        topicId = Uuid.randomUuid();
        topicIds.put(name, topicId);
        topicNames.put(topicId, name);
    } else {
        topicId = Uuid.ZERO_UUID;
    }
    allTopics.put(name, new TopicMetadata(topicId, internal, partitions, logDirs, configs));
}
Also used : Uuid (org.apache.kafka.common.Uuid), TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo), ArrayList (java.util.ArrayList)
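A possible test-side sketch of this method, assuming the MockAdminClient constructor that takes a broker list plus a controller node and the shorter addTopic overload without the usesTopicId flag, both of which appear in the Kafka clients test sources (MockAdminClient ships in the kafka-clients test artifact); the broker addresses and topic name are invented.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.MockAdminClient;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;

public class MockAdminClientSketch {

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        Node broker0 = new Node(0, "localhost", 9092);
        Node broker1 = new Node(1, "localhost", 9093);
        List<Node> brokers = Arrays.asList(broker0, broker1);
        try (MockAdminClient admin = new MockAdminClient(brokers, broker0)) {
            // Register a two-partition topic backed entirely by the brokers declared above,
            // so the leader/replica/isr validation in addTopic() passes.
            List<TopicPartitionInfo> partitions = Arrays.asList(
                    new TopicPartitionInfo(0, broker0, brokers, brokers),
                    new TopicPartitionInfo(1, broker1, brokers, brokers));
            admin.addTopic(false, "my-topic", partitions, Collections.emptyMap());
            Map<String, TopicDescription> described =
                    admin.describeTopics(Collections.singleton("my-topic")).all().get();
            System.out.println(described.get("my-topic").partitions().size());
        }
    }
}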

Aggregations

TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo): 62
Test (org.junit.Test): 33
TopicDescription (org.apache.kafka.clients.admin.TopicDescription): 31
Node (org.apache.kafka.common.Node): 28
ArrayList (java.util.ArrayList): 20
MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient): 18
NewTopic (org.apache.kafka.clients.admin.NewTopic): 16
HashMap (java.util.HashMap): 14
Cluster (org.apache.kafka.common.Cluster): 11
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 11
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 11
Test (org.junit.jupiter.api.Test): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 8
ConfigResource (org.apache.kafka.common.config.ConfigResource): 8
Map (java.util.Map): 7
AdminClient (org.apache.kafka.clients.admin.AdminClient): 7
Config (org.apache.kafka.clients.admin.Config): 7
TopicMetadataAndConfig (org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig): 7
TopicConfig (org.apache.kafka.common.config.TopicConfig): 7
ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig): 6