
Example 1 with DescribeLogDirsResponse

Use of org.apache.kafka.common.requests.DescribeLogDirsResponse in project kafka by apache.

From class KafkaAdminClient, method describeReplicaLogDirs:

@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<>());
    }
    Map<Integer, DescribeLogDirsRequestData> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(), brokerId -> new DescribeLogDirsRequestData());
        DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic());
        if (describableLogDirTopic == null) {
            List<Integer> partitions = new ArrayList<>();
            partitions.add(replica.partition());
            describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic()).setPartitions(partitions);
            requestData.topics().add(describableLogDirTopic);
        } else {
            describableLogDirTopic.partitions().add(replica.partition());
        }
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, DescribeLogDirsRequestData> entry : partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final DescribeLogDirsRequestData topicPartitions = entry.getValue();
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (DescribableLogDirTopic topicPartition : topicPartitions.topics()) {
            for (Integer partitionId : topicPartition.partitions()) {
                replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo());
            }
        }
        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                for (Map.Entry<String, LogDirDescription> responseEntry : logDirDescriptions(response).entrySet()) {
                    String logDir = responseEntry.getKey();
                    LogDirDescription logDirInfo = responseEntry.getValue();
                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error() instanceof KafkaStorageException)
                        continue;
                    if (logDirInfo.error() != null)
                        handleFailure(new IllegalStateException("The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
                    for (Map.Entry<TopicPartition, ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos().entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp);
                        } else if (replicaInfo.isFuture()) {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), replicaLogDirInfo.getCurrentReplicaOffsetLag(), logDir, replicaInfo.offsetLag()));
                        } else {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, replicaInfo.offsetLag(), replicaLogDirInfo.getFutureReplicaLogDir(), replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry : replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = entry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(entry.getValue());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<>(futures));
}
Also used : ReplicaLogDirInfo(org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo) HashMap(java.util.HashMap) DescribeLogDirsRequestData(org.apache.kafka.common.message.DescribeLogDirsRequestData) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ArrayList(java.util.ArrayList) TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) KafkaStorageException(org.apache.kafka.common.errors.KafkaStorageException) DescribableLogDirTopic(org.apache.kafka.common.message.DescribeLogDirsRequestData.DescribableLogDirTopic) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) DescribeLogDirsResponse(org.apache.kafka.common.requests.DescribeLogDirsResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TopicPartition(org.apache.kafka.common.TopicPartition) DescribeLogDirsRequest(org.apache.kafka.common.requests.DescribeLogDirsRequest) Map(java.util.Map) TreeMap(java.util.TreeMap)
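For orientation, a minimal caller-side sketch of the same operation (an illustrative assumption, not code from the Kafka sources above): it queries the current and future log directory of one replica through the public Admin interface. The bootstrap address, topic name, and replica coordinates are placeholders, and imports from java.util, org.apache.kafka.clients.admin, and org.apache.kafka.common are assumed.

static void printReplicaLogDir() throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    try (Admin admin = Admin.create(props)) {
        // Replica of partition 0 of "my-topic" hosted on broker 1 (placeholder values)
        TopicPartitionReplica replica = new TopicPartitionReplica("my-topic", 0, 1);
        DescribeReplicaLogDirsResult result = admin.describeReplicaLogDirs(Collections.singleton(replica));
        DescribeReplicaLogDirsResult.ReplicaLogDirInfo info = result.values().get(replica).get();
        System.out.println("current dir: " + info.getCurrentReplicaLogDir()
                + ", current lag: " + info.getCurrentReplicaOffsetLag()
                + ", future dir: " + info.getFutureReplicaLogDir());
    }
}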

Example 2 with DescribeLogDirsResponse

Use of org.apache.kafka.common.requests.DescribeLogDirsResponse in project kafka by apache.

From class KafkaAdminClient, method describeLogDirs:

@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
        futures.put(brokerId, future);
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query all partitions in all log directories (a null topics list means describe everything)
                return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
                if (descriptions.size() > 0) {
                    future.complete(descriptions);
                } else {
                    // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
                    Errors error = response.data().errorCode() == Errors.NONE.code() ? Errors.CLUSTER_AUTHORIZATION_FAILED : Errors.forCode(response.data().errorCode());
                    future.completeExceptionally(error.exception());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        }, now);
    }
    return new DescribeLogDirsResult(new HashMap<>(futures));
}
Also used : HashMap(java.util.HashMap) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) DescribeLogDirsRequestData(org.apache.kafka.common.message.DescribeLogDirsRequestData) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) DescribeLogDirsResponse(org.apache.kafka.common.requests.DescribeLogDirsResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Errors(org.apache.kafka.common.protocol.Errors) DescribeLogDirsRequest(org.apache.kafka.common.requests.DescribeLogDirsRequest) Map(java.util.Map) TreeMap(java.util.TreeMap)
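Again for orientation, a minimal caller-side sketch (assumed, not taken from the example): describe every log directory on one broker through the public Admin interface and print per-replica sizes. Broker id 1 and the bootstrap address are placeholders; imports from java.util and org.apache.kafka.clients.admin are assumed.

static void printBrokerLogDirs() throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    try (Admin admin = Admin.create(props)) {
        // Describe all log directories on broker 1 (placeholder broker id)
        DescribeLogDirsResult result = admin.describeLogDirs(Collections.singleton(1));
        Map<String, LogDirDescription> dirs = result.descriptions().get(1).get();
        dirs.forEach((dir, description) -> {
            System.out.println("log dir " + dir + " error=" + description.error());
            description.replicaInfos().forEach((tp, replica) ->
                    System.out.println("  " + tp + " size=" + replica.size() + " lag=" + replica.offsetLag()));
        });
    }
}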

Example 3 with DescribeLogDirsResponse

Use of org.apache.kafka.common.requests.DescribeLogDirsResponse in project kafka by apache.

From class KafkaAdminClientTest, method testDescribeReplicaLogDirsUnexpected:

@Test
public void testDescribeReplicaLogDirsUnexpected() throws ExecutionException, InterruptedException {
    TopicPartitionReplica expected = new TopicPartitionReplica("topic", 12, 1);
    TopicPartitionReplica unexpected = new TopicPartitionReplica("topic", 12, 2);
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        String broker1log0 = "/var/data/kafka0";
        String broker1log1 = "/var/data/kafka1";
        int broker1Log0PartitionSize = 987654321;
        int broker1Log0OffsetLag = 24;
        int broker1Log1PartitionSize = 123456789;
        int broker1Log1OffsetLag = 4321;
        env.kafkaClient().prepareResponseFrom(new DescribeLogDirsResponse(new DescribeLogDirsResponseData().setResults(asList(prepareDescribeLogDirsResult(expected, broker1log0, broker1Log0PartitionSize, broker1Log0OffsetLag, false), prepareDescribeLogDirsResult(unexpected, broker1log1, broker1Log1PartitionSize, broker1Log1OffsetLag, true)))), env.cluster().nodeById(expected.brokerId()));
        DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(asList(expected));
        Map<TopicPartitionReplica, KafkaFuture<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> values = result.values();
        assertEquals(TestUtils.toSet(asList(expected)), values.keySet());
        assertNotNull(values.get(expected));
        assertEquals(broker1log0, values.get(expected).get().getCurrentReplicaLogDir());
        assertEquals(broker1Log0OffsetLag, values.get(expected).get().getCurrentReplicaOffsetLag());
        assertEquals(broker1log1, values.get(expected).get().getFutureReplicaLogDir());
        assertEquals(broker1Log1OffsetLag, values.get(expected).get().getFutureReplicaOffsetLag());
    }
}
Also used : TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) KafkaFuture(org.apache.kafka.common.KafkaFuture) DescribeLogDirsResponseData(org.apache.kafka.common.message.DescribeLogDirsResponseData) DescribeLogDirsResponse(org.apache.kafka.common.requests.DescribeLogDirsResponse) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
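prepareDescribeLogDirsResult is a private helper of KafkaAdminClientTest and is not shown on this page. Below is a rough, hedged sketch of what such a helper would need to build, using the generated DescribeLogDirsResponseData accessors; the nested type and field names follow the DescribeLogDirsResponse schema but should be checked against the actual test, and Collections plus org.apache.kafka.common.protocol.Errors are assumed imports.

static DescribeLogDirsResponseData.DescribeLogDirsResult prepareDescribeLogDirsResult(
        TopicPartitionReplica replica, String logDir, long partitionSize, long offsetLag, boolean isFuture) {
    // One result entry: a single log dir holding a single partition of the replica's topic
    return new DescribeLogDirsResponseData.DescribeLogDirsResult()
            .setErrorCode(Errors.NONE.code())
            .setLogDir(logDir)
            .setTopics(Collections.singletonList(new DescribeLogDirsResponseData.DescribeLogDirsTopic()
                    .setName(replica.topic())
                    .setPartitions(Collections.singletonList(new DescribeLogDirsResponseData.DescribeLogDirsPartition()
                            .setPartitionIndex(replica.partition())
                            .setPartitionSize(partitionSize)
                            .setOffsetLag(offsetLag)
                            .setIsFutureKey(isFuture)))));
}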

Example 4 with DescribeLogDirsResponse

Use of org.apache.kafka.common.requests.DescribeLogDirsResponse in project apache-kafka-on-k8s by banzaicloud.

From class KafkaAdminClient, method describeReplicaLogDirs:

@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>());
    }
    Map<Integer, Set<TopicPartition>> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        if (!partitionsByBroker.containsKey(replica.brokerId()))
            partitionsByBroker.put(replica.brokerId(), new HashSet<TopicPartition>());
        partitionsByBroker.get(replica.brokerId()).add(new TopicPartition(replica.topic(), replica.partition()));
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Set<TopicPartition>> entry : partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final Set<TopicPartition> topicPartitions = entry.getValue();
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (TopicPartition topicPartition : topicPartitions) replicaDirInfoByPartition.put(topicPartition, new ReplicaLogDirInfo());
        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                for (Map.Entry<String, DescribeLogDirsResponse.LogDirInfo> responseEntry : response.logDirInfos().entrySet()) {
                    String logDir = responseEntry.getKey();
                    DescribeLogDirsResponse.LogDirInfo logDirInfo = responseEntry.getValue();
                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error == Errors.KAFKA_STORAGE_ERROR)
                        continue;
                    if (logDirInfo.error != Errors.NONE)
                        handleFailure(new IllegalStateException("The error " + logDirInfo.error + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
                    for (Map.Entry<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos.entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        DescribeLogDirsResponse.ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            handleFailure(new IllegalStateException("The partition " + tp + " in the response from broker " + brokerId + " is not in the request"));
                        } else if (replicaInfo.isFuture) {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), replicaLogDirInfo.getCurrentReplicaOffsetLag(), logDir, replicaInfo.offsetLag));
                        } else {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, replicaInfo.offsetLag, replicaLogDirInfo.getFutureReplicaLogDir(), replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry : replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = entry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(entry.getValue());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>>(futures));
}
Also used : ReplicaLogDirInfo(org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) DescribeLogDirsResponse(org.apache.kafka.common.requests.DescribeLogDirsResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TopicPartition(org.apache.kafka.common.TopicPartition) DescribeLogDirsRequest(org.apache.kafka.common.requests.DescribeLogDirsRequest) Map(java.util.Map)

Example 5 with DescribeLogDirsResponse

Use of org.apache.kafka.common.requests.DescribeLogDirsResponse in project apache-kafka-on-k8s by banzaicloud.

From class KafkaAdminClient, method describeLogDirs:

@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    final Map<Integer, KafkaFutureImpl<Map<String, DescribeLogDirsResponse.LogDirInfo>>> futures = new HashMap<>(brokers.size());
    for (Integer brokerId : brokers) {
        futures.put(brokerId, new KafkaFutureImpl<Map<String, DescribeLogDirsResponse.LogDirInfo>>());
    }
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                // Query all partitions in all log directories (a null argument means describe everything)
                return new DescribeLogDirsRequest.Builder(null);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                KafkaFutureImpl<Map<String, DescribeLogDirsResponse.LogDirInfo>> future = futures.get(brokerId);
                if (response.logDirInfos().size() > 0) {
                    future.complete(response.logDirInfos());
                } else {
                    // response.logDirInfos() will be empty if and only if the user is not authorized to describe the Cluster resource.
                    future.completeExceptionally(Errors.CLUSTER_AUTHORIZATION_FAILED.exception());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeLogDirsResult(new HashMap<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>>(futures));
}
Also used : KafkaFuture(org.apache.kafka.common.KafkaFuture) HashMap(java.util.HashMap) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) ReplicaLogDirInfo(org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) DescribeLogDirsResponse(org.apache.kafka.common.requests.DescribeLogDirsResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DescribeLogDirsRequest(org.apache.kafka.common.requests.DescribeLogDirsRequest) Map(java.util.Map)
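In this older client the result is still keyed by the since-deprecated DescribeLogDirsResponse.LogDirInfo type, whose fields (error, replicaInfos) are public rather than exposed through accessor methods. A minimal caller-side sketch under that older API (assumed, with placeholder broker id and bootstrap address; imports from java.util and org.apache.kafka.clients.admin are assumed):

static void printBrokerLogDirsLegacy() throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    try (AdminClient admin = AdminClient.create(props)) {
        // values() on this client version maps brokerId -> future of (logDir -> LogDirInfo)
        DescribeLogDirsResult result = admin.describeLogDirs(Collections.singleton(1));
        Map<String, DescribeLogDirsResponse.LogDirInfo> dirs = result.values().get(1).get();
        dirs.forEach((dir, info) -> {
            System.out.println("log dir " + dir + " error=" + info.error);
            info.replicaInfos.forEach((tp, replica) ->
                    System.out.println("  " + tp + " size=" + replica.size + " lag=" + replica.offsetLag));
        });
    }
}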

Aggregations

DescribeLogDirsResponse (org.apache.kafka.common.requests.DescribeLogDirsResponse) 6
HashMap (java.util.HashMap) 4
Map (java.util.Map) 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 4
KafkaFuture (org.apache.kafka.common.KafkaFuture) 4
TopicPartitionReplica (org.apache.kafka.common.TopicPartitionReplica) 4
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl) 4
ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder) 4
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse) 4
DescribeLogDirsRequest (org.apache.kafka.common.requests.DescribeLogDirsRequest) 4
ReplicaLogDirInfo (org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo) 3
TreeMap (java.util.TreeMap) 2
TopicPartition (org.apache.kafka.common.TopicPartition) 2
DescribeLogDirsRequestData (org.apache.kafka.common.message.DescribeLogDirsRequestData) 2
DescribeLogDirsResponseData (org.apache.kafka.common.message.DescribeLogDirsResponseData) 2
Test (org.junit.jupiter.api.Test) 2
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 2
ArrayList (java.util.ArrayList) 1
HashSet (java.util.HashSet) 1
Set (java.util.Set) 1