Search in sources :

Example 6 with TopicPartitionReplica

Use of org.apache.kafka.common.TopicPartitionReplica in the project apache-kafka-on-k8s by banzaicloud.

In the class KafkaAdminClient, the method describeReplicaLogDirs.

/**
 * Queries the log directory information for each requested replica by sending one
 * DescribeLogDirsRequest per broker that hosts at least one of the replicas.
 *
 * <p>Every requested replica gets its own {@link KafkaFutureImpl}; a broker's response
 * completes the futures for all replicas on that broker in one pass.
 *
 * @param replicas the replicas whose log directory information should be described
 * @param options  request options (timeout)
 * @return a result wrapping one future per requested replica
 */
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    // One future per requested replica, created up front so the result object can be
    // returned immediately while the broker calls complete asynchronously.
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>());
    }
    // Group the requested partitions by broker id so each broker receives a single request.
    Map<Integer, Set<TopicPartition>> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        if (!partitionsByBroker.containsKey(replica.brokerId()))
            partitionsByBroker.put(replica.brokerId(), new HashSet<TopicPartition>());
        partitionsByBroker.get(replica.brokerId()).add(new TopicPartition(replica.topic(), replica.partition()));
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Set<TopicPartition>> entry : partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final Set<TopicPartition> topicPartitions = entry.getValue();
        // Pre-seed with empty ReplicaLogDirInfo so that every requested partition is
        // completed (possibly with an empty info) even if it is absent from the response.
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (TopicPartition topicPartition : topicPartitions) replicaDirInfoByPartition.put(topicPartition, new ReplicaLogDirInfo());
        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                // The response is keyed by log directory; each directory reports the
                // replicas it currently holds.
                for (Map.Entry<String, DescribeLogDirsResponse.LogDirInfo> responseEntry : response.logDirInfos().entrySet()) {
                    String logDir = responseEntry.getKey();
                    DescribeLogDirsResponse.LogDirInfo logDirInfo = responseEntry.getValue();
                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error == Errors.KAFKA_STORAGE_ERROR)
                        continue;
                    // NOTE(review): handleFailure completes ALL futures exceptionally
                    // (including those for other brokers) and the loop still continues;
                    // the later complete() calls are presumably no-ops on already-failed
                    // futures — confirm against KafkaFutureImpl semantics.
                    if (logDirInfo.error != Errors.NONE)
                        handleFailure(new IllegalStateException("The error " + logDirInfo.error + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
                    for (Map.Entry<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos.entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        DescribeLogDirsResponse.ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            // Broker reported a partition we never asked about.
                            handleFailure(new IllegalStateException("The partition " + tp + " in the response from broker " + brokerId + " is not in the request"));
                        } else if (replicaInfo.isFuture) {
                            // Future replica: record its dir/lag, keep current-replica fields.
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), replicaLogDirInfo.getCurrentReplicaOffsetLag(), logDir, replicaInfo.offsetLag));
                        } else {
                            // Current replica: record its dir/lag, keep future-replica fields.
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, replicaInfo.offsetLag, replicaLogDirInfo.getFutureReplicaLogDir(), replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                // Complete the future of every partition requested from this broker,
                // whether or not the response mentioned it (see pre-seeding above).
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry : replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = entry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(entry.getValue());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // Fails every outstanding future, not just this broker's.
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>>(futures));
}
Also used : ReplicaLogDirInfo(org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ReplicaLogDirInfo(org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo) TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) HashSet(java.util.HashSet) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) DescribeLogDirsResponse(org.apache.kafka.common.requests.DescribeLogDirsResponse) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TopicPartition(org.apache.kafka.common.TopicPartition) DescribeLogDirsRequest(org.apache.kafka.common.requests.DescribeLogDirsRequest) Map(java.util.Map) HashMap(java.util.HashMap)

Example 7 with TopicPartitionReplica

use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.

In the class KafkaAdminClientTest, the method testAlterReplicaLogDirsSuccess.

/**
 * Verifies that alterReplicaLogDirs succeeds when two replicas of the same partition,
 * hosted on two different brokers, both receive an error-free broker response.
 */
@Test
public void testAlterReplicaLogDirsSuccess() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // Prime both brokers with a successful AlterReplicaLogDirs response.
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 0);
        createAlterLogDirsResponse(env, env.cluster().nodeById(1), Errors.NONE, 0);
        TopicPartitionReplica replicaOnBroker0 = new TopicPartitionReplica("topic", 0, 0);
        TopicPartitionReplica replicaOnBroker1 = new TopicPartitionReplica("topic", 0, 1);
        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(replicaOnBroker0, "/data0");
        targetDirs.put(replicaOnBroker1, "/data1");
        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);
        // Each per-replica future yields null on success.
        assertNull(result.values().get(replicaOnBroker0).get());
        assertNull(result.values().get(replicaOnBroker1).get());
    }
}
Also used : TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) HashMap(java.util.HashMap) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 8 with TopicPartitionReplica

use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.

In the class KafkaAdminClientTest, the method testAlterReplicaLogDirsPartialResponse.

/**
 * Verifies that when the broker's response covers only one of two requested replicas,
 * the covered replica's future succeeds while the omitted replica's future fails
 * with an ApiException.
 */
@Test
public void testAlterReplicaLogDirsPartialResponse() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // The prepared response acknowledges partition 1 only.
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 1);
        TopicPartitionReplica answeredReplica = new TopicPartitionReplica("topic", 1, 0);
        TopicPartitionReplica omittedReplica = new TopicPartitionReplica("topic", 2, 0);
        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(answeredReplica, "/data1");
        targetDirs.put(omittedReplica, "/data1");
        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);
        assertNull(result.values().get(answeredReplica).get());
        // The replica missing from the response must surface an ApiException.
        TestUtils.assertFutureThrows(result.values().get(omittedReplica), ApiException.class);
    }
}
Also used : TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) HashMap(java.util.HashMap) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 9 with TopicPartitionReplica

use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.

In the class KafkaAdminClientTest, the method testAlterReplicaLogDirsUnrequested.

/**
 * Verifies that extra, unrequested partitions in the broker response are tolerated:
 * the single requested replica still completes successfully.
 */
@Test
public void testAlterReplicaLogDirsUnrequested() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // The prepared response covers partitions 1 and 2, but only 1 is requested below.
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 1, 2);
        TopicPartitionReplica requestedReplica = new TopicPartitionReplica("topic", 1, 0);
        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(requestedReplica, "/data1");
        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);
        // The requested replica succeeds despite the surplus entry in the response.
        assertNull(result.values().get(requestedReplica).get());
    }
}
Also used : TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) HashMap(java.util.HashMap) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 10 with TopicPartitionReplica

use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.

In the class KafkaAdminClientTest, the method testAlterReplicaLogDirsPartialFailure.

/**
 * Verifies partial failure: with retries disabled and only broker 1 primed with a
 * response, the replica targeting broker 0 times out with an ApiException while the
 * replica targeting broker 1 completes successfully.
 */
@Test
public void testAlterReplicaLogDirsPartialFailure() throws Exception {
    long defaultApiTimeoutMs = 60000;
    MockTime mockTime = new MockTime();
    try (AdminClientUnitTestEnv env = mockClientEnv(mockTime, AdminClientConfig.RETRIES_CONFIG, "0")) {
        // Provide only one prepared response from node 1
        env.kafkaClient().prepareResponseFrom(prepareAlterLogDirsResponse(Errors.NONE, "topic", 2), env.cluster().nodeById(1));
        TopicPartitionReplica unansweredReplica = new TopicPartitionReplica("topic", 1, 0);
        TopicPartitionReplica answeredReplica = new TopicPartitionReplica("topic", 2, 1);
        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(unansweredReplica, "/data1");
        targetDirs.put(answeredReplica, "/data1");
        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);
        // Wait until the prepared attempt has been consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0, "Failed awaiting requests");
        // Wait until the request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1, "Failed awaiting request");
        // Advance time past the default api timeout to time out the inflight request
        mockTime.sleep(defaultApiTimeoutMs + 1);
        // Broker 0 never answered, so its replica fails; broker 1's replica succeeded.
        TestUtils.assertFutureThrows(result.values().get(unansweredReplica), ApiException.class);
        assertNull(result.values().get(answeredReplica).get());
    }
}
Also used : TopicPartitionReplica(org.apache.kafka.common.TopicPartitionReplica) HashMap(java.util.HashMap) MockTime(org.apache.kafka.common.utils.MockTime) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Aggregations

TopicPartitionReplica (org.apache.kafka.common.TopicPartitionReplica)13 HashMap (java.util.HashMap)11 KafkaFuture (org.apache.kafka.common.KafkaFuture)7 Test (org.junit.jupiter.api.Test)7 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)7 KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl)6 Map (java.util.Map)5 ReplicaLogDirInfo (org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo)5 DescribeLogDirsResponse (org.apache.kafka.common.requests.DescribeLogDirsResponse)5 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)4 TopicPartition (org.apache.kafka.common.TopicPartition)4 ChannelBuilder (org.apache.kafka.common.network.ChannelBuilder)4 AbstractResponse (org.apache.kafka.common.requests.AbstractResponse)4 KafkaStorageException (org.apache.kafka.common.errors.KafkaStorageException)3 DescribeLogDirsResponseData (org.apache.kafka.common.message.DescribeLogDirsResponseData)3 DescribeLogDirsRequest (org.apache.kafka.common.requests.DescribeLogDirsRequest)3 ArrayList (java.util.ArrayList)2 HashSet (java.util.HashSet)2 Set (java.util.Set)2 TreeMap (java.util.TreeMap)2