Use of org.apache.kafka.common.TopicPartitionReplica in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaAdminClient, method describeReplicaLogDirs.
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    // One future per requested replica; each is completed (or failed) when the
    // owning broker's DescribeLogDirs response is handled.
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>());
    }
    // Group the requested partitions by broker so a single DescribeLogDirs
    // request is sent to each broker that hosts at least one requested replica.
    Map<Integer, Set<TopicPartition>> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        partitionsByBroker.computeIfAbsent(replica.brokerId(), brokerId -> new HashSet<TopicPartition>())
            .add(new TopicPartition(replica.topic(), replica.partition()));
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Set<TopicPartition>> entry : partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final Set<TopicPartition> topicPartitions = entry.getValue();
        // Accumulates per-partition results for this broker; a replica may appear
        // in up to two log directories (current and future replica), so entries
        // are merged across directories before the futures are completed.
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (TopicPartition topicPartition : topicPartitions)
            replicaDirInfoByPartition.put(topicPartition, new ReplicaLogDirInfo());
        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {
            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }
            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                for (Map.Entry<String, DescribeLogDirsResponse.LogDirInfo> responseEntry : response.logDirInfos().entrySet()) {
                    String logDir = responseEntry.getKey();
                    DescribeLogDirsResponse.LogDirInfo logDirInfo = responseEntry.getValue();
                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error == Errors.KAFKA_STORAGE_ERROR)
                        continue;
                    if (logDirInfo.error != Errors.NONE) {
                        handleFailure(new IllegalStateException("The error " + logDirInfo.error + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
                        // All futures have been failed above; skip this directory's
                        // replica info instead of acting on inconsistent data.
                        continue;
                    }
                    for (Map.Entry<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos.entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        DescribeLogDirsResponse.ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            // The broker reported a partition we never asked about.
                            handleFailure(new IllegalStateException("The partition " + tp + " in the response from broker " + brokerId + " is not in the request"));
                        } else if (replicaInfo.isFuture) {
                            // This directory hosts the future (moving-to) replica;
                            // keep the previously seen current-replica fields.
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), replicaLogDirInfo.getCurrentReplicaOffsetLag(), logDir, replicaInfo.offsetLag));
                        } else {
                            // This directory hosts the current replica; keep the
                            // previously seen future-replica fields.
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, replicaInfo.offsetLag, replicaLogDirInfo.getFutureReplicaLogDir(), replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                // Complete the future of every replica on this broker. The lookup
                // cannot return null here because replicaDirInfoByPartition only
                // ever contains partitions taken from the original request.
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> partitionEntry : replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = partitionEntry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(partitionEntry.getValue());
                }
            }
            @Override
            void handleFailure(Throwable throwable) {
                // A broker-level failure fails every requested replica at once.
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>>(futures));
}
Use of org.apache.kafka.common.TopicPartitionReplica in the project kafka by apache.
From the class KafkaAdminClientTest, method testAlterReplicaLogDirsSuccess.
@Test
public void testAlterReplicaLogDirsSuccess() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // Both brokers acknowledge the log-dir move for their replica.
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 0);
        createAlterLogDirsResponse(env, env.cluster().nodeById(1), Errors.NONE, 0);

        TopicPartitionReplica replicaOnBroker0 = new TopicPartitionReplica("topic", 0, 0);
        TopicPartitionReplica replicaOnBroker1 = new TopicPartitionReplica("topic", 0, 1);

        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(replicaOnBroker0, "/data0");
        targetDirs.put(replicaOnBroker1, "/data1");

        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);

        // A null result from get() signals that the per-replica move succeeded.
        assertNull(result.values().get(replicaOnBroker0).get());
        assertNull(result.values().get(replicaOnBroker1).get());
    }
}
Use of org.apache.kafka.common.TopicPartitionReplica in the project kafka by apache.
From the class KafkaAdminClientTest, method testAlterReplicaLogDirsPartialResponse.
@Test
public void testAlterReplicaLogDirsPartialResponse() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // Broker 0 acknowledges only partition 1; partition 2 is absent from its response.
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 1);

        TopicPartitionReplica answeredReplica = new TopicPartitionReplica("topic", 1, 0);
        TopicPartitionReplica missingReplica = new TopicPartitionReplica("topic", 2, 0);

        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(answeredReplica, "/data1");
        targetDirs.put(missingReplica, "/data1");

        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);

        // The acknowledged replica completes normally ...
        assertNull(result.values().get(answeredReplica).get());
        // ... while the replica missing from the response fails with an ApiException.
        TestUtils.assertFutureThrows(result.values().get(missingReplica), ApiException.class);
    }
}
Use of org.apache.kafka.common.TopicPartitionReplica in the project kafka by apache.
From the class KafkaAdminClientTest, method testAlterReplicaLogDirsUnrequested.
@Test
public void testAlterReplicaLogDirsUnrequested() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // Broker 0 responds for partitions 1 and 2 even though only partition 1 is requested.
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 1, 2);

        TopicPartitionReplica requestedReplica = new TopicPartitionReplica("topic", 1, 0);

        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(requestedReplica, "/data1");

        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);

        // The unrequested extra partition in the response must not break the requested one.
        assertNull(result.values().get(requestedReplica).get());
    }
}
Use of org.apache.kafka.common.TopicPartitionReplica in the project kafka by apache.
From the class KafkaAdminClientTest, method testAlterReplicaLogDirsPartialFailure.
@Test
public void testAlterReplicaLogDirsPartialFailure() throws Exception {
    long defaultApiTimeout = 60000;
    MockTime time = new MockTime();
    // Retries are disabled so the unanswered request times out instead of being retried.
    try (AdminClientUnitTestEnv env = mockClientEnv(time, AdminClientConfig.RETRIES_CONFIG, "0")) {
        // Provide only one prepared response from node 1
        env.kafkaClient().prepareResponseFrom(prepareAlterLogDirsResponse(Errors.NONE, "topic", 2), env.cluster().nodeById(1));

        TopicPartitionReplica timedOutReplica = new TopicPartitionReplica("topic", 1, 0);
        TopicPartitionReplica answeredReplica = new TopicPartitionReplica("topic", 2, 1);

        Map<TopicPartitionReplica, String> targetDirs = new HashMap<>();
        targetDirs.put(timedOutReplica, "/data1");
        targetDirs.put(answeredReplica, "/data1");

        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(targetDirs);

        // Wait until the prepared attempt has been consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0, "Failed awaiting requests");
        // Wait until the request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1, "Failed awaiting request");
        // Advance time past the default api timeout to time out the inflight request
        time.sleep(defaultApiTimeout + 1);

        // The request node 0 never answered fails; node 1's prepared answer succeeds.
        TestUtils.assertFutureThrows(result.values().get(timedOutReplica), ApiException.class);
        assertNull(result.values().get(answeredReplica).get());
    }
}
Aggregations