Use of org.apache.kafka.common.TopicPartitionReplica in project apache-kafka-on-k8s by banzaicloud.
Class KafkaAdminClient, method alterReplicaLogDirs.
@Override
public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, final AlterReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());
    for (TopicPartitionReplica replica : replicaAssignment.keySet())
        futures.put(replica, new KafkaFutureImpl<Void>());

    // Group the requested log-dir moves by broker id, since each broker gets its own request.
    Map<Integer, Map<TopicPartition, String>> replicaAssignmentByBroker = new HashMap<>();
    for (Map.Entry<TopicPartitionReplica, String> entry : replicaAssignment.entrySet()) {
        TopicPartitionReplica replica = entry.getKey();
        String logDir = entry.getValue();
        int brokerId = replica.brokerId();
        TopicPartition topicPartition = new TopicPartition(replica.topic(), replica.partition());
        if (!replicaAssignmentByBroker.containsKey(brokerId))
            replicaAssignmentByBroker.put(brokerId, new HashMap<TopicPartition, String>());
        replicaAssignmentByBroker.get(brokerId).put(topicPartition, logDir);
    }

    final long now = time.milliseconds();
    for (Map.Entry<Integer, Map<TopicPartition, String>> entry : replicaAssignmentByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final Map<TopicPartition, String> assignment = entry.getValue();

        // Send one AlterReplicaLogDirsRequest per broker and complete the per-replica
        // futures from the per-partition error codes in its response.
        runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) {
                return new AlterReplicaLogDirsRequest.Builder(assignment);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
                for (Map.Entry<TopicPartition, Errors> responseEntry : response.responses().entrySet()) {
                    TopicPartition tp = responseEntry.getKey();
                    Errors error = responseEntry.getValue();
                    TopicPartitionReplica replica = new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId);
                    KafkaFutureImpl<Void> future = futures.get(replica);
                    if (future == null) {
                        handleFailure(new IllegalStateException("The partition " + tp + " in the response from broker " + brokerId + " is not in the request"));
                    } else if (error == Errors.NONE) {
                        future.complete(null);
                    } else {
                        future.completeExceptionally(error.exception());
                    }
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new AlterReplicaLogDirsResult(new HashMap<TopicPartitionReplica, KafkaFuture<Void>>(futures));
}
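For context, callers reach this method through the public Admin interface. Below is a minimal usage sketch, not taken from either project; the bootstrap address, topic name, broker id, and log-dir path are placeholder values:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterReplicaLogDirsResult;
import org.apache.kafka.common.TopicPartitionReplica;

public class AlterReplicaLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Ask broker 1 to move its replica of topic/partition 0 to the /data1 log dir (placeholders).
            TopicPartitionReplica replica = new TopicPartitionReplica("topic", 0, 1);
            AlterReplicaLogDirsResult result =
                admin.alterReplicaLogDirs(Collections.singletonMap(replica, "/data1"));
            // The per-replica future completes with null on success and fails with,
            // e.g., LogDirNotFoundException if the target directory does not exist on the broker.
            result.values().get(replica).get();
        }
    }
}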
Use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.
Class KafkaAdminClient, method describeReplicaLogDirs.
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<>());
    }

    // Group the requested partitions by broker, building one DescribeLogDirsRequestData per broker.
    Map<Integer, DescribeLogDirsRequestData> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(), brokerId -> new DescribeLogDirsRequestData());
        DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic());
        if (describableLogDirTopic == null) {
            List<Integer> partitions = new ArrayList<>();
            partitions.add(replica.partition());
            describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic()).setPartitions(partitions);
            requestData.topics().add(describableLogDirTopic);
        } else {
            describableLogDirTopic.partitions().add(replica.partition());
        }
    }

    final long now = time.milliseconds();
    for (Map.Entry<Integer, DescribeLogDirsRequestData> entry : partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final DescribeLogDirsRequestData topicPartitions = entry.getValue();
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (DescribableLogDirTopic topicPartition : topicPartitions.topics()) {
            for (Integer partitionId : topicPartition.partitions()) {
                replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo());
            }
        }

        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                for (Map.Entry<String, LogDirDescription> responseEntry : logDirDescriptions(response).entrySet()) {
                    String logDir = responseEntry.getKey();
                    LogDirDescription logDirInfo = responseEntry.getValue();

                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error() instanceof KafkaStorageException)
                        continue;
                    if (logDirInfo.error() != null)
                        handleFailure(new IllegalStateException("The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));

                    // Merge current-replica and future-replica information into one
                    // ReplicaLogDirInfo per partition.
                    for (Map.Entry<TopicPartition, ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos().entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp);
                        } else if (replicaInfo.isFuture()) {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), replicaLogDirInfo.getCurrentReplicaOffsetLag(), logDir, replicaInfo.offsetLag()));
                        } else {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, replicaInfo.offsetLag(), replicaLogDirInfo.getFutureReplicaLogDir(), replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry : replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = entry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(entry.getValue());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<>(futures));
}
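The result getters used in this merge step are the same ones asserted in the tests later in this section. A minimal usage sketch of the client-facing call, again with placeholder bootstrap address, topic, partition, and broker id:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo;
import org.apache.kafka.common.TopicPartitionReplica;

public class DescribeReplicaLogDirsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            TopicPartitionReplica replica = new TopicPartitionReplica("topic", 0, 1); // placeholder replica
            DescribeReplicaLogDirsResult result =
                admin.describeReplicaLogDirs(Collections.singletonList(replica));
            ReplicaLogDirInfo info = result.values().get(replica).get();
            // The current dir is where the replica lives now; the future fields are
            // populated only while an alterReplicaLogDirs move is in progress.
            System.out.println("current dir: " + info.getCurrentReplicaLogDir()
                + ", current lag: " + info.getCurrentReplicaOffsetLag()
                + ", future dir: " + info.getFutureReplicaLogDir()
                + ", future lag: " + info.getFutureReplicaOffsetLag());
        }
    }
}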
Use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.
Class MockAdminClient, method describeReplicaLogDirs.
@Override
public synchronized DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    Map<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> results = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        TopicMetadata topicMetadata = allTopics.get(replica.topic());
        if (topicMetadata != null) {
            KafkaFutureImpl<ReplicaLogDirInfo> future = new KafkaFutureImpl<>();
            results.put(replica, future);
            String currentLogDir = currentLogDir(replica);
            if (currentLogDir == null) {
                future.complete(new ReplicaLogDirInfo(null, DescribeLogDirsResponse.INVALID_OFFSET_LAG, null, DescribeLogDirsResponse.INVALID_OFFSET_LAG));
            } else {
                ReplicaLogDirInfo info = replicaMoves.get(replica);
                if (info == null) {
                    future.complete(new ReplicaLogDirInfo(currentLogDir, 0, null, 0));
                } else {
                    future.complete(info);
                }
            }
        }
    }
    return new DescribeReplicaLogDirsResult(results);
}
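MockAdminClient completes its futures synchronously, which makes this method convenient in unit tests. A sketch of driving it, assuming the MockAdminClient(List<Node>, Node) constructor and the addTopic test helper from Kafka's test artifacts (signatures vary across versions, so treat them as assumptions):

import java.util.Collections;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo;
import org.apache.kafka.clients.admin.MockAdminClient;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.TopicPartitionReplica;

public class MockDescribeReplicaLogDirsExample {
    public static void main(String[] args) throws Exception {
        Node broker = new Node(0, "localhost", 9092);
        try (MockAdminClient admin = new MockAdminClient(Collections.singletonList(broker), broker)) {
            // Register a single-partition topic with the mock. Note that replicas of
            // topics unknown to the mock are silently omitted from the result map.
            admin.addTopic(false, "topic",
                Collections.singletonList(new TopicPartitionInfo(0, broker,
                    Collections.singletonList(broker), Collections.singletonList(broker))),
                Collections.emptyMap());
            TopicPartitionReplica replica = new TopicPartitionReplica("topic", 0, 0);
            ReplicaLogDirInfo info = admin.describeReplicaLogDirs(
                Collections.singletonList(replica)).values().get(replica).get();
            // The reported dir depends on the brokerLogDirs configured in the mock.
            System.out.println("current dir: " + info.getCurrentReplicaLogDir());
        }
    }
}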
Use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.
Class KafkaAdminClientTest, method testAlterReplicaLogDirsLogDirNotFound.
@Test
public void testAlterReplicaLogDirsLogDirNotFound() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        createAlterLogDirsResponse(env, env.cluster().nodeById(0), Errors.NONE, 0);
        createAlterLogDirsResponse(env, env.cluster().nodeById(1), Errors.LOG_DIR_NOT_FOUND, 0);
        TopicPartitionReplica tpr0 = new TopicPartitionReplica("topic", 0, 0);
        TopicPartitionReplica tpr1 = new TopicPartitionReplica("topic", 0, 1);
        Map<TopicPartitionReplica, String> logDirs = new HashMap<>();
        logDirs.put(tpr0, "/data0");
        logDirs.put(tpr1, "/data1");
        AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(logDirs);
        assertNull(result.values().get(tpr0).get());
        TestUtils.assertFutureError(result.values().get(tpr1), LogDirNotFoundException.class);
    }
}
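createAlterLogDirsResponse is a private helper of KafkaAdminClientTest that this excerpt does not show. A plausible reconstruction, assuming it enqueues a canned AlterReplicaLogDirsResponse on the mock network client using the org.apache.kafka.common.message.AlterReplicaLogDirsResponseData types; the method body below is a hypothetical sketch, not the project's code:

// Hypothetical reconstruction of the test helper used above: it queues a canned
// AlterReplicaLogDirsResponse for one topic/partition on the given node.
// Assumed imports: org.apache.kafka.common.Node, org.apache.kafka.common.protocol.Errors,
// org.apache.kafka.common.requests.AlterReplicaLogDirsResponse, and the
// AlterReplicaLogDirsResponseData message classes.
private void createAlterLogDirsResponse(AdminClientUnitTestEnv env, Node node, Errors error, int partition) {
    env.kafkaClient().prepareResponseFrom(
        new AlterReplicaLogDirsResponse(new AlterReplicaLogDirsResponseData().setResults(
            Collections.singletonList(new AlterReplicaLogDirsTopicResult()
                .setTopicName("topic")
                .setPartitions(Collections.singletonList(new AlterReplicaLogDirsPartitionResult()
                    .setPartitionIndex(partition)
                    .setErrorCode(error.code())))))),
        node);
}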
Use of org.apache.kafka.common.TopicPartitionReplica in project kafka by apache.
Class KafkaAdminClientTest, method testDescribeReplicaLogDirsUnexpected.
@Test
public void testDescribeReplicaLogDirsUnexpected() throws ExecutionException, InterruptedException {
    TopicPartitionReplica expected = new TopicPartitionReplica("topic", 12, 1);
    TopicPartitionReplica unexpected = new TopicPartitionReplica("topic", 12, 2);
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        String broker1log0 = "/var/data/kafka0";
        String broker1log1 = "/var/data/kafka1";
        int broker1Log0PartitionSize = 987654321;
        int broker1Log0OffsetLag = 24;
        int broker1Log1PartitionSize = 123456789;
        int broker1Log1OffsetLag = 4321;
        env.kafkaClient().prepareResponseFrom(
            new DescribeLogDirsResponse(new DescribeLogDirsResponseData().setResults(asList(
                prepareDescribeLogDirsResult(expected, broker1log0, broker1Log0PartitionSize, broker1Log0OffsetLag, false),
                prepareDescribeLogDirsResult(unexpected, broker1log1, broker1Log1PartitionSize, broker1Log1OffsetLag, true)))),
            env.cluster().nodeById(expected.brokerId()));
        DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(asList(expected));
        Map<TopicPartitionReplica, KafkaFuture<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> values = result.values();
        assertEquals(TestUtils.toSet(asList(expected)), values.keySet());
        assertNotNull(values.get(expected));
        assertEquals(broker1log0, values.get(expected).get().getCurrentReplicaLogDir());
        assertEquals(broker1Log0OffsetLag, values.get(expected).get().getCurrentReplicaOffsetLag());
        assertEquals(broker1log1, values.get(expected).get().getFutureReplicaLogDir());
        assertEquals(broker1Log1OffsetLag, values.get(expected).get().getFutureReplicaOffsetLag());
    }
}
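prepareDescribeLogDirsResult is likewise a private helper not shown in this excerpt. A hedged sketch of what it plausibly builds from the DescribeLogDirsResponseData message types; the setter names follow the public message schema but should be treated as assumptions:

// Hypothetical reconstruction: one DescribeLogDirsResult describing a single
// replica in the given log dir, flagged as current or future via isFutureKey.
private static DescribeLogDirsResponseData.DescribeLogDirsResult prepareDescribeLogDirsResult(
        TopicPartitionReplica replica, String logDir, int partitionSize, int offsetLag, boolean isFuture) {
    return new DescribeLogDirsResponseData.DescribeLogDirsResult()
        .setLogDir(logDir)
        .setErrorCode(Errors.NONE.code())
        .setTopics(Collections.singletonList(new DescribeLogDirsResponseData.DescribeLogDirsTopic()
            .setName(replica.topic())
            .setPartitions(Collections.singletonList(new DescribeLogDirsResponseData.DescribeLogDirsPartition()
                .setPartitionIndex(replica.partition())
                .setPartitionSize(partitionSize)
                .setOffsetLag(offsetLag)
                .setIsFutureKey(isFuture)))));
}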