Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class MockAdminClient, method describeTopics:
@Override
public DescribeTopicsResult describeTopics(Collection<String> topicNames, DescribeTopicsOptions options) {
    Map<String, KafkaFuture<TopicDescription>> topicDescriptions = new HashMap<>();
    // Simulate a timed-out request for every requested topic, if configured to do so.
    if (timeoutNextRequests > 0) {
        for (String requestedTopic : topicNames) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            topicDescriptions.put(requestedTopic, future);
        }
        --timeoutNextRequests;
        return new DescribeTopicsResult(topicDescriptions);
    }
    for (String requestedTopic : topicNames) {
        for (Map.Entry<String, TopicMetadata> topicDescription : allTopics.entrySet()) {
            String topicName = topicDescription.getKey();
            if (topicName.equals(requestedTopic)) {
                TopicMetadata topicMetadata = topicDescription.getValue();
                KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
                future.complete(new TopicDescription(topicName, topicMetadata.isInternalTopic, topicMetadata.partitions));
                topicDescriptions.put(topicName, future);
                break;
            }
        }
        // The requested topic is not in the mock's state.
        if (!topicDescriptions.containsKey(requestedTopic)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new UnknownTopicOrPartitionException(String.format("Topic %s unknown.", requestedTopic)));
            topicDescriptions.put(requestedTopic, future);
        }
    }
    return new DescribeTopicsResult(topicDescriptions);
}
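A minimal sketch of how a caller might consume the per-topic futures this method returns; the adminClient variable and the topic name "my-topic" are illustrative assumptions:

DescribeTopicsResult result = adminClient.describeTopics(Collections.singleton("my-topic"));
try {
    TopicDescription description = result.values().get("my-topic").get();
    System.out.println("Partitions: " + description.partitions().size());
} catch (ExecutionException e) {
    // The mock surfaces TimeoutException or UnknownTopicOrPartitionException as the cause.
    System.err.println("describeTopics failed: " + e.getCause());
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}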
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class InternalTopicManager, method getNumPartitions:
/**
 * Get the number of partitions for the given topics.
 */
// visible for testing
protected Map<String, Integer> getNumPartitions(final Set<String> topics) {
    int remainingRetries = retries;
    boolean retry;
    do {
        retry = false;
        final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topics);
        final Map<String, KafkaFuture<TopicDescription>> futures = describeTopicsResult.values();
        final Map<String, Integer> existingNumberOfPartitionsPerTopic = new HashMap<>();
        for (final Map.Entry<String, KafkaFuture<TopicDescription>> topicFuture : futures.entrySet()) {
            try {
                final TopicDescription topicDescription = topicFuture.getValue().get();
                existingNumberOfPartitionsPerTopic.put(topicFuture.getKey(), topicDescription.partitions().size());
            } catch (final InterruptedException fatalException) {
                Thread.currentThread().interrupt();
                log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
                throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
            } catch (final ExecutionException couldNotDescribeTopicException) {
                final Throwable cause = couldNotDescribeTopicException.getCause();
                if (cause instanceof TimeoutException) {
                    retry = true;
                    log.debug("Could not get number of partitions for topic {} due to timeout. Will try again (remaining retries {}).", topicFuture.getKey(), remainingRetries - 1);
                } else {
                    log.debug("Could not get number of partitions for topic {} due to error: {}", topicFuture.getKey(), cause.getMessage());
                }
            }
        }
        if (retry) {
            // Retry only the topics that have not been successfully described yet.
            topics.removeAll(existingNumberOfPartitionsPerTopic.keySet());
            continue;
        }
        return existingNumberOfPartitionsPerTopic;
    } while (remainingRetries-- > 0);
    return Collections.emptyMap();
}
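A hedged usage sketch: because the retry path removes already-described topics from the input set, a caller should pass a mutable copy; the manager instance and the topic names below are assumptions for illustration:

// Hypothetical caller; "internalTopicManager" and the topic names are assumptions.
Set<String> topics = new HashSet<>(Arrays.asList("app-repartition", "app-changelog"));
Map<String, Integer> numPartitions = internalTopicManager.getNumPartitions(topics);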
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class StreamsResetter, method doDelete:
// visible for testing
public void doDelete(final List<String> topicsToDelete, final AdminClient adminClient) {
    boolean hasDeleteErrors = false;
    final DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(topicsToDelete);
    final Map<String, KafkaFuture<Void>> results = deleteTopicsResult.values();
    for (final Map.Entry<String, KafkaFuture<Void>> entry : results.entrySet()) {
        try {
            // Block for up to 30 seconds per topic deletion.
            entry.getValue().get(30, TimeUnit.SECONDS);
        } catch (Exception e) {
            System.err.println("ERROR: deleting topic " + entry.getKey());
            e.printStackTrace(System.err);
            hasDeleteErrors = true;
        }
    }
    if (hasDeleteErrors) {
        throw new RuntimeException("Encountered an error deleting one or more topics");
    }
}
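A minimal sketch of invoking doDelete against a locally created AdminClient; the bootstrap address and the topic names are assumptions:

// Hypothetical invocation; "localhost:9092" and the topic list are assumptions.
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (AdminClient adminClient = AdminClient.create(props)) {
    new StreamsResetter().doDelete(Arrays.asList("app-repartition", "app-changelog"), adminClient);
}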
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class KafkaAdminClient, method describeReplicaLogDirs:
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
    final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
    for (TopicPartitionReplica replica : replicas) {
        futures.put(replica, new KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>());
    }
    // Group the requested replicas by broker so that one request is sent per broker.
    Map<Integer, Set<TopicPartition>> partitionsByBroker = new HashMap<>();
    for (TopicPartitionReplica replica : replicas) {
        if (!partitionsByBroker.containsKey(replica.brokerId()))
            partitionsByBroker.put(replica.brokerId(), new HashSet<TopicPartition>());
        partitionsByBroker.get(replica.brokerId()).add(new TopicPartition(replica.topic(), replica.partition()));
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Set<TopicPartition>> entry : partitionsByBroker.entrySet()) {
        final int brokerId = entry.getKey();
        final Set<TopicPartition> topicPartitions = entry.getValue();
        final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
        for (TopicPartition topicPartition : topicPartitions)
            replicaDirInfoByPartition.put(topicPartition, new ReplicaLogDirInfo());
        runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(topicPartitions);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                for (Map.Entry<String, DescribeLogDirsResponse.LogDirInfo> responseEntry : response.logDirInfos().entrySet()) {
                    String logDir = responseEntry.getKey();
                    DescribeLogDirsResponse.LogDirInfo logDirInfo = responseEntry.getValue();
                    // No replica info will be provided if the log directory is offline
                    if (logDirInfo.error == Errors.KAFKA_STORAGE_ERROR)
                        continue;
                    if (logDirInfo.error != Errors.NONE)
                        handleFailure(new IllegalStateException("The error " + logDirInfo.error + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
                    for (Map.Entry<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos.entrySet()) {
                        TopicPartition tp = replicaInfoEntry.getKey();
                        DescribeLogDirsResponse.ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                        ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                        if (replicaLogDirInfo == null) {
                            handleFailure(new IllegalStateException("The partition " + tp + " in the response from broker " + brokerId + " is not in the request"));
                        } else if (replicaInfo.isFuture) {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), replicaLogDirInfo.getCurrentReplicaOffsetLag(), logDir, replicaInfo.offsetLag));
                        } else {
                            replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, replicaInfo.offsetLag, replicaLogDirInfo.getFutureReplicaLogDir(), replicaLogDirInfo.getFutureReplicaOffsetLag()));
                        }
                    }
                }
                for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry : replicaDirInfoByPartition.entrySet()) {
                    TopicPartition tp = entry.getKey();
                    KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                    future.complete(entry.getValue());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeReplicaLogDirsResult(new HashMap<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>>(futures));
}
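A hedged sketch of consuming the result; the topic, partition, broker id, and adminClient variable are illustrative assumptions:

TopicPartitionReplica replica = new TopicPartitionReplica("my-topic", 0, 1);
DescribeReplicaLogDirsResult result = adminClient.describeReplicaLogDirs(Collections.singleton(replica));
try {
    DescribeReplicaLogDirsResult.ReplicaLogDirInfo info = result.values().get(replica).get();
    System.out.println("Current log dir: " + info.getCurrentReplicaLogDir() + ", offset lag: " + info.getCurrentReplicaOffsetLag());
} catch (InterruptedException | ExecutionException e) {
    System.err.println("describeReplicaLogDirs failed: " + e);
}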
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class KafkaAdminClient, method describeLogDirs:
@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    final Map<Integer, KafkaFutureImpl<Map<String, DescribeLogDirsResponse.LogDirInfo>>> futures = new HashMap<>(brokers.size());
    for (Integer brokerId : brokers) {
        futures.put(brokerId, new KafkaFutureImpl<Map<String, DescribeLogDirsResponse.LogDirInfo>>());
    }
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(brokerId)) {

            @Override
            public AbstractRequest.Builder createRequest(int timeoutMs) {
                // Passing null queries all partitions in all log directories
                return new DescribeLogDirsRequest.Builder(null);
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                KafkaFutureImpl<Map<String, DescribeLogDirsResponse.LogDirInfo>> future = futures.get(brokerId);
                if (response.logDirInfos().size() > 0) {
                    future.complete(response.logDirInfos());
                } else {
                    // response.logDirInfos() will be empty if and only if the user is not authorized to describe the cluster resource.
                    future.completeExceptionally(Errors.CLUSTER_AUTHORIZATION_FAILED.exception());
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
    }
    return new DescribeLogDirsResult(new HashMap<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>>(futures));
}
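A minimal sketch of reading the per-broker map of log directory infos; broker id 0 and the adminClient variable are assumptions:

DescribeLogDirsResult result = adminClient.describeLogDirs(Collections.singleton(0));
try {
    Map<String, DescribeLogDirsResponse.LogDirInfo> logDirs = result.values().get(0).get();
    for (Map.Entry<String, DescribeLogDirsResponse.LogDirInfo> entry : logDirs.entrySet()) {
        System.out.println("Log dir " + entry.getKey() + " -> " + entry.getValue().error);
    }
} catch (InterruptedException | ExecutionException e) {
    System.err.println("describeLogDirs failed: " + e);
}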