Use of org.apache.kafka.common.message.MetadataRequestData in project kafka by apache.
From the class KafkaAdminClient, the method handleDescribeTopicsByIds:
private Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsByIds(Collection<Uuid> topicIds, DescribeTopicsOptions options) {
    final Map<Uuid, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicIds.size());
    final List<Uuid> topicIdsList = new ArrayList<>();
    // Fail unrepresentable ids up front; deduplicate the rest into the request list.
    for (Uuid topicId : topicIds) {
        if (topicIdIsUnrepresentable(topicId)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic id '" +
                topicId + "' cannot be represented in a request."));
            topicFutures.put(topicId, future);
        } else if (!topicFutures.containsKey(topicId)) {
            topicFutures.put(topicId, new KafkaFutureImpl<>());
            topicIdsList.add(topicId);
        }
    }
    final long now = time.milliseconds();
    // Fetch metadata for the remaining ids from the least-loaded node.
    Call call = new Call("describeTopicsWithIds", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList))
                .setAllowAutoTopicCreation(false)
                .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<Uuid, Errors> errors = response.errorsByTopicId();
            for (Map.Entry<Uuid, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                Uuid topicId = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                String topicName = cluster.topicName(topicId);
                if (topicName == null) {
                    future.completeExceptionally(new InvalidTopicException("TopicId " + topicId + " not found."));
                    continue;
                }
                Errors topicError = errors.get(topicId);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicIdsList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
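This handler sits behind the public Admin API rather than being called directly. A rough usage sketch, assuming a Kafka 3.1+ client where Admin.describeTopics(TopicCollection) and DescribeTopicsResult.allTopicIds() are available; the bootstrap address is a placeholder, and a real topic id would come from an earlier metadata lookup:

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DescribeTopicsByIdExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Stand-in id for the sketch; in practice, use an id returned by the broker.
            Uuid topicId = Uuid.randomUuid();
            Map<Uuid, TopicDescription> descriptions =
                admin.describeTopics(TopicCollection.ofTopicIds(Collections.singletonList(topicId)))
                     .allTopicIds()
                     .get();
            descriptions.forEach((id, desc) -> System.out.println(id + " -> " + desc.name()));
        }
    }
}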
Use of org.apache.kafka.common.message.MetadataRequestData in project kafka by apache.
From the class KafkaAdminClient, the method handleDescribeTopicsByNames:
private Map<String, KafkaFuture<TopicDescription>> handleDescribeTopicsByNames(final Collection<String> topicNames, DescribeTopicsOptions options) {
    final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size());
    final ArrayList<String> topicNamesList = new ArrayList<>();
    // Fail unrepresentable names up front; deduplicate the rest into the request list.
    for (String topicName : topicNames) {
        if (topicNameIsUnrepresentable(topicName)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidTopicException("The given topic name '" +
                topicName + "' cannot be represented in a request."));
            topicFutures.put(topicName, future);
        } else if (!topicFutures.containsKey(topicName)) {
            topicFutures.put(topicName, new KafkaFutureImpl<>());
            topicNamesList.add(topicName);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {

        private boolean supportsDisablingTopicCreation = true;

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            if (supportsDisablingTopicCreation)
                return new MetadataRequest.Builder(new MetadataRequestData()
                    .setTopics(convertToMetadataRequestTopic(topicNamesList))
                    .setAllowAutoTopicCreation(false)
                    .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
            else
                return MetadataRequest.Builder.allTopics();
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            // Handle server responses for particular topics.
            Cluster cluster = response.buildCluster();
            Map<String, Errors> errors = response.errors();
            for (Map.Entry<String, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                String topicName = entry.getKey();
                KafkaFutureImpl<TopicDescription> future = entry.getValue();
                Errors topicError = errors.get(topicName);
                if (topicError != null) {
                    future.completeExceptionally(topicError.exception());
                    continue;
                }
                if (!cluster.topics().contains(topicName)) {
                    future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + topicName + " not found."));
                    continue;
                }
                Uuid topicId = cluster.topicId(topicName);
                Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                future.complete(topicDescription);
            }
        }

        @Override
        boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
            // Brokers too old to support disabling auto topic creation get a retry
            // with an all-topics metadata request instead.
            if (supportsDisablingTopicCreation) {
                supportsDisablingTopicCreation = false;
                return true;
            }
            return false;
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(topicFutures.values(), throwable);
        }
    };
    if (!topicNamesList.isEmpty()) {
        runnable.call(call, now);
    }
    return new HashMap<>(topicFutures);
}
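Describing by name is reached through the long-standing Admin.describeTopics(Collection<String>) overload. A minimal sketch; the topic name and bootstrap address are placeholders, and allTopicNames() assumes a 3.1+ client:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeTopicsOptions;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribeTopicsByNameExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            TopicDescription description = admin
                .describeTopics(Collections.singletonList("my-topic"), // placeholder topic name
                    new DescribeTopicsOptions().includeAuthorizedOperations(true))
                .allTopicNames()
                .get()
                .get("my-topic");
            System.out.println(description.name() + " has " + description.partitions().size() + " partitions");
        }
    }
}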
Use of org.apache.kafka.common.message.MetadataRequestData in project kafka by apache.
From the class MetadataRequestTest, the method testEmptyMeansEmptyForVersionsAboveV0:
@Test
public void testEmptyMeansEmptyForVersionsAboveV0() {
    for (int i = 1; i < MetadataRequestData.SCHEMAS.length; i++) {
        MetadataRequestData data = new MetadataRequestData();
        data.setAllowAutoTopicCreation(true);
        MetadataRequest parsedRequest = new MetadataRequest(data, (short) i);
        assertFalse(parsedRequest.isAllTopics());
        assertEquals(Collections.emptyList(), parsedRequest.topics());
    }
}
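For contrast, a hypothetical companion test (not in the Kafka test file; the topic name and version are arbitrary) showing that an explicit, non-empty topic list is never interpreted as "all topics" above v0:

@Test
public void testExplicitTopicListAboveV0Sketch() {
    MetadataRequestData data = new MetadataRequestData()
        .setAllowAutoTopicCreation(false)
        .setTopics(Collections.singletonList(
            new MetadataRequestData.MetadataRequestTopic().setName("my-topic")));
    MetadataRequest parsedRequest = new MetadataRequest(data, (short) 5);
    assertFalse(parsedRequest.isAllTopics());
    assertEquals(Collections.singletonList("my-topic"), parsedRequest.topics());
}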
Use of org.apache.kafka.common.message.MetadataRequestData in project kafka by apache.
From the class MetadataRequestTest, the method testEmptyMeansAllTopicsV0:
@Test
public void testEmptyMeansAllTopicsV0() {
    MetadataRequestData data = new MetadataRequestData();
    MetadataRequest parsedRequest = new MetadataRequest(data, (short) 0);
    assertTrue(parsedRequest.isAllTopics());
    assertNull(parsedRequest.topics());
}
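From v1 onward, "all topics" is encoded as a null topic list rather than an empty one, which is what MetadataRequest.Builder.allTopics() produces. A brief sketch of that case (a hypothetical test, with an arbitrary version):

@Test
public void testNullMeansAllTopicsAboveV0Sketch() {
    MetadataRequest allTopicsRequest = MetadataRequest.Builder.allTopics().build((short) 5);
    assertTrue(allTopicsRequest.isAllTopics());
}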
Use of org.apache.kafka.common.message.MetadataRequestData in project kafka by apache.
From the class KafkaAdminClient, the method deleteRecords:
@Override
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete, final DeleteRecordsOptions options) {
    // DeleteRecords requests must be sent to the leader of each partition, so the
    // provided map is regrouped by leader node once the metadata is known.
    final Map<TopicPartition, KafkaFutureImpl<DeletedRecords>> futures = new HashMap<>(recordsToDelete.size());
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        futures.put(topicPartition, new KafkaFutureImpl<>());
    }
    // Collect the topic names whose metadata (and hence partition leaders) is needed.
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        topics.add(topicPartition.topic());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // Ask for the topics' metadata in order to find the partition leaders.
    runnable.call(new Call("topicsMetadata", deadline, new LeastLoadedNodeProvider()) {

        @Override
        MetadataRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new MetadataRequestData()
                .setTopics(convertToMetadataRequestTopic(topics))
                .setAllowAutoTopicCreation(false));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, Errors> errors = response.errors();
            Cluster cluster = response.buildCluster();
            // Group topic partitions by leader
            Map<Node, Map<String, DeleteRecordsTopic>> leaders = new HashMap<>();
            for (Map.Entry<TopicPartition, RecordsToDelete> entry : recordsToDelete.entrySet()) {
                TopicPartition topicPartition = entry.getKey();
                KafkaFutureImpl<DeletedRecords> future = futures.get(topicPartition);
                // Fail partitions with topic errors
                Errors topicError = errors.get(topicPartition.topic());
                if (errors.containsKey(topicPartition.topic())) {
                    future.completeExceptionally(topicError.exception());
                } else {
                    Node node = cluster.leaderFor(topicPartition);
                    if (node != null) {
                        Map<String, DeleteRecordsTopic> deletionsForLeader = leaders.computeIfAbsent(node, key -> new HashMap<>());
                        DeleteRecordsTopic deleteRecords = deletionsForLeader.get(topicPartition.topic());
                        if (deleteRecords == null) {
                            deleteRecords = new DeleteRecordsTopic().setName(topicPartition.topic());
                            deletionsForLeader.put(topicPartition.topic(), deleteRecords);
                        }
                        deleteRecords.partitions().add(new DeleteRecordsPartition()
                            .setPartitionIndex(topicPartition.partition())
                            .setOffset(entry.getValue().beforeOffset()));
                    } else {
                        future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
                    }
                }
            }
            // Send one DeleteRecords request per leader, covering all of its partitions.
            final long deleteRecordsCallTimeMs = time.milliseconds();
            for (final Map.Entry<Node, Map<String, DeleteRecordsTopic>> entry : leaders.entrySet()) {
                final Map<String, DeleteRecordsTopic> partitionDeleteOffsets = entry.getValue();
                final int brokerId = entry.getKey().id();
                runnable.call(new Call("deleteRecords", deadline, new ConstantNodeIdProvider(brokerId)) {

                    @Override
                    DeleteRecordsRequest.Builder createRequest(int timeoutMs) {
                        return new DeleteRecordsRequest.Builder(new DeleteRecordsRequestData()
                            .setTimeoutMs(timeoutMs)
                            .setTopics(new ArrayList<>(partitionDeleteOffsets.values())));
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
                        for (DeleteRecordsTopicResult topicResult : response.data().topics()) {
                            for (DeleteRecordsResponseData.DeleteRecordsPartitionResult partitionResult : topicResult.partitions()) {
                                KafkaFutureImpl<DeletedRecords> future = futures.get(new TopicPartition(topicResult.name(), partitionResult.partitionIndex()));
                                if (partitionResult.errorCode() == Errors.NONE.code()) {
                                    future.complete(new DeletedRecords(partitionResult.lowWatermark()));
                                } else {
                                    future.completeExceptionally(Errors.forCode(partitionResult.errorCode()).exception());
                                }
                            }
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        Stream<KafkaFutureImpl<DeletedRecords>> callFutures = partitionDeleteOffsets.values().stream()
                            .flatMap(recordsToDelete -> recordsToDelete.partitions().stream()
                                .map(partitionsToDelete -> new TopicPartition(recordsToDelete.name(), partitionsToDelete.partitionIndex())))
                            .map(futures::get);
                        completeAllExceptionally(callFutures, throwable);
                    }
                }, deleteRecordsCallTimeMs);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, nowMetadata);
    return new DeleteRecordsResult(new HashMap<>(futures));
}
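The public entry point for this path is Admin.deleteRecords. A rough usage sketch; the topic, partition, offset, and bootstrap address are all placeholders:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            TopicPartition partition = new TopicPartition("my-topic", 0); // placeholder partition
            // Delete everything before offset 100 on that partition.
            DeleteRecordsResult result = admin.deleteRecords(
                Collections.singletonMap(partition, RecordsToDelete.beforeOffset(100L)));
            DeletedRecords deleted = result.lowWatermarks().get(partition).get();
            System.out.println("New low watermark: " + deleted.lowWatermark());
        }
    }
}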