Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.
From the class PartitionLeaderStrategy, method handleResponse:
@Override
public LookupResult<TopicPartition> handleResponse(Set<TopicPartition> requestPartitions, AbstractResponse abstractResponse) {
    MetadataResponse response = (MetadataResponse) abstractResponse;
    Map<TopicPartition, Throwable> failures = new HashMap<>();
    Map<TopicPartition, Integer> leaders = new HashMap<>();
    for (MetadataResponseData.MetadataResponseTopic topicData : response.data().topics()) {
        String topicName = topicData.name();
        Errors topicError = Errors.forCode(topicData.errorCode());
        if (topicError != Errors.NONE) {
            // A topic-level error covers every requested partition of that topic.
            handleTopicError(topicName, topicError, requestPartitions, failures);
            continue;
        }
        for (MetadataResponseData.MetadataResponsePartition partitionData : topicData.partitions()) {
            TopicPartition partition = new TopicPartition(topicName, partitionData.partitionIndex());
            // The metadata response returns all partitions of a requested
            // topic, so skip any partition this lookup did not ask for.
            if (!requestPartitions.contains(partition)) {
                continue;
            }
            Errors partitionError = Errors.forCode(partitionData.errorCode());
            if (partitionError != Errors.NONE) {
                handlePartitionError(partition, partitionError, failures);
                continue;
            }
            int leaderId = partitionData.leaderId();
            if (leaderId < 0) {
                // No error reported, but no leader elected yet; leaving the
                // partition unmapped causes the lookup to be retried.
                log.debug("Metadata request for {} returned no error, but the leader is unknown. Will retry", partition);
            } else {
                leaders.put(partition, leaderId);
            }
        }
    }
    return new LookupResult<>(failures, leaders);
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.
From the class AllBrokersStrategy, method handleResponse:
@Override
public LookupResult<BrokerKey> handleResponse(Set<BrokerKey> keys, AbstractResponse abstractResponse) {
    validateLookupKeys(keys);
    MetadataResponse response = (MetadataResponse) abstractResponse;
    MetadataResponseData.MetadataResponseBrokerCollection brokerCollection = response.data().brokers();
    // An empty broker set means the cluster metadata is not usable yet; an
    // empty result tells the driver to back off and retry the lookup.
    if (brokerCollection.isEmpty()) {
        log.debug("Metadata response contained no brokers. Will backoff and retry");
        return LookupResult.empty();
    }
    log.debug("Discovered all brokers {} to send requests to", brokerCollection);
    // Map each broker id onto itself, keyed by its BrokerKey wrapper.
    Map<BrokerKey, Integer> mappedBrokers = brokerCollection.stream()
        .map(MetadataResponseData.MetadataResponseBroker::nodeId)
        .collect(Collectors.toMap(id -> new BrokerKey(OptionalInt.of(id)), id -> id));
    return new LookupResult<>(Collections.singletonList(ANY_BROKER), Collections.emptyMap(), mappedBrokers);
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.
From the test class ConsumerCoordinatorTest, method testRefreshOffsetWithValidation:
@Test
public void testRefreshOffsetWithValidation() {
    // Coordinator lookup succeeds immediately.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    subscriptions.assignFromUser(singleton(t1p));
    // Initial leader epoch of 4
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("kafka-cluster", 1, Collections.emptyMap(), singletonMap(topic1, 1), tp -> 4);
    client.updateMetadata(metadataResponse);
    // Load offsets from previous epoch (3, older than the current epoch 4)
    client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L, Optional.of(3)));
    coordinator.refreshCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE));
    // Offset gets loaded, but requires validation
    assertEquals(Collections.emptySet(), subscriptions.initializingPartitions());
    assertFalse(subscriptions.hasAllFetchPositions());
    assertTrue(subscriptions.awaitingValidation(t1p));
    // assertEquals takes (expected, actual); the original had them swapped,
    // which inverts the failure message when the assertion trips.
    assertEquals(100L, subscriptions.position(t1p).offset);
    assertNull(subscriptions.validPosition(t1p));
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.
From the test class KafkaConsumerTest, method testSubscriptionOnInvalidTopic:
@Test
public void testSubscriptionOnInvalidTopic() {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient mockClient = new MockClient(time, metadata);
    initMetadata(mockClient, Collections.singletonMap(topic, 1));
    Cluster cluster = metadata.fetch();
    // A topic name containing a space is not a legal Kafka topic name.
    String invalidTopicName = "topic abc";
    MetadataResponse.TopicMetadata invalidTopic =
        new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION, invalidTopicName, false, Collections.emptyList());
    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>();
    topicMetadata.add(invalidTopic);
    // Queue a metadata update carrying the INVALID_TOPIC error.
    MetadataResponse updateResponse = RequestTestUtils.metadataResponse(
        cluster.nodes(),
        cluster.clusterResource().clusterId(),
        cluster.controller().id(),
        topicMetadata);
    mockClient.prepareMetadataUpdate(updateResponse);
    KafkaConsumer<String, String> consumer = newConsumer(time, mockClient, subscription, metadata, assignor, true, groupInstanceId);
    consumer.subscribe(singleton(invalidTopicName), getConsumerRebalanceListener(consumer));
    // poll() should surface the topic-level error as an InvalidTopicException.
    assertThrows(InvalidTopicException.class, () -> consumer.poll(Duration.ZERO));
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project.
From the test class KafkaConsumerTest, helper method initMetadata:
// Seeds the mock client with an initial metadata response covering the given
// topics, looking up each topic's id from the test fixture's topicIds map.
private void initMetadata(MockClient mockClient, Map<String, Integer> partitionCounts) {
    Map<String, Uuid> metadataIds = new HashMap<>();
    // NOTE(review): topicIds.get may return null for unregistered topics;
    // HashMap tolerates null values, matching the original behavior.
    partitionCounts.keySet().forEach(topicName -> metadataIds.put(topicName, topicIds.get(topicName)));
    MetadataResponse initialMetadata = RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, metadataIds);
    mockClient.updateMetadata(initialMetadata);
}
Aggregations