use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
the class FetcherTest method testGetOffsetsForTimesWithUnknownOffset.
private void testGetOffsetsForTimesWithUnknownOffset() {
client.reset();
// Ensure metadata has both partitions.
MetadataResponse initialMetadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
client.updateMetadata(initialMetadataUpdate);
ListOffsetsResponseData data = new ListOffsetsResponseData()
    .setThrottleTimeMs(0)
    .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
        .setName(tp0.topic())
        .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.NONE.code())
            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
            .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)))));
client.prepareResponseFrom(new ListOffsetsResponse(data), metadata.fetch().leaderFor(tp0));
Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
timestampToSearch.put(tp0, 0L);
Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));
assertTrue(offsetAndTimestampMap.containsKey(tp0));
assertNull(offsetAndTimestampMap.get(tp0));
}
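The test pins down the contract that a timestamp with no matching record maps to a null entry rather than being dropped from the result. Below is a minimal sketch of how that contract looks through the public KafkaConsumer API; the broker address, group id, and topic name are placeholder assumptions, not values taken from the test.
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetsForTimesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "offsets-for-times-sketch"); // placeholder group
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("test-topic", 0); // placeholder topic
            Map<TopicPartition, OffsetAndTimestamp> result =
                consumer.offsetsForTimes(Collections.singletonMap(tp, 0L), Duration.ofSeconds(5));
            // Mirrors the assertions in the test: the key is present, but the value is
            // null when the broker reports UNKNOWN_TIMESTAMP/UNKNOWN_OFFSET.
            OffsetAndTimestamp offset = result.get(tp);
            System.out.println(offset == null ? "no offset for the requested timestamp" : offset);
        }
    }
}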
use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
the class ConsumerNetworkClientTest method testInvalidTopicExceptionPropagatedFromMetadata.
@Test
public void testInvalidTopicExceptionPropagatedFromMetadata() {
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("clusterId", 1, Collections.singletonMap("topic", Errors.INVALID_TOPIC_EXCEPTION), Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
assertThrows(InvalidTopicException.class, () -> consumerClient.poll(time.timer(Duration.ZERO)));
}
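For context, a rough sketch of how this propagation reaches application code: when a metadata response carries INVALID_TOPIC_EXCEPTION, the error surfaces from KafkaConsumer.poll(). The connection settings and the deliberately malformed topic name below are illustrative assumptions, not part of the test.
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class InvalidTopicSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "invalid-topic-sketch");    // placeholder group
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // A topic name the broker should reject as invalid (spaces are not allowed).
            consumer.subscribe(Collections.singletonList("not a valid topic"));
            try {
                consumer.poll(Duration.ofSeconds(1));
            } catch (InvalidTopicException e) {
                // The metadata error checked by the test surfaces here.
                System.err.println("Invalid topic reported by metadata: " + e.getMessage());
            }
        }
    }
}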
use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
the class StreamsKafkaClient method fetchMetadata.
/**
* Fetch the metadata for all topics
*/
public MetadataResponse fetchMetadata() {
final ClientRequest clientRequest = kafkaClient.newClientRequest(getAnyReadyBrokerId(), new MetadataRequest.Builder(null), Time.SYSTEM.milliseconds(), true);
final ClientResponse clientResponse = sendRequest(clientRequest);
if (!clientResponse.hasResponse()) {
throw new StreamsException("Empty response for client request.");
}
if (!(clientResponse.responseBody() instanceof MetadataResponse)) {
throw new StreamsException("Inconsistent response type for internal topic metadata request. " + "Expected MetadataResponse but received " + clientResponse.responseBody().getClass().getName());
}
final MetadataResponse metadataResponse = (MetadataResponse) clientResponse.responseBody();
return metadataResponse;
}
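StreamsKafkaClient is an internal Streams helper (since removed from the codebase), so the following is only a hedged sketch of how a caller might iterate the returned MetadataResponse; the streamsKafkaClient variable is assumed to be an already-configured instance and is not taken from the Kafka sources.
// Imports assumed: org.apache.kafka.common.protocol.Errors,
// org.apache.kafka.common.requests.MetadataResponse.
private void printTopicMetadata(final StreamsKafkaClient streamsKafkaClient) {
    final MetadataResponse metadata = streamsKafkaClient.fetchMetadata();
    for (final MetadataResponse.TopicMetadata topicMetadata : metadata.topicMetadata()) {
        if (topicMetadata.error() == Errors.NONE) {
            System.out.println(topicMetadata.topic() + " has "
                + topicMetadata.partitionMetadata().size() + " partitions");
        } else {
            System.out.println(topicMetadata.topic() + " returned " + topicMetadata.error());
        }
    }
}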
use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
the class Fetcher method getTopicMetadata.
/**
* Get metadata for the topics named in the given request (or for all topics in the cluster, if the request asks for all topics)
*
* @param request The MetadataRequest to send
* @param timeout maximum time in milliseconds to spend fetching the topic metadata
* @return The map of topics with their partition information
*/
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) {
// Save the round trip if no topics are requested.
if (!request.isAllTopics() && request.topics().isEmpty())
return Collections.emptyMap();
long start = time.milliseconds();
long remaining = timeout;
do {
RequestFuture<ClientResponse> future = sendMetadataRequest(request);
client.poll(future, remaining);
if (future.failed() && !future.isRetriable())
throw future.exception();
if (future.succeeded()) {
MetadataResponse response = (MetadataResponse) future.value().responseBody();
Cluster cluster = response.cluster();
Set<String> unauthorizedTopics = cluster.unauthorizedTopics();
if (!unauthorizedTopics.isEmpty())
throw new TopicAuthorizationException(unauthorizedTopics);
boolean shouldRetry = false;
Map<String, Errors> errors = response.errors();
if (!errors.isEmpty()) {
// if there were errors, we need to check whether they were fatal or whether
// we should just retry
log.debug("Topic metadata fetch included errors: {}", errors);
for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) {
String topic = errorEntry.getKey();
Errors error = errorEntry.getValue();
if (error == Errors.INVALID_TOPIC_EXCEPTION)
throw new InvalidTopicException("Topic '" + topic + "' is invalid");
else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION)
// an unknown topic is simply skipped and left absent in the returned map
continue;
else if (error.exception() instanceof RetriableException)
shouldRetry = true;
else
throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception());
}
}
if (!shouldRetry) {
HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>();
for (String topic : cluster.topics())
    topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic));
return topicsPartitionInfos;
}
}
long elapsed = time.milliseconds() - start;
remaining = timeout - elapsed;
if (remaining > 0) {
long backoff = Math.min(remaining, retryBackoffMs);
time.sleep(backoff);
remaining -= backoff;
}
} while (remaining > 0);
throw new TimeoutException("Timeout expired while fetching topic metadata");
}
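The retry loop above is what ultimately backs the consumer's topic-discovery calls. As a hedged illustration (the broker address and topic name are placeholders), partitionsFor() goes through the single-topic path, while listTopics() issues an all-topics request and returns the same Map<String, List<PartitionInfo>> shape that getTopicMetadata() builds.
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class TopicMetadataSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Single-topic metadata request.
            List<PartitionInfo> partitions = consumer.partitionsFor("test-topic", Duration.ofSeconds(5));
            System.out.println("test-topic partitions: " + partitions);
            // All-topics metadata request; same map shape as getTopicMetadata() returns.
            Map<String, List<PartitionInfo>> allTopics = consumer.listTopics(Duration.ofSeconds(5));
            System.out.println("visible topics: " + allTopics.keySet());
        }
    }
}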
use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
the class FetcherTest method newMetadataResponse.
private MetadataResponse newMetadataResponse(String topic, Errors error) {
List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>();
if (error == Errors.NONE) {
for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
partitionsMetadata.add(new MetadataResponse.PartitionMetadata(
    Errors.NONE,
    partitionInfo.partition(),
    partitionInfo.leader(),
    Arrays.asList(partitionInfo.replicas()),
    Arrays.asList(partitionInfo.inSyncReplicas())));
}
}
MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(error, topic, false, partitionsMetadata);
return new MetadataResponse(cluster.nodes(), null, MetadataResponse.NO_CONTROLLER_ID, Arrays.asList(topicMetadata));
}
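A hedged sketch of how such a helper is typically wired up in these tests; the test method, the chosen error, and the retry expectation are illustrative assumptions rather than code from FetcherTest, and they rely on the existing fixtures (client, fetcher, topicName) plus the era-specific MetadataRequest.Builder(List) constructor.
@Test
public void testTopicMetadataRetriesOnRetriableError() {
    // Queue a retriable error first, then a clean response, on the MockClient.
    client.prepareResponse(newMetadataResponse(topicName, Errors.LEADER_NOT_AVAILABLE));
    client.prepareResponse(newMetadataResponse(topicName, Errors.NONE));
    // getTopicMetadata() should retry past LEADER_NOT_AVAILABLE and succeed on the
    // second prepared response within the 5 second timeout.
    Map<String, List<PartitionInfo>> topicMetadata =
        fetcher.getTopicMetadata(new MetadataRequest.Builder(Collections.singletonList(topicName)), 5000L);
    assertTrue(topicMetadata.containsKey(topicName));
}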