Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
Class KafkaAdminClient, method listOffsets.
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                     ListOffsetsOptions options) {
    // prepare the list of topics we need metadata for
    final Map<TopicPartition, KafkaFutureImpl<ListOffsetsResultInfo>> futures = new HashMap<>(topicPartitionOffsets.size());
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition : topicPartitionOffsets.keySet()) {
        topics.add(topicPartition.topic());
        futures.put(topicPartition, new KafkaFutureImpl<>());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> context =
        new MetadataOperationContext<>(topics, options, deadline, futures);
    Call metadataCall = getMetadataCall(context,
        () -> KafkaAdminClient.this.getListOffsetsCalls(context, topicPartitionOffsets, futures));
    runnable.call(metadataCall, nowMetadata);
    return new ListOffsetsResult(new HashMap<>(futures));
}
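For orientation, here is a minimal usage sketch of this API through the public Admin interface. The bootstrap address and topic name are hypothetical, not taken from the source above.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // hypothetical bootstrap address
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("foo", 0); // hypothetical topic
            // each partition can carry its own OffsetSpec; latest() asks for the end offset
            ListOffsetsResultInfo info = admin
                .listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()))
                .partitionResult(tp)
                .get();
            System.out.printf("latest offset for %s is %d%n", tp, info.offset());
        }
    }
}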
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
Class KafkaAdminClient, method getListOffsetsCalls.
// visible for benchmark
List<Call> getListOffsetsCalls(MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> context,
                               Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                               Map<TopicPartition, KafkaFutureImpl<ListOffsetsResultInfo>> futures) {
    MetadataResponse mr = context.response().orElseThrow(() -> new IllegalStateException("No Metadata response"));
    Cluster clusterSnapshot = mr.buildCluster();
    List<Call> calls = new ArrayList<>();
    // grouping topic partitions per leader
    Map<Node, Map<String, ListOffsetsTopic>> leaders = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetSpec> entry : topicPartitionOffsets.entrySet()) {
        OffsetSpec offsetSpec = entry.getValue();
        TopicPartition tp = entry.getKey();
        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
        long offsetQuery = getOffsetFromOffsetSpec(offsetSpec);
        // avoid sending listOffsets request for topics with errors
        if (!mr.errors().containsKey(tp.topic())) {
            Node node = clusterSnapshot.leaderFor(tp);
            if (node != null) {
                Map<String, ListOffsetsTopic> leadersOnNode = leaders.computeIfAbsent(node, k -> new HashMap<>());
                ListOffsetsTopic topic = leadersOnNode.computeIfAbsent(tp.topic(), k -> new ListOffsetsTopic().setName(tp.topic()));
                topic.partitions().add(new ListOffsetsPartition()
                    .setPartitionIndex(tp.partition())
                    .setTimestamp(offsetQuery));
            } else {
                future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
            }
        } else {
            future.completeExceptionally(mr.errors().get(tp.topic()).exception());
        }
    }
    for (final Map.Entry<Node, Map<String, ListOffsetsTopic>> entry : leaders.entrySet()) {
        final int brokerId = entry.getKey().id();
        calls.add(new Call("listOffsets on broker " + brokerId, context.deadline(), new ConstantNodeIdProvider(brokerId)) {

            final List<ListOffsetsTopic> partitionsToQuery = new ArrayList<>(entry.getValue().values());

            private boolean supportsMaxTimestamp = partitionsToQuery.stream()
                .flatMap(t -> t.partitions().stream())
                .anyMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP);

            @Override
            ListOffsetsRequest.Builder createRequest(int timeoutMs) {
                return ListOffsetsRequest.Builder
                    .forConsumer(true, context.options().isolationLevel(), supportsMaxTimestamp)
                    .setTargetTimes(partitionsToQuery);
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse;
                Map<TopicPartition, OffsetSpec> retryTopicPartitionOffsets = new HashMap<>();
                for (ListOffsetsTopicResponse topic : response.topics()) {
                    for (ListOffsetsPartitionResponse partition : topic.partitions()) {
                        TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
                        Errors error = Errors.forCode(partition.errorCode());
                        OffsetSpec offsetRequestSpec = topicPartitionOffsets.get(tp);
                        if (offsetRequestSpec == null) {
                            log.warn("Server response mentioned unknown topic partition {}", tp);
                        } else if (MetadataOperationContext.shouldRefreshMetadata(error)) {
                            retryTopicPartitionOffsets.put(tp, offsetRequestSpec);
                        } else if (error == Errors.NONE) {
                            Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
                                ? Optional.empty()
                                : Optional.of(partition.leaderEpoch());
                            future.complete(new ListOffsetsResultInfo(partition.offset(), partition.timestamp(), leaderEpoch));
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    }
                }
                if (retryTopicPartitionOffsets.isEmpty()) {
                    // The server should send back a response for every topic partition. But do a sanity check anyway.
                    for (ListOffsetsTopic topic : partitionsToQuery) {
                        for (ListOffsetsPartition partition : topic.partitions()) {
                            TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                            ApiException error = new ApiException("The response from broker " + brokerId
                                + " did not contain a result for topic partition " + tp);
                            futures.get(tp).completeExceptionally(error);
                        }
                    }
                } else {
                    Set<String> retryTopics = retryTopicPartitionOffsets.keySet().stream()
                        .map(TopicPartition::topic)
                        .collect(Collectors.toSet());
                    MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> retryContext =
                        new MetadataOperationContext<>(retryTopics, context.options(), context.deadline(), futures);
                    rescheduleMetadataTask(retryContext, () -> getListOffsetsCalls(retryContext, retryTopicPartitionOffsets, futures));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                for (ListOffsetsTopic topic : entry.getValue().values()) {
                    for (ListOffsetsPartition partition : topic.partitions()) {
                        TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
                        future.completeExceptionally(throwable);
                    }
                }
            }

            @Override
            boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
                if (supportsMaxTimestamp) {
                    supportsMaxTimestamp = false;
                    // fail any unsupported futures and remove partitions from the downgraded retry
                    Iterator<ListOffsetsTopic> topicIterator = partitionsToQuery.iterator();
                    while (topicIterator.hasNext()) {
                        ListOffsetsTopic topic = topicIterator.next();
                        Iterator<ListOffsetsPartition> partitionIterator = topic.partitions().iterator();
                        while (partitionIterator.hasNext()) {
                            ListOffsetsPartition partition = partitionIterator.next();
                            if (partition.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP) {
                                futures.get(new TopicPartition(topic.name(), partition.partitionIndex()))
                                    .completeExceptionally(new UnsupportedVersionException(
                                        "Broker " + brokerId + " does not support MAX_TIMESTAMP offset spec"));
                                partitionIterator.remove();
                            }
                        }
                        if (topic.partitions().isEmpty()) {
                            topicIterator.remove();
                        }
                    }
                    return !partitionsToQuery.isEmpty();
                }
                return false;
            }
        });
    }
    return calls;
}
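The helper getOffsetFromOffsetSpec used above is not shown on this page. Below is a hedged sketch of the mapping it performs, using the real sentinel constants from org.apache.kafka.common.requests.ListOffsetsRequest; the timestamp accessor of OffsetSpec.TimestampSpec is package-private in the actual client, so that branch is elided here.

import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.requests.ListOffsetsRequest;

final class OffsetSpecMapping {
    // Sketch only: translate an OffsetSpec into the sentinel "timestamp" that the
    // ListOffsets wire protocol expects for each query type.
    static long offsetQueryFor(OffsetSpec spec) {
        if (spec instanceof OffsetSpec.EarliestSpec) {
            return ListOffsetsRequest.EARLIEST_TIMESTAMP; // -2L
        } else if (spec instanceof OffsetSpec.MaxTimestampSpec) {
            return ListOffsetsRequest.MAX_TIMESTAMP;      // -3L
        } else if (spec instanceof OffsetSpec.LatestSpec) {
            return ListOffsetsRequest.LATEST_TIMESTAMP;   // -1L
        }
        // OffsetSpec.forTimestamp(t) carries a user-supplied timestamp; its accessor
        // is not public, so the real client handles that case internally.
        throw new IllegalArgumentException("unhandled spec: " + spec);
    }
}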
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
Class TopicAdmin, method endOffsets.
/**
 * Fetch the most recent offset for each of the supplied {@link TopicPartition} objects.
 *
 * @param partitions the topic partitions
 * @return the map of end offsets for each topic partition, or an empty map if the supplied partitions
 *         are null or empty
 * @throws UnsupportedVersionException if the admin client cannot read end offsets
 * @throws TimeoutException if the offset metadata could not be fetched before the amount of time allocated
 *         by {@code request.timeout.ms} expires, and this call can be retried
 * @throws LeaderNotAvailableException if the leader was not available and this call can be retried
 * @throws RetriableException if a retriable error occurs, or the thread is interrupted while attempting
 *         to perform this operation
 * @throws ConnectException if a non-retriable error occurs
 */
public Map<TopicPartition, Long> endOffsets(Set<TopicPartition> partitions) {
    if (partitions == null || partitions.isEmpty()) {
        return Collections.emptyMap();
    }
    Map<TopicPartition, OffsetSpec> offsetSpecMap =
        partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest()));
    ListOffsetsResult resultFuture = admin.listOffsets(offsetSpecMap);
    // Get the individual result for each topic partition so we have better error messages
    Map<TopicPartition, Long> result = new HashMap<>();
    for (TopicPartition partition : partitions) {
        try {
            ListOffsetsResultInfo info = resultFuture.partitionResult(partition).get();
            result.put(partition, info.offset());
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            String topic = partition.topic();
            if (cause instanceof AuthorizationException) {
                String msg = String.format("Not authorized to get the end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new ConnectException(msg, e);
            } else if (cause instanceof UnsupportedVersionException) {
                // Should theoretically never happen, because this method is the same as what the consumer uses and therefore
                // should exist in the broker since before the admin client was added
                String msg = String.format("API to get the end offsets for topic '%s' is unsupported on brokers at %s", topic, bootstrapServers());
                throw new UnsupportedVersionException(msg, e);
            } else if (cause instanceof TimeoutException) {
                String msg = String.format("Timed out while waiting to get end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new TimeoutException(msg, e);
            } else if (cause instanceof LeaderNotAvailableException) {
                String msg = String.format("Unable to get end offsets during leader election for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new LeaderNotAvailableException(msg, e);
            } else if (cause instanceof org.apache.kafka.common.errors.RetriableException) {
                throw (org.apache.kafka.common.errors.RetriableException) cause;
            } else {
                String msg = String.format("Error while getting end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new ConnectException(msg, e);
            }
        } catch (InterruptedException e) {
            Thread.interrupted();
            String msg = String.format("Interrupted while attempting to read end offsets for topic '%s' on brokers at %s", partition.topic(), bootstrapServers());
            throw new RetriableException(msg, e);
        }
    }
    return result;
}
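Since the Javadoc distinguishes retriable from non-retriable failures, here is a hedged caller-side sketch: retry endOffsets on a retriable error with a simple backoff. The topic name, backoff value, and helper class are assumptions, not part of the source above; both RetriableException hierarchies are caught because the method can surface either.

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.util.TopicAdmin;

final class EndOffsetsRetryExample {
    // Sketch only: poll end offsets until a non-retriable outcome is reached.
    static Map<TopicPartition, Long> endOffsetsWithRetry(TopicAdmin topicAdmin) throws InterruptedException {
        Set<TopicPartition> partitions = Collections.singleton(new TopicPartition("connect-offsets", 0));
        while (true) {
            try {
                return topicAdmin.endOffsets(partitions);
            } catch (org.apache.kafka.connect.errors.RetriableException
                     | org.apache.kafka.common.errors.RetriableException e) {
                Thread.sleep(100); // hypothetical fixed backoff; production code would cap retries
            }
        }
    }
}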
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
Class KafkaAdminClientTest, method testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec.
@Test
public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] { node }, new Node[] { node }));
    pInfos.add(new PartitionInfo("foo", 1, node, new Node[] { node }, new Node[] { node }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listOffsets response from broker 0
        env.kafkaClient().prepareUnsupportedVersionResponse(request -> request instanceof ListOffsetsRequest);
        ListOffsetsTopicResponse topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(Arrays.asList(topicResponse));
        // ensure that no max timestamp requests are retried
        env.kafkaClient().prepareResponseFrom(
            request -> request instanceof ListOffsetsRequest
                && ((ListOffsetsRequest) request).topics().stream()
                    .flatMap(t -> t.partitions().stream())
                    .noneMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP),
            new ListOffsetsResponse(responseData), node);
        ListOffsetsResult result = env.adminClient().listOffsets(new HashMap<TopicPartition, OffsetSpec>() {
            {
                put(tp0, OffsetSpec.maxTimestamp());
                put(tp1, OffsetSpec.latest());
            }
        });
        TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class);
        ListOffsetsResultInfo tp1Offset = result.partitionResult(tp1).get();
        assertEquals(345L, tp1Offset.offset());
        assertEquals(543, tp1Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp1Offset.timestamp());
    }
}
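The test above exercises the downgrade path in handleUnsupportedVersionException. For comparison, here is a hedged sketch of the equivalent caller-side pattern: request maxTimestamp() and fall back to latest() when the broker's ListOffsets version is too old (MAX_TIMESTAMP needs v7+, per KIP-734). The method name is illustrative.

import java.util.Collections;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;

final class MaxTimestampFallbackExample {
    // Sketch only: ask for the offset of the record with the highest timestamp,
    // falling back to the end offset on brokers that predate MAX_TIMESTAMP support.
    static long maxTimestampOffsetOrLatest(Admin admin, TopicPartition tp) throws Exception {
        try {
            return admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.maxTimestamp()))
                .partitionResult(tp).get().offset();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof UnsupportedVersionException) {
                return admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()))
                    .partitionResult(tp).get().offset();
            }
            throw e;
        }
    }
}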
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
Class KafkaAdminClientTest, method testListOffsetsMetadataRetriableErrors.
@Test
public void testListOffsetsMetadataRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("foo", 1, node1, new Node[] { node1 }, new Node[] { node1 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.LEADER_NOT_AVAILABLE));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listOffsets response from broker 0
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // listOffsets response from broker 1
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 789L, 987);
        responseData = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(Arrays.asList(t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(789L, offsets.get(tp1).offset());
        assertEquals(987, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
    }
}
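The retry behavior this test verifies hinges on MetadataOperationContext.shouldRefreshMetadata, which is not shown on this page. A hedged reconstruction, assuming the check simply tests for InvalidMetadataException:

import org.apache.kafka.common.errors.InvalidMetadataException;
import org.apache.kafka.common.protocol.Errors;

final class MetadataRetryCheck {
    // Sketch: LEADER_NOT_AVAILABLE and UNKNOWN_TOPIC_OR_PARTITION both map to
    // InvalidMetadataException subclasses, so both trigger the metadata refresh
    // and retried ListOffsets call that the test above verifies.
    static boolean shouldRefreshMetadata(Errors error) {
        return error.exception() instanceof InvalidMetadataException;
    }
}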