Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class KafkaAdminClient, method getListOffsetsCalls: groups the requested partitions by leader, builds one ListOffsets Call per broker, and handles the responses, including metadata-refresh retries and a downgrade path for brokers that do not support the MAX_TIMESTAMP offset spec.
// visible for benchmark
List<Call> getListOffsetsCalls(MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> context,
                               Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                               Map<TopicPartition, KafkaFutureImpl<ListOffsetsResultInfo>> futures) {
    MetadataResponse mr = context.response().orElseThrow(() -> new IllegalStateException("No Metadata response"));
    Cluster clusterSnapshot = mr.buildCluster();
    List<Call> calls = new ArrayList<>();
    // grouping topic partitions per leader
    Map<Node, Map<String, ListOffsetsTopic>> leaders = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetSpec> entry : topicPartitionOffsets.entrySet()) {
        OffsetSpec offsetSpec = entry.getValue();
        TopicPartition tp = entry.getKey();
        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
        long offsetQuery = getOffsetFromOffsetSpec(offsetSpec);
        // avoid sending listOffsets request for topics with errors
        if (!mr.errors().containsKey(tp.topic())) {
            Node node = clusterSnapshot.leaderFor(tp);
            if (node != null) {
                Map<String, ListOffsetsTopic> leadersOnNode = leaders.computeIfAbsent(node, k -> new HashMap<>());
                ListOffsetsTopic topic = leadersOnNode.computeIfAbsent(tp.topic(),
                        k -> new ListOffsetsTopic().setName(tp.topic()));
                topic.partitions().add(new ListOffsetsPartition()
                        .setPartitionIndex(tp.partition())
                        .setTimestamp(offsetQuery));
            } else {
                future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
            }
        } else {
            future.completeExceptionally(mr.errors().get(tp.topic()).exception());
        }
    }
    for (final Map.Entry<Node, Map<String, ListOffsetsTopic>> entry : leaders.entrySet()) {
        final int brokerId = entry.getKey().id();
        calls.add(new Call("listOffsets on broker " + brokerId, context.deadline(), new ConstantNodeIdProvider(brokerId)) {

            final List<ListOffsetsTopic> partitionsToQuery = new ArrayList<>(entry.getValue().values());

            private boolean supportsMaxTimestamp = partitionsToQuery.stream()
                    .flatMap(t -> t.partitions().stream())
                    .anyMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP);

            @Override
            ListOffsetsRequest.Builder createRequest(int timeoutMs) {
                return ListOffsetsRequest.Builder
                        .forConsumer(true, context.options().isolationLevel(), supportsMaxTimestamp)
                        .setTargetTimes(partitionsToQuery);
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse;
                Map<TopicPartition, OffsetSpec> retryTopicPartitionOffsets = new HashMap<>();
                for (ListOffsetsTopicResponse topic : response.topics()) {
                    for (ListOffsetsPartitionResponse partition : topic.partitions()) {
                        TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
                        Errors error = Errors.forCode(partition.errorCode());
                        OffsetSpec offsetRequestSpec = topicPartitionOffsets.get(tp);
                        if (offsetRequestSpec == null) {
                            log.warn("Server response mentioned unknown topic partition {}", tp);
                        } else if (MetadataOperationContext.shouldRefreshMetadata(error)) {
                            retryTopicPartitionOffsets.put(tp, offsetRequestSpec);
                        } else if (error == Errors.NONE) {
                            Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
                                    ? Optional.empty()
                                    : Optional.of(partition.leaderEpoch());
                            future.complete(new ListOffsetsResultInfo(partition.offset(), partition.timestamp(), leaderEpoch));
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    }
                }
                if (retryTopicPartitionOffsets.isEmpty()) {
                    // The server should send back a response for every topic partition. But do a sanity check anyway.
                    for (ListOffsetsTopic topic : partitionsToQuery) {
                        for (ListOffsetsPartition partition : topic.partitions()) {
                            TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                            ApiException error = new ApiException("The response from broker " + brokerId
                                    + " did not contain a result for topic partition " + tp);
                            futures.get(tp).completeExceptionally(error);
                        }
                    }
                } else {
                    Set<String> retryTopics = retryTopicPartitionOffsets.keySet().stream()
                            .map(TopicPartition::topic)
                            .collect(Collectors.toSet());
                    MetadataOperationContext<ListOffsetsResultInfo, ListOffsetsOptions> retryContext =
                            new MetadataOperationContext<>(retryTopics, context.options(), context.deadline(), futures);
                    rescheduleMetadataTask(retryContext, () -> getListOffsetsCalls(retryContext, retryTopicPartitionOffsets, futures));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                for (ListOffsetsTopic topic : entry.getValue().values()) {
                    for (ListOffsetsPartition partition : topic.partitions()) {
                        TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
                        KafkaFutureImpl<ListOffsetsResultInfo> future = futures.get(tp);
                        future.completeExceptionally(throwable);
                    }
                }
            }

            @Override
            boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
                if (supportsMaxTimestamp) {
                    supportsMaxTimestamp = false;
                    // fail any unsupported futures and remove partitions from the downgraded retry
                    Iterator<ListOffsetsTopic> topicIterator = partitionsToQuery.iterator();
                    while (topicIterator.hasNext()) {
                        ListOffsetsTopic topic = topicIterator.next();
                        Iterator<ListOffsetsPartition> partitionIterator = topic.partitions().iterator();
                        while (partitionIterator.hasNext()) {
                            ListOffsetsPartition partition = partitionIterator.next();
                            if (partition.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP) {
                                futures.get(new TopicPartition(topic.name(), partition.partitionIndex()))
                                        .completeExceptionally(new UnsupportedVersionException(
                                                "Broker " + brokerId + " does not support MAX_TIMESTAMP offset spec"));
                                partitionIterator.remove();
                            }
                        }
                        if (topic.partitions().isEmpty()) {
                            topicIterator.remove();
                        }
                    }
                    return !partitionsToQuery.isEmpty();
                }
                return false;
            }
        });
    }
    return calls;
}
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class KafkaConsumerTest, method listOffsetsResponse: a test helper that assembles a ListOffsetsResponse from a map of per-partition offsets and a map of per-partition errors.
private ListOffsetsResponse listOffsetsResponse(Map<TopicPartition, Long> partitionOffsets,
                                                Map<TopicPartition, Errors> partitionErrors) {
    Map<String, ListOffsetsTopicResponse> responses = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> partitionOffset : partitionOffsets.entrySet()) {
        TopicPartition tp = partitionOffset.getKey();
        ListOffsetsTopicResponse topic = responses.computeIfAbsent(tp.topic(),
                k -> new ListOffsetsTopicResponse().setName(tp.topic()));
        topic.partitions().add(new ListOffsetsPartitionResponse()
                .setPartitionIndex(tp.partition())
                .setErrorCode(Errors.NONE.code())
                .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
                .setOffset(partitionOffset.getValue()));
    }
    for (Map.Entry<TopicPartition, Errors> partitionError : partitionErrors.entrySet()) {
        TopicPartition tp = partitionError.getKey();
        ListOffsetsTopicResponse topic = responses.computeIfAbsent(tp.topic(),
                k -> new ListOffsetsTopicResponse().setName(tp.topic()));
        topic.partitions().add(new ListOffsetsPartitionResponse()
                .setPartitionIndex(tp.partition())
                .setErrorCode(partitionError.getValue().code())
                .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
                .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET));
    }
    ListOffsetsResponseData data = new ListOffsetsResponseData()
            .setTopics(new ArrayList<>(responses.values()));
    return new ListOffsetsResponse(data);
}
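In a consumer test this helper would typically be handed to the mock network client; a hedged sketch of such a call, where the client field name, topic, and offset value are assumptions for illustration:

// Hypothetical usage: enqueue a canned ListOffsetsResponse on the test's MockClient.
// The field name "client", the topic "topic", and the offset 50L are illustrative assumptions.
TopicPartition tp = new TopicPartition("topic", 0);
client.prepareResponse(listOffsetsResponse(
        Collections.singletonMap(tp, 50L),  // tp resolves to offset 50, no error
        Collections.emptyMap()));           // no failed partitions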
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class SaslAuthenticatorTest, method testConvertListOffsetResponseToSaslHandshakeResponse: serializes a ListOffsetsResponse with correlation ID 0, then verifies that parsing it against a request header carrying a reserved SASL correlation ID fails with a SchemaException, and against a mismatched correlation ID fails with an IllegalStateException.
@Test
public void testConvertListOffsetResponseToSaslHandshakeResponse() {
    ListOffsetsResponseData data = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
                    .setName("topic")
                    .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
                            .setErrorCode(Errors.NONE.code())
                            .setLeaderEpoch(ListOffsetsResponse.UNKNOWN_EPOCH)
                            .setPartitionIndex(0)
                            .setOffset(0)
                            .setTimestamp(0)))));
    ListOffsetsResponse response = new ListOffsetsResponse(data);
    ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(response, LIST_OFFSETS.latestVersion(), 0);
    final RequestHeader header0 = new RequestHeader(LIST_OFFSETS, LIST_OFFSETS.latestVersion(), "id",
            SaslClientAuthenticator.MIN_RESERVED_CORRELATION_ID);
    assertThrows(SchemaException.class, () -> NetworkClient.parseResponse(buffer.duplicate(), header0));
    final RequestHeader header1 = new RequestHeader(LIST_OFFSETS, LIST_OFFSETS.latestVersion(), "id", 1);
    assertThrows(IllegalStateException.class, () -> NetworkClient.parseResponse(buffer.duplicate(), header1));
}
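For contrast with the two failing parses, a header whose correlation ID matches the one serialized above (0) should round-trip; a hedged sketch that is not part of the original test:

// Hedged sketch: with the matching correlation ID (0, as serialized above),
// NetworkClient.parseResponse should return the ListOffsetsResponse intact.
RequestHeader matching = new RequestHeader(LIST_OFFSETS, LIST_OFFSETS.latestVersion(), "id", 0);
AbstractResponse parsed = NetworkClient.parseResponse(buffer.duplicate(), matching);
assertTrue(parsed instanceof ListOffsetsResponse);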
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec: verifies that when a broker rejects the MAX_TIMESTAMP offset spec with an unsupported-version error, only the affected partition's future fails and the remaining partitions are retried without the MAX_TIMESTAMP spec.
@Test
public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] { node }, new Node[] { node }));
    pInfos.add(new PartitionInfo("foo", 1, node, new Node[] { node }, new Node[] { node }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos,
            Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listOffsets response from broker 0
        env.kafkaClient().prepareUnsupportedVersionResponse(request -> request instanceof ListOffsetsRequest);
        ListOffsetsTopicResponse topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
                .setThrottleTimeMs(0)
                .setTopics(Arrays.asList(topicResponse));
        // ensure that no max timestamp requests are retried
        env.kafkaClient().prepareResponseFrom(
                request -> request instanceof ListOffsetsRequest
                        && ((ListOffsetsRequest) request).topics().stream()
                                .flatMap(t -> t.partitions().stream())
                                .noneMatch(p -> p.timestamp() == ListOffsetsRequest.MAX_TIMESTAMP),
                new ListOffsetsResponse(responseData), node);
        ListOffsetsResult result = env.adminClient().listOffsets(new HashMap<TopicPartition, OffsetSpec>() {
            {
                put(tp0, OffsetSpec.maxTimestamp());
                put(tp1, OffsetSpec.latest());
            }
        });
        TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class);
        ListOffsetsResultInfo tp1Offset = result.partitionResult(tp1).get();
        assertEquals(345L, tp1Offset.offset());
        assertEquals(543, tp1Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp1Offset.timestamp());
    }
}
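The -1L timestamp asserted here is the LATEST sentinel. For reference, a sketch of the OffsetSpec-to-timestamp mapping that getOffsetFromOffsetSpec performs when building requests; the helper itself is private to KafkaAdminClient and is not shown in this article, so treat this as an approximation based on the ListOffsetsRequest constants:

// Sketch of the OffsetSpec -> sentinel timestamp mapping, consistent with the
// ListOffsetsRequest constants; the real helper is private to KafkaAdminClient.
long getOffsetFromOffsetSpec(OffsetSpec offsetSpec) {
    if (offsetSpec instanceof OffsetSpec.TimestampSpec)
        return ((OffsetSpec.TimestampSpec) offsetSpec).timestamp();  // lookup by wall-clock time
    if (offsetSpec instanceof OffsetSpec.EarliestSpec)
        return ListOffsetsRequest.EARLIEST_TIMESTAMP;  // -2L
    if (offsetSpec instanceof OffsetSpec.MaxTimestampSpec)
        return ListOffsetsRequest.MAX_TIMESTAMP;       // -3L
    return ListOffsetsRequest.LATEST_TIMESTAMP;        // -1L
}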
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsMetadataRetriableErrors: verifies that listOffsets retries the metadata fetch after LEADER_NOT_AVAILABLE and UNKNOWN_TOPIC_OR_PARTITION errors and then resolves offsets from both brokers.
@Test
public void testListOffsetsMetadataRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("foo", 1, node1, new Node[] { node1 }, new Node[] { node1 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos,
            Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.LEADER_NOT_AVAILABLE));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listOffsets response from broker 0
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
                .setThrottleTimeMs(0)
                .setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // listOffsets response from broker 1
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 789L, 987);
        responseData = new ListOffsetsResponseData()
                .setThrottleTimeMs(0)
                .setTopics(Arrays.asList(t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(789L, offsets.get(tp1).offset());
        assertEquals(987, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
    }
}
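The two failed metadata fetches above are retried because their errors are classified as retriable by MetadataOperationContext.shouldRefreshMetadata, the same predicate used in getListOffsetsCalls. A hedged sketch of that check, consistent with the errors this test exercises:

// Hedged sketch: an error triggers a metadata refresh when its exception type is an
// InvalidMetadataException, which covers LEADER_NOT_AVAILABLE and UNKNOWN_TOPIC_OR_PARTITION.
public static boolean shouldRefreshMetadata(Errors error) {
    return error.exception() instanceof InvalidMetadataException;
}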