Use of org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest in the Apache Kafka project.
Example: the prepareRequest method of the OffsetsForLeaderEpochClient class.
@Override
protected AbstractRequest.Builder<OffsetsForLeaderEpochRequest> prepareRequest(Node node, Map<TopicPartition, SubscriptionState.FetchPosition> requestData) {
    // Group the requested positions by topic. Positions without an offset
    // epoch are skipped — there is nothing to validate for them.
    OffsetForLeaderTopicCollection leaderTopics = new OffsetForLeaderTopicCollection(requestData.size());
    for (Map.Entry<TopicPartition, SubscriptionState.FetchPosition> entry : requestData.entrySet()) {
        TopicPartition partition = entry.getKey();
        SubscriptionState.FetchPosition position = entry.getValue();
        position.offsetEpoch.ifPresent(epochToValidate -> {
            // Reuse the per-topic entry if one was already created for an
            // earlier partition of the same topic.
            OffsetForLeaderTopic leaderTopic = leaderTopics.find(partition.topic());
            if (leaderTopic == null) {
                leaderTopic = new OffsetForLeaderTopic().setTopic(partition.topic());
                leaderTopics.add(leaderTopic);
            }
            // NO_PARTITION_LEADER_EPOCH is the sentinel sent when the current
            // leader epoch is unknown.
            leaderTopic.partitions().add(new OffsetForLeaderPartition()
                    .setPartition(partition.partition())
                    .setLeaderEpoch(epochToValidate)
                    .setCurrentLeaderEpoch(position.currentLeader.epoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH)));
        });
    }
    return OffsetsForLeaderEpochRequest.Builder.forConsumer(leaderTopics);
}
Use of org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest in the Apache Kafka project.
Example: the testOffsetValidationRequestGrouping method of the FetcherTest class.
@Test
// Verifies that offset-validation (OffsetsForLeaderEpoch) requests are grouped
// by leader node: each broker receives exactly one request containing exactly
// the partitions it leads, and every assigned partition is covered once.
public void testOffsetValidationRequestGrouping() {
buildFetcher();
// Four partitions of one topic, spread across a 3-broker metadata view.
assignFromUser(mkSet(tp0, tp1, tp2, tp3));
metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWithIds("dummy", 3, Collections.emptyMap(), singletonMap(topicName, 4), tp -> 5, topicIds), false, 0L);
// Seek every partition to offset 0 with epoch 4 so that each position
// requires validation against its leader.
for (TopicPartition tp : subscriptions.assignedPartitions()) {
Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(metadata.currentLeader(tp).leader, Optional.of(4));
subscriptions.seekUnvalidated(tp, new SubscriptionState.FetchPosition(0, Optional.of(4), leaderAndEpoch));
}
// Accumulates the partitions seen across all per-node expected sets, to
// prove the node-level sets are disjoint and jointly cover the assignment.
Set<TopicPartition> allRequestedPartitions = new HashSet<>();
for (Node node : metadata.fetch().nodes()) {
apiVersions.update(node.idString(), NodeApiVersions.create());
// Partitions this node leads, per current metadata.
Set<TopicPartition> expectedPartitions = subscriptions.assignedPartitions().stream().filter(tp -> metadata.currentLeader(tp).leader.equals(Optional.of(node))).collect(Collectors.toSet());
// No partition may appear under more than one leader.
assertTrue(expectedPartitions.stream().noneMatch(allRequestedPartitions::contains));
assertTrue(expectedPartitions.size() > 0);
allRequestedPartitions.addAll(expectedPartitions);
// Build a successful OffsetsForLeaderEpoch response covering exactly the
// partitions this node is expected to be asked about.
OffsetForLeaderEpochResponseData data = new OffsetForLeaderEpochResponseData();
expectedPartitions.forEach(tp -> {
OffsetForLeaderTopicResult topic = data.topics().find(tp.topic());
if (topic == null) {
topic = new OffsetForLeaderTopicResult().setTopic(tp.topic());
data.topics().add(topic);
}
topic.partitions().add(new EpochEndOffset().setPartition(tp.partition()).setErrorCode(Errors.NONE.code()).setLeaderEpoch(4).setEndOffset(0));
});
OffsetsForLeaderEpochResponse response = new OffsetsForLeaderEpochResponse(data);
// The matcher asserts the request sent to this node names exactly the
// partitions it leads — this is the grouping property under test.
client.prepareResponseFrom(body -> {
OffsetsForLeaderEpochRequest request = (OffsetsForLeaderEpochRequest) body;
return expectedPartitions.equals(offsetForLeaderPartitionMap(request.data()).keySet());
}, response, node);
}
// Together the per-node sets must equal the full assignment.
assertEquals(subscriptions.assignedPartitions(), allRequestedPartitions);
// Trigger validation and drive the client so the prepared responses flow back.
fetcher.validateOffsetsIfNeeded();
consumerClient.pollNoWakeup();
// Every partition's position should now be validated.
assertTrue(subscriptions.assignedPartitions().stream().noneMatch(subscriptions::awaitingValidation));
}
Aggregations