Use of org.apache.kafka.common.message.OffsetForLeaderEpochResponseData in project kafka by apache.
From the class FetcherTest, method prepareOffsetsForLeaderEpochResponse:
private OffsetsForLeaderEpochResponse prepareOffsetsForLeaderEpochResponse(
        TopicPartition topicPartition, Errors error, int leaderEpoch, long endOffset) {
    OffsetForLeaderEpochResponseData data = new OffsetForLeaderEpochResponseData();
    data.topics().add(new OffsetForLeaderTopicResult()
        .setTopic(topicPartition.topic())
        .setPartitions(Collections.singletonList(new EpochEndOffset()
            .setPartition(topicPartition.partition())
            .setErrorCode(error.code())
            .setLeaderEpoch(leaderEpoch)
            .setEndOffset(endOffset))));
    return new OffsetsForLeaderEpochResponse(data);
}
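The helper only builds the response object. Below is a minimal sketch of how such a prepared response is typically wired into the test's MockClient before triggering offset validation; the fixture fields (client, fetcher, consumerClient, tp0) and the argument values are assumptions taken from the surrounding FetcherTest setup, not part of the snippet above.

// Sketch only: assumes FetcherTest fixtures (client, fetcher, consumerClient, tp0).
client.prepareResponse(
    prepareOffsetsForLeaderEpochResponse(tp0, Errors.NONE, leaderEpoch, 10L));
fetcher.validateOffsetsIfNeeded();  // issues the OffsetsForLeaderEpoch request
consumerClient.pollNoWakeup();      // delivers the prepared response to the fetcher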
Use of org.apache.kafka.common.message.OffsetForLeaderEpochResponseData in project kafka by apache.
From the class FetcherTest, method testOffsetValidationRequestGrouping:
@Test
public void testOffsetValidationRequestGrouping() {
    buildFetcher();
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    metadata.updateWithCurrentRequestVersion(
        RequestTestUtils.metadataUpdateWithIds("dummy", 3, Collections.emptyMap(),
            singletonMap(topicName, 4), tp -> 5, topicIds), false, 0L);

    // Seek every assigned partition to a position that still requires offset validation
    for (TopicPartition tp : subscriptions.assignedPartitions()) {
        Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(
            metadata.currentLeader(tp).leader, Optional.of(4));
        subscriptions.seekUnvalidated(tp,
            new SubscriptionState.FetchPosition(0, Optional.of(4), leaderAndEpoch));
    }

    Set<TopicPartition> allRequestedPartitions = new HashSet<>();
    for (Node node : metadata.fetch().nodes()) {
        apiVersions.update(node.idString(), NodeApiVersions.create());

        // Partitions led by this node should be grouped into a single request to it
        Set<TopicPartition> expectedPartitions = subscriptions.assignedPartitions().stream()
            .filter(tp -> metadata.currentLeader(tp).leader.equals(Optional.of(node)))
            .collect(Collectors.toSet());
        assertTrue(expectedPartitions.stream().noneMatch(allRequestedPartitions::contains));
        assertTrue(expectedPartitions.size() > 0);
        allRequestedPartitions.addAll(expectedPartitions);

        // Build an OffsetsForLeaderEpoch response covering exactly those partitions
        OffsetForLeaderEpochResponseData data = new OffsetForLeaderEpochResponseData();
        expectedPartitions.forEach(tp -> {
            OffsetForLeaderTopicResult topic = data.topics().find(tp.topic());
            if (topic == null) {
                topic = new OffsetForLeaderTopicResult().setTopic(tp.topic());
                data.topics().add(topic);
            }
            topic.partitions().add(new EpochEndOffset()
                .setPartition(tp.partition())
                .setErrorCode(Errors.NONE.code())
                .setLeaderEpoch(4)
                .setEndOffset(0));
        });
        OffsetsForLeaderEpochResponse response = new OffsetsForLeaderEpochResponse(data);

        // Only accept a request from this node that asks for exactly the expected partitions
        client.prepareResponseFrom(body -> {
            OffsetsForLeaderEpochRequest request = (OffsetsForLeaderEpochRequest) body;
            return expectedPartitions.equals(offsetForLeaderPartitionMap(request.data()).keySet());
        }, response, node);
    }

    assertEquals(subscriptions.assignedPartitions(), allRequestedPartitions);
    fetcher.validateOffsetsIfNeeded();
    consumerClient.pollNoWakeup();
    assertTrue(subscriptions.assignedPartitions().stream()
        .noneMatch(subscriptions::awaitingValidation));
}
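The request matcher above relies on an offsetForLeaderPartitionMap helper that is not part of this snippet. A plausible sketch follows, assuming it simply flattens the request data into a map keyed by TopicPartition; the exact signature in FetcherTest may differ.

// Assumed shape of the helper used in the matcher above: flattens the request's
// topics/partitions into a map keyed by TopicPartition.
private Map<TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition> offsetForLeaderPartitionMap(
        OffsetForLeaderEpochRequestData data) {
    Map<TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition> result = new HashMap<>();
    data.topics().forEach(topic ->
        topic.partitions().forEach(partition ->
            result.put(new TopicPartition(topic.topic(), partition.partition()), partition)));
    return result;
}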