Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse in project kafka by apache.
From the class FetcherTest, method testGetOffsetsForTimesWithUnknownOffsetV0:
@Test
public void testGetOffsetsForTimesWithUnknownOffsetV0() {
buildFetcher();
// Empty map
assertTrue(fetcher.offsetsForTimes(new HashMap<>(), time.timer(100L)).isEmpty());
// Unknown Offset
client.reset();
// Ensure metadata has both partitions.
MetadataResponse initialMetadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
client.updateMetadata(initialMetadataUpdate);
// Force LIST_OFFSETS version 0
Node node = metadata.fetch().nodes().get(0);
apiVersions.update(node.idString(), NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 0));
ListOffsetsResponseData data = new ListOffsetsResponseData()
    .setThrottleTimeMs(0)
    .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
        .setName(tp0.topic())
        .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.NONE.code())
            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
            .setOldStyleOffsets(Collections.emptyList())))));
client.prepareResponseFrom(new ListOffsetsResponse(data), metadata.fetch().leaderFor(tp0));
Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
timestampToSearch.put(tp0, 0L);
Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));
assertTrue(offsetAndTimestampMap.containsKey(tp0));
assertNull(offsetAndTimestampMap.get(tp0));
}
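A note on the v0 shape, grounded in the builder above and in testListOffsetsResponseVersions further down: LIST_OFFSETS version 0 reports results through the oldStyleOffsets list, so an unknown offset is expressed as an empty list, while later versions use the scalar offset and timestamp fields. A minimal fragment of the two styles (partitionResponse stands in for any ListOffsetsPartitionResponse builder):

// v0: an unknown result is an empty old-style offsets list
partitionResponse.setOldStyleOffsets(Collections.emptyList());
// v1 and later: an unknown result uses the sentinel scalar fields
partitionResponse.setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)
    .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP);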
From the class FetcherTest, method testBatchedListOffsetsMetadataErrors:
@Test
public void testBatchedListOffsetsMetadataErrors() {
buildFetcher();
// Both partitions come back with retriable, metadata-level errors.
ListOffsetsResponseData data = new ListOffsetsResponseData()
    .setThrottleTimeMs(0)
    .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
        .setName(tp0.topic())
        .setPartitions(Arrays.asList(
            new ListOffsetsPartitionResponse()
                .setPartitionIndex(tp0.partition())
                .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code())
                .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
                .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET),
            new ListOffsetsPartitionResponse()
                .setPartitionIndex(tp1.partition())
                .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
                .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
                .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)))));
client.prepareResponse(new ListOffsetsResponse(data));
Map<TopicPartition, Long> offsetsToSearch = new HashMap<>();
offsetsToSearch.put(tp0, ListOffsetsRequest.EARLIEST_TIMESTAMP);
offsetsToSearch.put(tp1, ListOffsetsRequest.EARLIEST_TIMESTAMP);
assertThrows(TimeoutException.class, () -> fetcher.offsetsForTimes(offsetsToSearch, time.timer(1)));
}
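Why the call is expected to time out rather than return an error, reading the test name together with the fetcher's behavior: both partition-level errors are retriable, so the fetcher marks the metadata for refresh and would retry the lookup; the 1 ms timer expires before that retry can complete, and offsetsForTimes surfaces this as a TimeoutException.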
From the class FetcherTest, helper method listOffsetResponse:
private ListOffsetsResponse listOffsetResponse(Map<TopicPartition, Long> offsets, Errors error, long timestamp, int leaderEpoch) {
Map<String, List<ListOffsetsPartitionResponse>> responses = new HashMap<>();
for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
TopicPartition tp = entry.getKey();
responses.putIfAbsent(tp.topic(), new ArrayList<>());
responses.get(tp.topic()).add(new ListOffsetsPartitionResponse()
    .setPartitionIndex(tp.partition())
    .setErrorCode(error.code())
    .setOffset(entry.getValue())
    .setTimestamp(timestamp)
    .setLeaderEpoch(leaderEpoch));
}
List<ListOffsetsTopicResponse> topics = new ArrayList<>();
for (Map.Entry<String, List<ListOffsetsPartitionResponse>> response : responses.entrySet()) {
topics.add(new ListOffsetsTopicResponse().setName(response.getKey()).setPartitions(response.getValue()));
}
ListOffsetsResponseData data = new ListOffsetsResponseData().setTopics(topics);
return new ListOffsetsResponse(data);
}
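A minimal usage sketch for this helper, assuming it is paired with the MockClient and the tp0 fixture used in the tests above (the specific values are illustrative, not taken from FetcherTest):

// Fake a successful LIST_OFFSETS reply for tp0: offset 5 at timestamp 1, leader epoch 0.
Map<TopicPartition, Long> offsets = Collections.singletonMap(tp0, 5L);
client.prepareResponse(listOffsetResponse(offsets, Errors.NONE, 1L, 0));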
From the class FetcherTest, method testGetOffsetsForTimesWithUnknownOffset:
private void testGetOffsetsForTimesWithUnknownOffset() {
client.reset();
// Ensure metadata has both partitions.
MetadataResponse initialMetadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
client.updateMetadata(initialMetadataUpdate);
ListOffsetsResponseData data = new ListOffsetsResponseData()
    .setThrottleTimeMs(0)
    .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
        .setName(tp0.topic())
        .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.NONE.code())
            .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
            .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)))));
client.prepareResponseFrom(new ListOffsetsResponse(data), metadata.fetch().leaderFor(tp0));
Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
timestampToSearch.put(tp0, 0L);
Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));
assertTrue(offsetAndTimestampMap.containsKey(tp0));
assertNull(offsetAndTimestampMap.get(tp0));
}
From the class MessageTest, method testListOffsetsResponseVersions:
@Test
public void testListOffsetsResponseVersions() throws Exception {
ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse()
    .setErrorCode(Errors.NONE.code())
    .setPartitionIndex(0)
    .setOldStyleOffsets(Collections.singletonList(321L));
List<ListOffsetsTopicResponse> topics = Collections.singletonList(new ListOffsetsTopicResponse()
    .setName("topic")
    .setPartitions(Collections.singletonList(partition)));
Supplier<ListOffsetsResponseData> response = () -> new ListOffsetsResponseData().setTopics(topics);
for (short version : ApiKeys.LIST_OFFSETS.allVersions()) {
ListOffsetsResponseData responseData = response.get();
if (version > 0) {
responseData.topics().get(0).partitions().get(0)
    .setOldStyleOffsets(Collections.emptyList())
    .setOffset(456L)
    .setTimestamp(123L);
}
if (version > 1) {
responseData.setThrottleTimeMs(1000);
}
if (version > 3) {
partition.setLeaderEpoch(1);
}
testEquivalentMessageRoundTrip(version, responseData);
}
}
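For context, a hedged sketch of what one iteration of that round trip amounts to, using the generated message plumbing; testEquivalentMessageRoundTrip in MessageTest wraps equivalent steps, so the explicit calls below are an illustration rather than the test's actual body:

// Serialize the response data at one version and read it back, then compare.
short version = 4;
ByteBuffer buffer = MessageUtil.toByteBuffer(responseData, version);
ListOffsetsResponseData roundTripped = new ListOffsetsResponseData(new ByteBufferAccessor(buffer), version);
assertEquals(responseData, roundTripped);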