Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class FetcherTest, method listOffsetResponse.
private ListOffsetsResponse listOffsetResponse(Map<TopicPartition, Long> offsets, Errors error, long timestamp, int leaderEpoch) {
    // Group the per-partition responses by topic, as the ListOffsets response schema requires.
    Map<String, List<ListOffsetsPartitionResponse>> responses = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
        TopicPartition tp = entry.getKey();
        responses.computeIfAbsent(tp.topic(), topic -> new ArrayList<>())
            .add(new ListOffsetsPartitionResponse()
                .setPartitionIndex(tp.partition())
                .setErrorCode(error.code())
                .setOffset(entry.getValue())
                .setTimestamp(timestamp)
                .setLeaderEpoch(leaderEpoch));
    }
    List<ListOffsetsTopicResponse> topics = new ArrayList<>();
    for (Map.Entry<String, List<ListOffsetsPartitionResponse>> response : responses.entrySet()) {
        topics.add(new ListOffsetsTopicResponse()
            .setName(response.getKey())
            .setPartitions(response.getValue()));
    }
    ListOffsetsResponseData data = new ListOffsetsResponseData().setTopics(topics);
    return new ListOffsetsResponse(data);
}
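As a minimal usage sketch (not taken from FetcherTest itself), a test could enqueue a mock broker reply built by this helper; the partition name, offset, timestamp, and epoch values below are illustrative:

// Queue a successful ListOffsets reply for one partition; the next matching
// request sent through the mock client will receive it.
Map<TopicPartition, Long> offsets = new HashMap<>();
offsets.put(new TopicPartition("test", 0), 42L);
client.prepareResponse(listOffsetResponse(offsets, Errors.NONE, 1000L, 0));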
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class FetcherTest, method testGetOffsetsForTimesWithUnknownOffset.
private void testGetOffsetsForTimesWithUnknownOffset() {
    client.reset();
    // Make sure metadata is up to date so the leader for the partition is known.
    MetadataResponse initialMetadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
    client.updateMetadata(initialMetadataUpdate);
    // Broker reply carrying UNKNOWN_TIMESTAMP/UNKNOWN_OFFSET: no offset exists for the searched timestamp.
    ListOffsetsResponseData data = new ListOffsetsResponseData()
        .setThrottleTimeMs(0)
        .setTopics(Collections.singletonList(new ListOffsetsTopicResponse()
            .setName(tp0.topic())
            .setPartitions(Collections.singletonList(new ListOffsetsPartitionResponse()
                .setPartitionIndex(tp0.partition())
                .setErrorCode(Errors.NONE.code())
                .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
                .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)))));
    client.prepareResponseFrom(new ListOffsetsResponse(data), metadata.fetch().leaderFor(tp0));
    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.offsetsForTimes(timestampToSearch, time.timer(Long.MAX_VALUE));
    // The partition is present in the result, but mapped to null because no offset was found.
    assertTrue(offsetAndTimestampMap.containsKey(tp0));
    assertNull(offsetAndTimestampMap.get(tp0));
}
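Assuming the listOffsetResponse helper shown earlier is in scope, the same mock reply could be built more compactly; a sketch using the UNKNOWN_* constants (ListOffsetsResponse.UNKNOWN_EPOCH is assumed to exist alongside UNKNOWN_OFFSET and UNKNOWN_TIMESTAMP):

// Sketch: equivalent reply via the helper, signalling "no offset found".
client.prepareResponseFrom(
    listOffsetResponse(singletonMap(tp0, ListOffsetsResponse.UNKNOWN_OFFSET),
        Errors.NONE, ListOffsetsResponse.UNKNOWN_TIMESTAMP, ListOffsetsResponse.UNKNOWN_EPOCH),
    metadata.fetch().leaderFor(tp0));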
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsWithLeaderChange.
@Test
public void testListOffsetsWithLeaderChange() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    Node node2 = new Node(2, "localhost", 8122);
    List<Node> nodes = Arrays.asList(node0, node1, node2);
    final PartitionInfo oldPartitionInfo = new PartitionInfo("foo", 0, node0,
        new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
    final Cluster oldCluster = new Cluster("mockClusterId", nodes, singletonList(oldPartitionInfo),
        Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(oldCluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(oldCluster, Errors.NONE));
        // The old leader (node0) rejects the request with NOT_LEADER_OR_FOLLOWER.
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // Leadership moves from node0 to node1; NOT_LEADER_OR_FOLLOWER triggers a metadata refresh.
        final PartitionInfo newPartitionInfo = new PartitionInfo("foo", 0, node1,
            new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
        final Cluster newCluster = new Cluster("mockClusterId", nodes, singletonList(newPartitionInfo),
            Collections.emptySet(), Collections.emptySet(), node0);
        env.kafkaClient().prepareResponse(prepareMetadataResponse(newCluster, Errors.NONE));
        // The retried request succeeds against the new leader (node1).
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        // Only the values from the second (successful) response should be visible.
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(456, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-2L, offsets.get(tp0).timestamp());
    }
}
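ListOffsetsResult also exposes per-partition futures, so a caller interested in a single partition need not block on all(); a brief sketch:

// Per-partition alternative to result.all().get(): block only on tp0's outcome.
ListOffsetsResultInfo info = result.partitionResult(tp0).get();
assertEquals(123L, info.offset());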
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsNonRetriableErrors.
@Test
public void testListOffsetsNonRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos,
        Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // TOPIC_AUTHORIZATION_FAILED is not retriable, so no metadata refresh or retry is prepared.
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.TOPIC_AUTHORIZATION_FAILED, -1L, -1L, -1);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData));
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        TestUtils.assertFutureError(result.all(), TopicAuthorizationException.class);
    }
}
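TestUtils.assertFutureError wraps a common pattern; a roughly equivalent check, assuming JUnit 5's assertThrows is available, would be:

// The future completes exceptionally, wrapping the non-retriable cause.
ExecutionException e = assertThrows(ExecutionException.class, () -> result.all().get());
assertTrue(e.getCause() instanceof TopicAuthorizationException);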
Use of org.apache.kafka.common.requests.ListOffsetsResponse in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsRetriableErrors.
@Test
public void testListOffsetsRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    pInfos.add(new PartitionInfo("foo", 1, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    pInfos.add(new PartitionInfo("bar", 0, node1, new Node[] { node1, node0 }, new Node[] { node1, node0 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos,
        Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    final TopicPartition tp2 = new TopicPartition("bar", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // ListOffsets response from broker 0: tp0 fails with a retriable error, tp1 succeeds.
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.LEADER_NOT_AVAILABLE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 987L, 789);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // ListOffsets response from broker 1: tp2 succeeds.
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, -1L, 456L, 654);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t2));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        // LEADER_NOT_AVAILABLE is retriable, so the client refreshes metadata and retries tp0.
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // Retried ListOffsets response from broker 0: tp0 now succeeds.
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        partitions.put(tp2, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        // tp0 reflects the retried response; tp1 and tp2 reflect the first round.
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(987L, offsets.get(tp1).offset());
        assertEquals(789, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(456L, offsets.get(tp2).offset());
        assertEquals(654, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp2).timestamp());
    }
}
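The response-building boilerplate repeats across these tests; a hypothetical convenience helper (not part of KafkaAdminClientTest) could compress it, built only from the calls shown above:

// Hypothetical helper: wrap one partition's topic response in a complete ListOffsetsResponse.
private static ListOffsetsResponse singlePartitionListOffsetsResponse(
        TopicPartition tp, Errors error, long timestamp, long offset, int leaderEpoch) {
    ListOffsetsTopicResponse topic =
        ListOffsetsResponse.singletonListOffsetsTopicResponse(tp, error, timestamp, offset, leaderEpoch);
    return new ListOffsetsResponse(
        new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Collections.singletonList(topic)));
}

// Example call, replacing three lines of setup per mocked reply:
env.kafkaClient().prepareResponseFrom(singlePartitionListOffsetsResponse(tp0, Errors.NONE, -1L, 345L, 543), node0);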