Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
From class KafkaAdminClientTest, method testListOffsetsRetriableErrorOnMetadata.
@Test
public void testListOffsetsRetriableErrorOnMetadata() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    final Cluster cluster = new Cluster("mockClusterId", nodes,
        Collections.singleton(new PartitionInfo("foo", 0, node, new Node[] { node }, new Node[] { node })),
        Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.NONE));
        // metadata refresh because of UNKNOWN_TOPIC_OR_PARTITION
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listOffsets response from broker 0
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(Collections.singletonList(ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321)));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node);
        ListOffsetsResult result = env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.latest()));
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get(3, TimeUnit.SECONDS);
        assertEquals(1, offsets.size());
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(321, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
    }
}
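Outside the mock test environment, the call pattern this test exercises looks like the following minimal sketch; the bootstrap address, topic, and partition are placeholder assumptions, not values taken from the test above.

// Minimal sketch, assuming a broker at localhost:9092 and a topic "foo".
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (Admin admin = Admin.create(props)) {
    TopicPartition tp = new TopicPartition("foo", 0);
    ListOffsetsResult result = admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()));
    ListOffsetsResultInfo info = result.partitionResult(tp).get();
    System.out.printf("offset=%d epoch=%s timestamp=%d%n", info.offset(), info.leaderEpoch(), info.timestamp());
}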
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
From class KafkaAdminClientTest, method testListOffsetsWithMultiplePartitionsLeaderChange.
@Test
public void testListOffsetsWithMultiplePartitionsLeaderChange() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    Node node2 = new Node(2, "localhost", 8122);
    List<Node> nodes = Arrays.asList(node0, node1, node2);
    final PartitionInfo oldPInfo1 = new PartitionInfo("foo", 0, node0, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
    final PartitionInfo oldPInfo2 = new PartitionInfo("foo", 1, node0, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
    List<PartitionInfo> oldPInfos = Arrays.asList(oldPInfo1, oldPInfo2);
    final Cluster oldCluster = new Cluster("mockClusterId", nodes, oldPInfos, Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(oldCluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(oldCluster, Errors.NONE));
        // broker 0 is the old leader for both partitions and fails both requests
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.LEADER_NOT_AVAILABLE, -2L, 123L, 456);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // the metadata refresh moves leadership of tp0 to node1 and of tp1 to node2
        final PartitionInfo newPInfo1 = new PartitionInfo("foo", 0, node1, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
        final PartitionInfo newPInfo2 = new PartitionInfo("foo", 1, node2, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
        List<PartitionInfo> newPInfos = Arrays.asList(newPInfo1, newPInfo2);
        final Cluster newCluster = new Cluster("mockClusterId", nodes, newPInfos, Collections.emptySet(), Collections.emptySet(), node0);
        env.kafkaClient().prepareResponse(prepareMetadataResponse(newCluster, Errors.NONE));
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -2L, 123L, 456);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node2);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(123L, offsets.get(tp1).offset());
        assertEquals(456, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-2L, offsets.get(tp1).timestamp());
    }
}
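Callers are not forced to go through all(): ListOffsetsResult also exposes a per-partition future, which is useful when partitions should succeed or fail independently. A short sketch reusing tp0 and tp1 from the test above:

// all() only succeeds if every requested partition succeeded;
// partitionResult() isolates failures to the individual partition.
KafkaFuture<ListOffsetsResultInfo> f0 = result.partitionResult(tp0);
KafkaFuture<ListOffsetsResultInfo> f1 = result.partitionResult(tp1);
System.out.println("tp0 end offset: " + f0.get().offset());
System.out.println("tp1 end offset: " + f1.get().offset());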
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
From class AssignmentTestUtils, method createMockAdminClientForAssignor.
// If you don't care about setting the end offsets for each specific topic partition, the helper method
// getTopicPartitionOffsetMap is useful for building this input map for all partitions
public static AdminClient createMockAdminClientForAssignor(final Map<TopicPartition, Long> changelogEndOffsets) {
    final AdminClient adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogEndOffsets.entrySet().stream().collect(Collectors.toMap(Entry::getKey, t -> {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(t.getValue());
        EasyMock.replay(info);
        return info;
    })));
    expect(adminClient.listOffsets(anyObject())).andStubReturn(result);
    expect(result.all()).andStubReturn(allFuture);
    EasyMock.replay(result);
    return adminClient;
}
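A hypothetical call site for this helper might look like the sketch below; the changelog topic name and end offsets are made-up values. Note that the helper replays only the result and info mocks, so the returned admin client mock must still be replayed by the caller:

Map<TopicPartition, Long> changelogEndOffsets = new HashMap<>();
changelogEndOffsets.put(new TopicPartition("appId-store-changelog", 0), 100L);
changelogEndOffsets.put(new TopicPartition("appId-store-changelog", 1), 200L);
AdminClient adminClient = createMockAdminClientForAssignor(changelogEndOffsets);
EasyMock.replay(adminClient); // the helper leaves the admin client un-replayed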
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
From class StreamsPartitionAssignor, method populateClientStatesMap.
/**
 * Builds a map from client to state, and readies each ClientState for assignment by adding any missing prev tasks
 * and computing the per-task overall lag based on the fetched end offsets for each changelog.
 *
 * @param clientStates      a map from each client to its state, including offset lags. Populated by this method.
 * @param clientMetadataMap a map from each client to its full metadata
 * @param taskForPartition  map from topic partition to its corresponding task
 * @param changelogTopics   object that manages changelog topics
 *
 * @return whether we were able to successfully fetch the changelog end offsets and compute each client's lag
 */
private boolean populateClientStatesMap(final Map<UUID, ClientState> clientStates,
                                        final Map<UUID, ClientMetadata> clientMetadataMap,
                                        final Map<TopicPartition, TaskId> taskForPartition,
                                        final ChangelogTopics changelogTopics) {
    boolean fetchEndOffsetsSuccessful;
    Map<TaskId, Long> allTaskEndOffsetSums;
    try {
        // Make the listOffsets request first so it can fetch the offsets for non-source changelogs
        // asynchronously while we use the blocking Consumer#committed call to fetch source-changelog offsets
        final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> endOffsetsFuture =
            fetchEndOffsetsFuture(changelogTopics.preExistingNonSourceTopicBasedPartitions(), adminClient);
        final Map<TopicPartition, Long> sourceChangelogEndOffsets =
            fetchCommittedOffsets(changelogTopics.preExistingSourceTopicBasedPartitions(), mainConsumerSupplier.get());
        final Map<TopicPartition, ListOffsetsResultInfo> endOffsets = ClientUtils.getEndOffsets(endOffsetsFuture);
        allTaskEndOffsetSums = computeEndOffsetSumsByTask(endOffsets, sourceChangelogEndOffsets, changelogTopics);
        fetchEndOffsetsSuccessful = true;
    } catch (final StreamsException | TimeoutException e) {
        // fall back to marking every stateful task's end-offset sum as unknown if the fetch fails or times out
        allTaskEndOffsetSums = changelogTopics.statefulTaskIds().stream().collect(Collectors.toMap(t -> t, t -> UNKNOWN_OFFSET_SUM));
        fetchEndOffsetsSuccessful = false;
    }
    for (final Map.Entry<UUID, ClientMetadata> entry : clientMetadataMap.entrySet()) {
        final UUID uuid = entry.getKey();
        final ClientState state = entry.getValue().state;
        state.initializePrevTasks(taskForPartition, taskManager.topologyMetadata().hasNamedTopologies());
        state.computeTaskLags(uuid, allTaskEndOffsetSums);
        clientStates.put(uuid, state);
    }
    return fetchEndOffsetsSuccessful;
}
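The overlap described in the try block reduces to a simple pattern: start the non-blocking admin request, perform the blocking consumer fetch while it is in flight, then materialize the admin future. A minimal sketch, assuming adminClient, consumer, and the two partition sets are already in scope:

// Kick off the async listOffsets request first...
KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> endOffsetsFuture =
    adminClient.listOffsets(nonSourcePartitions.stream().collect(Collectors.toMap(tp -> tp, tp -> OffsetSpec.latest()))).all();
// ...do the blocking committed-offsets fetch while the admin request is in flight...
Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(sourcePartitions);
// ...and only then block on the admin result.
Map<TopicPartition, ListOffsetsResultInfo> endOffsets = endOffsetsFuture.get();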
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
From class KafkaAdminClientTest, method testListOffsetsWithLeaderChange.
@Test
public void testListOffsetsWithLeaderChange() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    Node node2 = new Node(2, "localhost", 8122);
    List<Node> nodes = Arrays.asList(node0, node1, node2);
    final PartitionInfo oldPartitionInfo = new PartitionInfo("foo", 0, node0, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
    final Cluster oldCluster = new Cluster("mockClusterId", nodes, singletonList(oldPartitionInfo), Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(oldCluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(oldCluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // updating leader from node0 to node1 and metadata refresh because of NOT_LEADER_OR_FOLLOWER
        final PartitionInfo newPartitionInfo = new PartitionInfo("foo", 0, node1, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
        final Cluster newCluster = new Cluster("mockClusterId", nodes, singletonList(newPartitionInfo), Collections.emptySet(), Collections.emptySet(), node0);
        env.kafkaClient().prepareResponse(prepareMetadataResponse(newCluster, Errors.NONE));
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(456, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-2L, offsets.get(tp0).timestamp());
    }
}