Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsRetriableErrors.
@Test
public void testListOffsetsRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] {node0, node1}, new Node[] {node0, node1}));
    pInfos.add(new PartitionInfo("foo", 1, node0, new Node[] {node0, node1}, new Node[] {node0, node1}));
    pInfos.add(new PartitionInfo("bar", 0, node1, new Node[] {node1, node0}, new Node[] {node1, node0}));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    final TopicPartition tp2 = new TopicPartition("bar", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // ListOffsets response from broker 0: tp0 fails with a retriable error, tp1 succeeds
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.LEADER_NOT_AVAILABLE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 987L, 789);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // ListOffsets response from broker 1
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, -1L, 456L, 654);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t2));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        // Metadata refresh triggered by the LEADER_NOT_AVAILABLE error
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // Retried ListOffsets response from broker 0 for tp0
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        partitions.put(tp2, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(987L, offsets.get(tp1).offset());
        assertEquals(789, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(456L, offsets.get(tp2).offset());
        assertEquals(654, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp2).timestamp());
    }
}
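For reference, the same API is easy to exercise outside the mock environment. Below is a minimal sketch (bootstrap address and topic are placeholders; imports match the tests above, plus java.util.Properties) of querying the latest offset of a single partition with a real Admin client:

Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
try (Admin admin = Admin.create(props)) {
    TopicPartition tp = new TopicPartition("foo", 0);
    ListOffsetsResult result = admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()));
    ListOffsetsResultInfo info = result.partitionResult(tp).get();
    // offset() is the end offset, timestamp() is -1 for latest/earliest queries,
    // and leaderEpoch() is an Optional<Integer>
    System.out.printf("offset=%d timestamp=%d epoch=%s%n", info.offset(), info.timestamp(), info.leaderEpoch());
}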
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsets.
@Test
public void testListOffsets() throws Exception {
    // Happy path
    Node node0 = new Node(0, "localhost", 8120);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] {node0}, new Node[] {node0}));
    pInfos.add(new PartitionInfo("bar", 0, node0, new Node[] {node0}, new Node[] {node0}));
    pInfos.add(new PartitionInfo("baz", 0, node0, new Node[] {node0}, new Node[] {node0}));
    pInfos.add(new PartitionInfo("qux", 0, node0, new Node[] {node0}, new Node[] {node0}));
    final Cluster cluster = new Cluster("mockClusterId", Arrays.asList(node0), pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("bar", 0);
    final TopicPartition tp2 = new TopicPartition("baz", 0);
    final TopicPartition tp3 = new TopicPartition("qux", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 234L, 432);
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, 123456789L, 345L, 543);
        ListOffsetsTopicResponse t3 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp3, Errors.NONE, 234567890L, 456L, 654);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1, t2, t3));
        env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData));
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.earliest());
        partitions.put(tp2, OffsetSpec.forTimestamp(System.currentTimeMillis()));
        partitions.put(tp3, OffsetSpec.maxTimestamp());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(321, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(234L, offsets.get(tp1).offset());
        assertEquals(432, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(345L, offsets.get(tp2).offset());
        assertEquals(543, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(123456789L, offsets.get(tp2).timestamp());
        assertEquals(456L, offsets.get(tp3).offset());
        assertEquals(654, offsets.get(tp3).leaderEpoch().get().intValue());
        assertEquals(234567890L, offsets.get(tp3).timestamp());
        assertEquals(offsets.get(tp0), result.partitionResult(tp0).get());
        assertEquals(offsets.get(tp1), result.partitionResult(tp1).get());
        assertEquals(offsets.get(tp2), result.partitionResult(tp2).get());
        assertEquals(offsets.get(tp3), result.partitionResult(tp3).get());
        try {
            result.partitionResult(new TopicPartition("unknown", 0)).get();
            fail("should have thrown IllegalArgumentException");
        } catch (IllegalArgumentException expected) {
        }
    }
}
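The happy path above exercises all four OffsetSpec variants in a single request. As a quick annotated reference (a sketch reusing the partitions from the test), the specs map to the following queries:

long ts = System.currentTimeMillis();
Map<TopicPartition, OffsetSpec> specs = new HashMap<>();
specs.put(tp0, OffsetSpec.latest());         // the partition's end offset
specs.put(tp1, OffsetSpec.earliest());       // the partition's log start offset
specs.put(tp2, OffsetSpec.forTimestamp(ts)); // earliest offset whose record timestamp is >= ts
specs.put(tp3, OffsetSpec.maxTimestamp());   // offset of the record with the largest timestamp (KIP-734)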
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
The class KafkaAdminClientTest, method testListOffsetsNonMaxTimestampDowngradedImmediately.
@Test
public void testListOffsetsNonMaxTimestampDowngradedImmediately() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] {node}, new Node[] {node}));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        // ListOffsets response from broker 0
        env.kafkaClient().prepareResponse(request -> request instanceof ListOffsetsRequest, new ListOffsetsResponse(responseData));
        ListOffsetsResult result = env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.latest()));
        ListOffsetsResultInfo tp0Offset = result.partitionResult(tp0).get();
        assertEquals(123L, tp0Offset.offset());
        assertEquals(321, tp0Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp0Offset.timestamp());
    }
}
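The pinned version range is the point of this test: MAX_TIMESTAMP support was only added in ListOffsets v7 (KIP-734), so a plain latest-offset query against a v0-v6 broker is downgraded immediately rather than failing. By contrast, a hypothetical sketch of the unsupported case under the same version pinning would be expected to fail the returned future:

// With LIST_OFFSETS pinned to v0-v6 there is no version that can serve
// MAX_TIMESTAMP, so the future should complete exceptionally
// (hypothetical sketch, not an actual test from the code base).
ListOffsetsResult unsupported = env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.maxTimestamp()));
assertThrows(ExecutionException.class, () -> unsupported.partitionResult(tp0).get());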
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
The class HighAvailabilityStreamsPartitionAssignorTest, method createMockAdminClient.
// If you don't care about setting the end offsets for each specific topic partition, the helper method
// getTopicPartitionOffsetMap is useful for building this input map for all partitions
private void createMockAdminClient(final Map<TopicPartition, Long> changelogEndOffsets) {
    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogEndOffsets.entrySet().stream().collect(Collectors.toMap(Entry::getKey, t -> {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(t.getValue());
        EasyMock.replay(info);
        return info;
    })));
    expect(adminClient.listOffsets(anyObject())).andStubReturn(result);
    expect(result.all()).andReturn(allFuture);
    EasyMock.replay(result);
}
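Kafka's tests have been migrating from EasyMock to Mockito; a hypothetical Mockito rendering of the same helper (same fields as the test class above, with org.mockito.Mockito imported) could look like this:

private void createMockAdminClient(final Map<TopicPartition, Long> changelogEndOffsets) {
    adminClient = Mockito.mock(AdminClient.class);
    final ListOffsetsResult result = Mockito.mock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogEndOffsets.entrySet().stream().collect(Collectors.toMap(Entry::getKey, t -> {
        // Stub only offset(); other accessors keep Mockito's defaults
        final ListOffsetsResultInfo info = Mockito.mock(ListOffsetsResultInfo.class);
        Mockito.when(info.offset()).thenReturn(t.getValue());
        return info;
    })));
    Mockito.when(adminClient.listOffsets(Mockito.any())).thenReturn(result);
    Mockito.when(result.all()).thenReturn(allFuture);
}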
Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.
The class KafkaStreams, method allLocalStorePartitionLags.
protected Map<String, Map<Integer, LagInfo>> allLocalStorePartitionLags(final List<Task> tasksToCollectLagFor) {
    final Map<String, Map<Integer, LagInfo>> localStorePartitionLags = new TreeMap<>();
    final Collection<TopicPartition> allPartitions = new LinkedList<>();
    final Map<TopicPartition, Long> allChangelogPositions = new HashMap<>();
    // Obtain the current positions of all the active-restoring and standby tasks
    for (final Task task : tasksToCollectLagFor) {
        allPartitions.addAll(task.changelogPartitions());
        // Note that not all changelog partitions will have positions, since some may not have started yet
        allChangelogPositions.putAll(task.changelogOffsets());
    }
    log.debug("Current changelog positions: {}", allChangelogPositions);
    final Map<TopicPartition, ListOffsetsResultInfo> allEndOffsets = fetchEndOffsets(allPartitions, adminClient);
    log.debug("Current end offsets: {}", allEndOffsets);
    for (final Map.Entry<TopicPartition, ListOffsetsResultInfo> entry : allEndOffsets.entrySet()) {
        // Avoid an extra admin API lookup by computing lags for not-yet-started restorations
        // from zero instead of the real "earliest offset" of the changelog.
        // This yields the correct relative order of lagginess for the tasks in the cluster,
        // but it over-estimates how much work remains to restore the task from scratch.
        final long earliestOffset = 0L;
        final long changelogPosition = allChangelogPositions.getOrDefault(entry.getKey(), earliestOffset);
        final long latestOffset = entry.getValue().offset();
        final LagInfo lagInfo = new LagInfo(changelogPosition == Task.LATEST_OFFSET ? latestOffset : changelogPosition, latestOffset);
        final String storeName = streamsMetadataState.getStoreForChangelogTopic(entry.getKey().topic());
        localStorePartitionLags.computeIfAbsent(storeName, ignored -> new TreeMap<>()).put(entry.getKey().partition(), lagInfo);
    }
    return Collections.unmodifiableMap(localStorePartitionLags);
}
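fetchEndOffsets here is a Streams-internal convenience around Admin#listOffsets; with the public API alone, the same end-offset lookup can be sketched as follows (illustrative names, assuming java.util.function.Function and java.util.stream.Collectors are imported):

// One admin round trip resolving the end offset of every changelog partition
final Map<TopicPartition, OffsetSpec> latestSpecs = allPartitions.stream()
    .collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest()));
final Map<TopicPartition, ListOffsetsResultInfo> endOffsets =
    adminClient.listOffsets(latestSpecs).all().get();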