
Example 11 with ListOffsetsResultInfo

Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.

From class KafkaAdminClientTest, method testListOffsetsRetriableErrors.

@Test
public void testListOffsetsRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    pInfos.add(new PartitionInfo("foo", 1, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    pInfos.add(new PartitionInfo("bar", 0, node1, new Node[] { node1, node0 }, new Node[] { node1, node0 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    final TopicPartition tp2 = new TopicPartition("bar", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listoffsets response from broker 0
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.LEADER_NOT_AVAILABLE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 987L, 789);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // listoffsets response from broker 1
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, -1L, 456L, 654);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t2));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        // metadata refresh because of LEADER_NOT_AVAILABLE
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listoffsets response from broker 0
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        partitions.put(tp2, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(987L, offsets.get(tp1).offset());
        assertEquals(789, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(456L, offsets.get(tp2).offset());
        assertEquals(654, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp2).timestamp());
    }
}
Also used : ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) HashMap(java.util.HashMap) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
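
The test above relies on the admin client to absorb LEADER_NOT_AVAILABLE by refreshing metadata and retrying on its own. From the application side that machinery is invisible; the following is a minimal sketch, not part of the test, with a placeholder bootstrap address and topic name, showing a caller that simply bounds the internal retries and awaits the result.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListLatestOffset {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder address; point this at a real broker.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Bound how many times retriable errors such as LEADER_NOT_AVAILABLE are retried internally.
        props.put(AdminClientConfig.RETRIES_CONFIG, "5");
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("foo", 0);
            // Retriable broker errors trigger a metadata refresh and retry inside the client.
            Map<TopicPartition, ListOffsetsResultInfo> offsets =
                admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest())).all().get();
            System.out.printf("latest offset for %s = %d%n", tp, offsets.get(tp).offset());
        }
    }
}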

Example 12 with ListOffsetsResultInfo

Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.

From class KafkaAdminClientTest, method testListOffsets.

@Test
public void testListOffsets() throws Exception {
    // Happy path
    Node node0 = new Node(0, "localhost", 8120);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("bar", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("baz", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("qux", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    final Cluster cluster = new Cluster("mockClusterId", Arrays.asList(node0), pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("bar", 0);
    final TopicPartition tp2 = new TopicPartition("baz", 0);
    final TopicPartition tp3 = new TopicPartition("qux", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 234L, 432);
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, 123456789L, 345L, 543);
        ListOffsetsTopicResponse t3 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp3, Errors.NONE, 234567890L, 456L, 654);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1, t2, t3));
        env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData));
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.earliest());
        partitions.put(tp2, OffsetSpec.forTimestamp(System.currentTimeMillis()));
        partitions.put(tp3, OffsetSpec.maxTimestamp());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(321, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(234L, offsets.get(tp1).offset());
        assertEquals(432, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(345L, offsets.get(tp2).offset());
        assertEquals(543, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(123456789L, offsets.get(tp2).timestamp());
        assertEquals(456L, offsets.get(tp3).offset());
        assertEquals(654, offsets.get(tp3).leaderEpoch().get().intValue());
        assertEquals(234567890L, offsets.get(tp3).timestamp());
        assertEquals(offsets.get(tp0), result.partitionResult(tp0).get());
        assertEquals(offsets.get(tp1), result.partitionResult(tp1).get());
        assertEquals(offsets.get(tp2), result.partitionResult(tp2).get());
        assertEquals(offsets.get(tp3), result.partitionResult(tp3).get());
        assertThrows(IllegalArgumentException.class,
            () -> result.partitionResult(new TopicPartition("unknown", 0)).get());
    }
}
Also used : ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) HashMap(java.util.HashMap) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
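
For reference, the four OffsetSpec variants exercised above line up with queries an application might issue as follows. A minimal sketch, assuming "admin" is an already-configured Admin client and the topic names (foo, bar, baz, qux) are illustrative:

static void showOffsetSpecs(Admin admin) throws Exception {
    Map<TopicPartition, OffsetSpec> specs = new HashMap<>();
    // End of the log (the high watermark).
    specs.put(new TopicPartition("foo", 0), OffsetSpec.latest());
    // Log start offset.
    specs.put(new TopicPartition("bar", 0), OffsetSpec.earliest());
    // Earliest offset whose record timestamp is at or after the given timestamp.
    specs.put(new TopicPartition("baz", 0), OffsetSpec.forTimestamp(System.currentTimeMillis()));
    // Offset of the record with the largest timestamp in the partition.
    specs.put(new TopicPartition("qux", 0), OffsetSpec.maxTimestamp());
    ListOffsetsResult result = admin.listOffsets(specs);
    // Per-partition access; as the test shows, asking for a partition that was
    // not part of the request throws IllegalArgumentException.
    ListOffsetsResultInfo fooLatest = result.partitionResult(new TopicPartition("foo", 0)).get();
    System.out.println(fooLatest.offset());
}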

Example 13 with ListOffsetsResultInfo

Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.

From class KafkaAdminClientTest, method testListOffsetsNonMaxTimestampDowngradedImmediately.

@Test
public void testListOffsetsNonMaxTimestampDowngradedImmediately() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] { node }, new Node[] { node }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        // listoffsets response from broker 0
        env.kafkaClient().prepareResponse(request -> request instanceof ListOffsetsRequest, new ListOffsetsResponse(responseData));
        ListOffsetsResult result = env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.latest()));
        ListOffsetsResultInfo tp0Offset = result.partitionResult(tp0).get();
        assertEquals(123L, tp0Offset.offset());
        assertEquals(321, tp0Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp0Offset.timestamp());
    }
}
Also used : ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) TopicPartition(org.apache.kafka.common.TopicPartition) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
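
The immediate downgrade in this test is possible because LATEST can be served by every ListOffsets version; OffsetSpec.maxTimestamp(), by contrast, needs ListOffsets v7 or newer. Below is a hedged caller-side sketch, not from the Kafka sources, assuming "admin" and "tp" are supplied and that a too-old broker surfaces the gap as an org.apache.kafka.common.errors.UnsupportedVersionException wrapped in a java.util.concurrent.ExecutionException:

static ListOffsetsResultInfo maxTimestampOrLatest(Admin admin, TopicPartition tp) throws Exception {
    try {
        return admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.maxTimestamp()))
                .partitionResult(tp).get();
    } catch (ExecutionException e) {
        if (e.getCause() instanceof UnsupportedVersionException) {
            // Fall back to the end offset, which every broker version can serve.
            return admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()))
                    .partitionResult(tp).get();
        }
        throw e;
    }
}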

Example 14 with ListOffsetsResultInfo

Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.

From class HighAvailabilityStreamsPartitionAssignorTest, method createMockAdminClient.

// If you don't care about setting the end offsets for each specific topic partition, the helper method
// getTopicPartitionOffsetMap is useful for building this input map for all partitions; a hedged sketch
// of such a helper follows this example.
private void createMockAdminClient(final Map<TopicPartition, Long> changelogEndOffsets) {
    adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFutureImpl<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = new KafkaFutureImpl<>();
    allFuture.complete(changelogEndOffsets.entrySet().stream().collect(Collectors.toMap(Entry::getKey, t -> {
        final ListOffsetsResultInfo info = EasyMock.createNiceMock(ListOffsetsResultInfo.class);
        expect(info.offset()).andStubReturn(t.getValue());
        EasyMock.replay(info);
        return info;
    })));
    expect(adminClient.listOffsets(anyObject())).andStubReturn(result);
    expect(result.all()).andReturn(allFuture);
    EasyMock.replay(result);
}
Also used : ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) Entry(java.util.Map.Entry) ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) AdminClient(org.apache.kafka.clients.admin.AdminClient)
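
The comment above points at a getTopicPartitionOffsetMap helper whose definition lives elsewhere in the test class and is not shown here. The following is only a hypothetical reconstruction; the signature is an assumption inferred from how createMockAdminClient consumes the map, and the real helper may differ.

// Hypothetical sketch: assign the same end offset to every changelog partition.
private static Map<TopicPartition, Long> getTopicPartitionOffsetMap(final Collection<TopicPartition> partitions,
                                                                    final long endOffset) {
    return partitions.stream().collect(Collectors.toMap(tp -> tp, tp -> endOffset));
}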

Example 15 with ListOffsetsResultInfo

Use of org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo in project kafka by apache.

From class KafkaStreams, method allLocalStorePartitionLags.

protected Map<String, Map<Integer, LagInfo>> allLocalStorePartitionLags(final List<Task> tasksToCollectLagFor) {
    final Map<String, Map<Integer, LagInfo>> localStorePartitionLags = new TreeMap<>();
    final Collection<TopicPartition> allPartitions = new LinkedList<>();
    final Map<TopicPartition, Long> allChangelogPositions = new HashMap<>();
    // Obtain the current positions of all the active-restoring and standby tasks
    for (final Task task : tasksToCollectLagFor) {
        allPartitions.addAll(task.changelogPartitions());
        // Note that not all changelog partitions will have positions, since some may not have started
        allChangelogPositions.putAll(task.changelogOffsets());
    }
    log.debug("Current changelog positions: {}", allChangelogPositions);
    final Map<TopicPartition, ListOffsetsResultInfo> allEndOffsets;
    allEndOffsets = fetchEndOffsets(allPartitions, adminClient);
    log.debug("Current end offsets :{}", allEndOffsets);
    for (final Map.Entry<TopicPartition, ListOffsetsResultInfo> entry : allEndOffsets.entrySet()) {
        // Avoiding an extra admin API lookup by computing lags for not-yet-started restorations
        // from zero instead of the real "earliest offset" for the changelog.
        // This will yield the correct relative order of lagginess for the tasks in the cluster,
        // but it is an over-estimate of how much work remains to restore the task from scratch.
        final long earliestOffset = 0L;
        final long changelogPosition = allChangelogPositions.getOrDefault(entry.getKey(), earliestOffset);
        final long latestOffset = entry.getValue().offset();
        final LagInfo lagInfo = new LagInfo(changelogPosition == Task.LATEST_OFFSET ? latestOffset : changelogPosition, latestOffset);
        final String storeName = streamsMetadataState.getStoreForChangelogTopic(entry.getKey().topic());
        localStorePartitionLags.computeIfAbsent(storeName, ignored -> new TreeMap<>()).put(entry.getKey().partition(), lagInfo);
    }
    return Collections.unmodifiableMap(localStorePartitionLags);
}
Also used : ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) Task(org.apache.kafka.streams.processor.internals.Task) ClientUtils.fetchEndOffsets(org.apache.kafka.streams.processor.internals.ClientUtils.fetchEndOffsets) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) Collections(java.util.Collections) HashMap(java.util.HashMap) LinkedList(java.util.LinkedList) List(java.util.List) Map(java.util.Map) Entry(java.util.Map.Entry) TreeMap(java.util.TreeMap)
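
Applications do not call this protected method directly; the lag map it builds is exposed through the public KafkaStreams#allLocalStorePartitionLags() API. A minimal consumption sketch, assuming "streams" is a running KafkaStreams instance:

import java.util.Map;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.LagInfo;

// Store name -> partition -> lag, as built by the method above.
Map<String, Map<Integer, LagInfo>> lags = streams.allLocalStorePartitionLags();
lags.forEach((store, byPartition) ->
    byPartition.forEach((partition, lag) ->
        // offsetLag() is endOffsetPosition() minus currentOffsetPosition().
        System.out.printf("store=%s partition=%d lag=%d%n", store, partition, lag.offsetLag())));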

Aggregations

ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo): 16 uses
HashMap (java.util.HashMap): 14 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 14 uses
Cluster (org.apache.kafka.common.Cluster): 10 uses
Node (org.apache.kafka.common.Node): 10 uses
PartitionInfo (org.apache.kafka.common.PartitionInfo): 10 uses
ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse): 9 uses
ArrayList (java.util.ArrayList): 8 uses
Map (java.util.Map): 8 uses
ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData): 8 uses
ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse): 8 uses
Test (org.junit.jupiter.api.Test): 8 uses
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 8 uses
HashSet (java.util.HashSet): 6 uses
Collection (java.util.Collection): 5 uses
Collections (java.util.Collections): 5 uses
Set (java.util.Set): 5 uses
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 5 uses
Arrays (java.util.Arrays): 4 uses
List (java.util.List): 4 uses