
Example 16 with ListOffsetsTopicResponse

Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse in project kafka by apache, from the class KafkaAdminClientTest, method testListOffsetsWithLeaderChange.

@Test
public void testListOffsetsWithLeaderChange() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    Node node2 = new Node(2, "localhost", 8122);
    List<Node> nodes = Arrays.asList(node0, node1, node2);
    final PartitionInfo oldPartitionInfo = new PartitionInfo("foo", 0, node0, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
    final Cluster oldCluster = new Cluster("mockClusterId", nodes, singletonList(oldPartitionInfo), Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(oldCluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(oldCluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // updating leader from node0 to node1 and metadata refresh because of NOT_LEADER_OR_FOLLOWER
        final PartitionInfo newPartitionInfo = new PartitionInfo("foo", 0, node1, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
        final Cluster newCluster = new Cluster("mockClusterId", nodes, singletonList(newPartitionInfo), Collections.emptySet(), Collections.emptySet(), node0);
        env.kafkaClient().prepareResponse(prepareMetadataResponse(newCluster, Errors.NONE));
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(456, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-2L, offsets.get(tp0).timestamp());
    }
}
Also used: ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo), HashMap (java.util.HashMap), ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse), Node (org.apache.kafka.common.Node), Cluster (org.apache.kafka.common.Cluster), ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse), PartitionInfo (org.apache.kafka.common.PartitionInfo), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
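
For context, a minimal caller-side sketch of the API this test exercises, assuming a placeholder bootstrap address, topic name, and class name that are not part of the test: from the application's point of view a leader change is invisible, because the admin client refreshes metadata and retries the partition against the new leader, which is what the mocked responses above simulate.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class LatestOffsetExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address; replace with a real broker list.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("foo", 0);
            // Ask for the latest offset; retriable errors such as NOT_LEADER_OR_FOLLOWER
            // are retried internally after a metadata refresh.
            Map<TopicPartition, ListOffsetsResultInfo> offsets =
                admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest())).all().get();
            System.out.println("latest offset of " + tp + " = " + offsets.get(tp).offset());
        }
    }
}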

Example 17 with ListOffsetsTopicResponse

Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse in project kafka by apache, from the class KafkaAdminClientTest, method testListOffsetsNonRetriableErrors.

@Test
public void testListOffsetsNonRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.TOPIC_AUTHORIZATION_FAILED, -1L, -1L, -1);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData));
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        TestUtils.assertFutureError(result.all(), TopicAuthorizationException.class);
    }
}
Also used: HashMap (java.util.HashMap), ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), Cluster (org.apache.kafka.common.Cluster), ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse), PartitionInfo (org.apache.kafka.common.PartitionInfo), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
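
For the non-retriable case, a hedged sketch of how a caller might surface the failure; the bootstrap address, topic name, and class name are illustrative placeholders. The error is not retried and arrives as the cause of the ExecutionException thrown by the future.

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TopicAuthorizationException;

public class ListOffsetsErrorHandling {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("foo", 0);
            try {
                admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.latest())).all().get();
            } catch (ExecutionException e) {
                // Non-retriable errors such as TOPIC_AUTHORIZATION_FAILED are not retried;
                // they are reported as the cause of the future's ExecutionException.
                if (e.getCause() instanceof TopicAuthorizationException) {
                    System.err.println("Not authorized to describe topic: " + e.getCause().getMessage());
                } else {
                    throw new RuntimeException(e.getCause());
                }
            }
        }
    }
}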

Example 18 with ListOffsetsTopicResponse

Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse in project kafka by apache, from the class KafkaAdminClientTest, method testListOffsetsRetriableErrors.

@Test
public void testListOffsetsRetriableErrors() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    pInfos.add(new PartitionInfo("foo", 1, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    pInfos.add(new PartitionInfo("bar", 0, node1, new Node[] { node1, node0 }, new Node[] { node1, node0 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    final TopicPartition tp2 = new TopicPartition("bar", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listoffsets response from broker 0
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.LEADER_NOT_AVAILABLE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 987L, 789);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        // listoffsets response from broker 1
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, -1L, 456L, 654);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t2));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        // metadata refresh because of LEADER_NOT_AVAILABLE
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // listoffsets response from broker 0
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        partitions.put(tp2, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(987L, offsets.get(tp1).offset());
        assertEquals(789, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(456L, offsets.get(tp2).offset());
        assertEquals(654, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp2).timestamp());
    }
}
Also used: ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo), HashMap (java.util.HashMap), ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), Cluster (org.apache.kafka.common.Cluster), ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse), PartitionInfo (org.apache.kafka.common.PartitionInfo), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
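
A hedged usage sketch for the multi-partition case, with placeholder topic names, bootstrap address, and class name: per-partition futures obtained from partitionResult() complete independently as each broker responds, while retriable errors such as LEADER_NOT_AVAILABLE are retried internally before the affected partition's future completes.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class PerPartitionOffsets {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, OffsetSpec> query = new HashMap<>();
            query.put(new TopicPartition("foo", 0), OffsetSpec.latest());
            query.put(new TopicPartition("foo", 1), OffsetSpec.latest());
            query.put(new TopicPartition("bar", 0), OffsetSpec.latest());
            ListOffsetsResult result = admin.listOffsets(query);
            // Each partition has its own future; retriable errors are handled by the
            // client before the corresponding future completes.
            for (TopicPartition tp : query.keySet()) {
                ListOffsetsResultInfo info = result.partitionResult(tp).get();
                System.out.printf("%s -> offset=%d, leaderEpoch=%s%n",
                        tp, info.offset(), info.leaderEpoch());
            }
        }
    }
}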

Example 19 with ListOffsetsTopicResponse

Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse in project kafka by apache, from the class KafkaAdminClientTest, method testListOffsets.

@Test
public void testListOffsets() throws Exception {
    // Happy path
    Node node0 = new Node(0, "localhost", 8120);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("bar", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("baz", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    pInfos.add(new PartitionInfo("qux", 0, node0, new Node[] { node0 }, new Node[] { node0 }));
    final Cluster cluster = new Cluster("mockClusterId", Arrays.asList(node0), pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("bar", 0);
    final TopicPartition tp2 = new TopicPartition("baz", 0);
    final TopicPartition tp3 = new TopicPartition("qux", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 234L, 432);
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, 123456789L, 345L, 543);
        ListOffsetsTopicResponse t3 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp3, Errors.NONE, 234567890L, 456L, 654);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1, t2, t3));
        env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData));
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.earliest());
        partitions.put(tp2, OffsetSpec.forTimestamp(System.currentTimeMillis()));
        partitions.put(tp3, OffsetSpec.maxTimestamp());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(321, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(234L, offsets.get(tp1).offset());
        assertEquals(432, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(345L, offsets.get(tp2).offset());
        assertEquals(543, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(123456789L, offsets.get(tp2).timestamp());
        assertEquals(456L, offsets.get(tp3).offset());
        assertEquals(654, offsets.get(tp3).leaderEpoch().get().intValue());
        assertEquals(234567890L, offsets.get(tp3).timestamp());
        assertEquals(offsets.get(tp0), result.partitionResult(tp0).get());
        assertEquals(offsets.get(tp1), result.partitionResult(tp1).get());
        assertEquals(offsets.get(tp2), result.partitionResult(tp2).get());
        assertEquals(offsets.get(tp3), result.partitionResult(tp3).get());
        try {
            result.partitionResult(new TopicPartition("unknown", 0)).get();
            fail("should have thrown IllegalArgumentException");
        } catch (IllegalArgumentException expected) {
        }
    }
}
Also used: ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo), HashMap (java.util.HashMap), ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), Cluster (org.apache.kafka.common.Cluster), ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse), PartitionInfo (org.apache.kafka.common.PartitionInfo), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
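
The happy-path test touches all four OffsetSpec variants. The sketch below (placeholder topics, bootstrap address, and class name) shows them side by side; as the assertions above reflect, latest() and earliest() report a timestamp of -1, while forTimestamp() and maxTimestamp() report the timestamp of the record whose offset is returned.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class OffsetSpecVariants {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, OffsetSpec> query = new HashMap<>();
            query.put(new TopicPartition("foo", 0), OffsetSpec.latest());   // end offset, timestamp -1
            query.put(new TopicPartition("bar", 0), OffsetSpec.earliest()); // log start offset, timestamp -1
            // forTimestamp: earliest offset whose record timestamp is >= the given timestamp
            query.put(new TopicPartition("baz", 0),
                    OffsetSpec.forTimestamp(System.currentTimeMillis() - 3_600_000L));
            // maxTimestamp: offset of the record with the largest timestamp in the partition
            query.put(new TopicPartition("qux", 0), OffsetSpec.maxTimestamp());
            for (Map.Entry<TopicPartition, ListOffsetsResultInfo> e :
                    admin.listOffsets(query).all().get().entrySet()) {
                System.out.printf("%s -> offset=%d, timestamp=%d%n",
                        e.getKey(), e.getValue().offset(), e.getValue().timestamp());
            }
        }
    }
}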

Example 20 with ListOffsetsTopicResponse

Use of org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse in project kafka by apache, from the class KafkaAdminClientTest, method testListOffsetsNonMaxTimestampDowngradedImmediately.

@Test
public void testListOffsetsNonMaxTimestampDowngradedImmediately() throws Exception {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] { node }, new Node[] { node }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 6));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        // listoffsets response from broker 0
        env.kafkaClient().prepareResponse(request -> request instanceof ListOffsetsRequest, new ListOffsetsResponse(responseData));
        ListOffsetsResult result = env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.latest()));
        ListOffsetsResultInfo tp0Offset = result.partitionResult(tp0).get();
        assertEquals(123L, tp0Offset.offset());
        assertEquals(321, tp0Offset.leaderEpoch().get().intValue());
        assertEquals(-1L, tp0Offset.timestamp());
    }
}
Also used: ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo), ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse), Node (org.apache.kafka.common.Node), ArrayList (java.util.ArrayList), Cluster (org.apache.kafka.common.Cluster), ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData), ListOffsetsRequest (org.apache.kafka.common.requests.ListOffsetsRequest), TopicPartition (org.apache.kafka.common.TopicPartition), ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse), PartitionInfo (org.apache.kafka.common.PartitionInfo), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
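
This test pins the broker's advertised ListOffsets versions to 0 through 6 and checks that a plain latest() request is downgraded and answered rather than failed. By contrast, OffsetSpec.maxTimestamp() relies on a newer ListOffsets version, so against an older broker it may fail with an UnsupportedVersionException; the guard below is a sketch of caller-side handling under that assumption, with a placeholder bootstrap address, topic, and class name.

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;

public class MaxTimestampGuard {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp = new TopicPartition("foo", 0);
            try {
                long offset = admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.maxTimestamp()))
                        .partitionResult(tp).get().offset();
                System.out.println("offset with max timestamp: " + offset);
            } catch (ExecutionException e) {
                // Assumption: brokers that do not support the MAX_TIMESTAMP spec surface
                // an UnsupportedVersionException as the future's cause.
                if (e.getCause() instanceof UnsupportedVersionException) {
                    System.err.println("Broker too old for OffsetSpec.maxTimestamp(): " + e.getCause().getMessage());
                } else {
                    throw new RuntimeException(e.getCause());
                }
            }
        }
    }
}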

Aggregations

ListOffsetsTopicResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) 23
ListOffsetsResponseData (org.apache.kafka.common.message.ListOffsetsResponseData) 20
TopicPartition (org.apache.kafka.common.TopicPartition) 18
ListOffsetsResponse (org.apache.kafka.common.requests.ListOffsetsResponse) 18
Test (org.junit.jupiter.api.Test) 16
HashMap (java.util.HashMap) 15
ListOffsetsPartitionResponse (org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) 13
Node (org.apache.kafka.common.Node) 12
ArrayList (java.util.ArrayList) 11
Cluster (org.apache.kafka.common.Cluster) 10
PartitionInfo (org.apache.kafka.common.PartitionInfo) 10
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 9
ListOffsetsResultInfo (org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) 8
LinkedHashMap (java.util.LinkedHashMap) 6
ListOffsetsPartition (org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) 5
ListOffsetsTopic (org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) 5
Errors (org.apache.kafka.common.protocol.Errors) 5
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse) 5
Map (java.util.Map) 4
HashSet (java.util.HashSet) 3