Search in sources:

Example 1 with ListConsumerLagsResponse

Use of io.confluent.kafkarest.entities.v3.ListConsumerLagsResponse in the kafka-rest project by confluentinc.

From the class ConsumerLagsResourceTest, the method listConsumerLags_returnsConsumerLags:

@Test
public void listConsumerLags_returnsConsumerLags() {
    expect(consumerLagManager.listConsumerLags(CLUSTER_ID, CONSUMER_GROUP_ID)).andReturn(completedFuture(CONSUMER_LAG_LIST));
    replay(consumerLagManager);
    FakeAsyncResponse response = new FakeAsyncResponse();
    consumerLagsResource.listConsumerLags(response, CLUSTER_ID, CONSUMER_GROUP_ID);
    ListConsumerLagsResponse expected =
        ListConsumerLagsResponse.create(
            ConsumerLagDataList.builder()
                .setMetadata(
                    ResourceCollection.Metadata.builder()
                        .setSelf("/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags")
                        .build())
                .setData(
                    Arrays.asList(
                        ConsumerLagData.fromConsumerLag(CONSUMER_LAG_2)
                            .setMetadata(
                                Resource.Metadata.builder()
                                    .setSelf("/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/2")
                                    .setResourceName("crn:///kafka=cluster-1/consumer-group=consumer-group-1/lag=topic-1/partition=2")
                                    .build())
                            .build(),
                        ConsumerLagData.fromConsumerLag(CONSUMER_LAG_1)
                            .setMetadata(
                                Resource.Metadata.builder()
                                    .setSelf("/v3/clusters/cluster-1/consumer-groups/consumer-group-1/lags/topic-1/partitions/1")
                                    .setResourceName("crn:///kafka=cluster-1/consumer-group=consumer-group-1/lag=topic-1/partition=1")
                                    .build())
                            .build()))
                .build());
    assertEquals(expected, response.getValue());
}
Also used : FakeAsyncResponse(io.confluent.kafkarest.response.FakeAsyncResponse) ListConsumerLagsResponse(io.confluent.kafkarest.entities.v3.ListConsumerLagsResponse) Test(org.junit.jupiter.api.Test)
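
The test above depends on EasyMock's record/replay cycle: the expected call on the ConsumerLagManager mock is recorded with expect(...).andReturn(...), replay(...) switches the mock into replay mode, and the resource is then exercised against it while FakeAsyncResponse captures the value the resource resumes with. The following is a minimal, self-contained sketch of that record/replay pattern against a hypothetical LagLookup interface (not part of kafka-rest), shown only to illustrate the mechanics.

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

public class EasyMockSketch {

    // Hypothetical collaborator, standing in for ConsumerLagManager.
    interface LagLookup {
        long lagFor(String topic, int partition);
    }

    public static void main(String[] args) {
        LagLookup lookup = createMock(LagLookup.class);
        // Record the expected call and its stubbed return value.
        expect(lookup.lagFor("topic-1", 2)).andReturn(100L);
        // Switch the mock from record mode to replay mode.
        replay(lookup);
        // Exercise the mock exactly as the code under test would.
        long lag = lookup.lagFor("topic-1", 2);
        System.out.println("lag = " + lag);
        // Fail if any recorded expectation was not met.
        verify(lookup);
    }
}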

Example 2 with ListConsumerLagsResponse

Use of io.confluent.kafkarest.entities.v3.ListConsumerLagsResponse in the kafka-rest project by confluentinc.

From the class ConsumerLagsResource, the method listConsumerLags:

@GET
@Produces(MediaType.APPLICATION_JSON)
@PerformanceMetric("v3.consumer-lags.list")
@ResourceName("api.v3.consumer-lags.list")
public void listConsumerLags(
        @Suspended AsyncResponse asyncResponse,
        @PathParam("clusterId") String clusterId,
        @PathParam("consumerGroupId") String consumerGroupId) {
    CompletableFuture<ListConsumerLagsResponse> response =
        consumerLagManager.get()
            .listConsumerLags(clusterId, consumerGroupId)
            .thenApply(
                lags -> {
                    if (lags.isEmpty()) {
                        throw new NotFoundException("Consumer lags not found.");
                    }
                    return lags;
                })
            .thenApply(
                lags ->
                    ListConsumerLagsResponse.create(
                        ConsumerLagDataList.builder()
                            .setMetadata(
                                ResourceCollection.Metadata.builder()
                                    .setSelf(
                                        urlFactory.create(
                                            "v3", "clusters", clusterId,
                                            "consumer-groups", consumerGroupId, "lags"))
                                    .build())
                            .setData(
                                lags.stream()
                                    .map(this::toConsumerLagData)
                                    .sorted(
                                        Comparator.comparing(ConsumerLagData::getLag).reversed()
                                            .thenComparing(ConsumerLagData::getTopicName)
                                            .thenComparing(ConsumerLagData::getPartitionId))
                                    .collect(Collectors.toList()))
                            .build()));
    AsyncResponses.asyncResume(asyncResponse, response);
}
Also used : PathParam(javax.ws.rs.PathParam) ConsumerLagManager(io.confluent.kafkarest.controllers.ConsumerLagManager) Provider(javax.inject.Provider) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET) Path(javax.ws.rs.Path) CrnFactory(io.confluent.kafkarest.response.CrnFactory) CompletableFuture(java.util.concurrent.CompletableFuture) PerformanceMetric(io.confluent.rest.annotations.PerformanceMetric) GetConsumerLagResponse(io.confluent.kafkarest.entities.v3.GetConsumerLagResponse) Inject(javax.inject.Inject) ConsumerLag(io.confluent.kafkarest.entities.ConsumerLag) ResourceCollection(io.confluent.kafkarest.entities.v3.ResourceCollection) UrlFactory(io.confluent.kafkarest.response.UrlFactory) MediaType(javax.ws.rs.core.MediaType) Resource(io.confluent.kafkarest.entities.v3.Resource) Objects.requireNonNull(java.util.Objects.requireNonNull) ConsumerLagData(io.confluent.kafkarest.entities.v3.ConsumerLagData) ListConsumerLagsResponse(io.confluent.kafkarest.entities.v3.ListConsumerLagsResponse) AsyncResponses(io.confluent.kafkarest.resources.AsyncResponses) AsyncResponse(javax.ws.rs.container.AsyncResponse) ResourceName(io.confluent.kafkarest.extension.ResourceAccesslistFeature.ResourceName) ConsumerLagDataList(io.confluent.kafkarest.entities.v3.ConsumerLagDataList) Collectors(java.util.stream.Collectors) Suspended(javax.ws.rs.container.Suspended) NotFoundException(javax.ws.rs.NotFoundException) Comparator(java.util.Comparator)
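
The resource above serves GET /v3/clusters/{clusterId}/consumer-groups/{consumerGroupId}/lags, produces JSON, and resumes the suspended AsyncResponse once the CompletableFuture completes. Below is a minimal sketch of calling that endpoint with the standard JAX-RS client API; the base URL, cluster id, and consumer group id are placeholder assumptions for illustration only.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.MediaType;

public class ListConsumerLagsClientSketch {

    public static void main(String[] args) {
        // Assumed REST Proxy address; adjust to your deployment.
        String baseUrl = "http://localhost:8082";
        Client client = ClientBuilder.newClient();
        try {
            // Resolve the path templates and request the lag list as JSON.
            String json =
                client.target(baseUrl)
                    .path("v3/clusters/{clusterId}/consumer-groups/{consumerGroupId}/lags")
                    .resolveTemplate("clusterId", "cluster-1")
                    .resolveTemplate("consumerGroupId", "consumer-group-1")
                    .request(MediaType.APPLICATION_JSON)
                    .get(String.class);
            System.out.println(json);
        } finally {
            client.close();
        }
    }
}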

Example 3 with ListConsumerLagsResponse

Use of io.confluent.kafkarest.entities.v3.ListConsumerLagsResponse in the kafka-rest project by confluentinc.

From the class ConsumerLagsResourceIntegrationTest, the method listConsumerLags_returnsConsumerLags:

@Test
public void listConsumerLags_returnsConsumerLags() {
    // produce to topic1 partition0 and topic2 partition1
    BinaryPartitionProduceRequest request1 = BinaryPartitionProduceRequest.create(partitionRecordsWithoutKeys);
    produce(topic1, 0, request1);
    produce(topic2, 1, request1);
    // stores expected currentOffsets and logEndOffsets for each topic partition after sending
    // 3 records to topic1 partition0 and topic2 partition1
    long[][] expectedOffsets = new long[numTopics][numPartitions];
    expectedOffsets[0][0] = 3;
    expectedOffsets[1][1] = 3;
    // all other values default to 0L
    KafkaConsumer<?, ?> consumer1 = createConsumer(group1, "client-1");
    KafkaConsumer<?, ?> consumer2 = createConsumer(group1, "client-2");
    consumer1.subscribe(Collections.singletonList(topic1));
    consumer2.subscribe(Collections.singletonList(topic2));
    consumer1.poll(Duration.ofSeconds(5));
    consumer2.poll(Duration.ofSeconds(5));
    // After polling once, only one of the consumers will be member of the group, so we poll again
    // to force the other consumer to join the group.
    consumer1.poll(Duration.ofSeconds(5));
    consumer2.poll(Duration.ofSeconds(5));
    // commit offsets from consuming from subscribed topics
    consumer1.commitSync();
    consumer2.commitSync();
    testWithRetry(() -> {
        Response response = request("/v3/clusters/" + clusterId + "/consumer-groups/" + group1 + "/lags").accept(MediaType.APPLICATION_JSON).get();
        assertEquals(Status.OK.getStatusCode(), response.getStatus());
        ConsumerLagDataList consumerLagDataList = response.readEntity(ListConsumerLagsResponse.class).getValue();
        // checks offsets and lag match what is expected for each topic partition
        for (int t = 0; t < numTopics; t++) {
            for (int p = 0; p < numPartitions; p++) {
                final int finalT = t;
                final int finalP = p;
                ConsumerLagData consumerLagData =
                    consumerLagDataList.getData().stream()
                        .filter(lagData -> lagData.getTopicName().equals(topics[finalT]))
                        .filter(lagData -> lagData.getPartitionId() == finalP)
                        .findAny()
                        .get();
                assertEquals(expectedOffsets[t][p], (long) consumerLagData.getCurrentOffset());
                assertEquals(expectedOffsets[t][p], (long) consumerLagData.getLogEndOffset());
                assertEquals(0, (long) consumerLagData.getLag());
            }
        }
    });
    // produce again to topic2 partition1
    BinaryPartitionProduceRequest request2 = BinaryPartitionProduceRequest.create(partitionRecordsWithoutKeys);
    produce(topic2, 1, request2);
    Response response2 = request("/v3/clusters/" + clusterId + "/consumer-groups/" + group1 + "/lags").accept(MediaType.APPLICATION_JSON).get();
    ListConsumerLagsResponse expected =
        ListConsumerLagsResponse.create(
            ConsumerLagDataList.builder()
                .setMetadata(
                    ResourceCollection.Metadata.builder()
                        .setSelf(
                            baseUrl + "/v3/clusters/" + clusterId
                                + "/consumer-groups/" + group1 + "/lags")
                        .build())
                .setData(
                    Arrays.asList(
                        expectedConsumerLagData(
                            /* topicName= */ topic2,
                            /* partitionId= */ 1,
                            /* consumerId= */ consumer2.groupMetadata().memberId(),
                            /* clientId= */ "client-2",
                            /* currentOffset= */ 3,
                            /* logEndOffset= */ 6),
                        expectedConsumerLagData(
                            /* topicName= */ topic1,
                            /* partitionId= */ 0,
                            /* consumerId= */ consumer1.groupMetadata().memberId(),
                            /* clientId= */ "client-1",
                            /* currentOffset= */ 3,
                            /* logEndOffset= */ 3),
                        expectedConsumerLagData(
                            /* topicName= */ topic1,
                            /* partitionId= */ 1,
                            /* consumerId= */ consumer1.groupMetadata().memberId(),
                            /* clientId= */ "client-1",
                            /* currentOffset= */ 0,
                            /* logEndOffset= */ 0),
                        expectedConsumerLagData(
                            /* topicName= */ topic2,
                            /* partitionId= */ 0,
                            /* consumerId= */ consumer2.groupMetadata().memberId(),
                            /* clientId= */ "client-2",
                            /* currentOffset= */ 0,
                            /* logEndOffset= */ 0)))
                .build());
    assertEquals(expected, response2.readEntity(ListConsumerLagsResponse.class));
}
Also used : GetConsumerLagResponse(io.confluent.kafkarest.entities.v3.GetConsumerLagResponse) ListConsumerLagsResponse(io.confluent.kafkarest.entities.v3.ListConsumerLagsResponse) Response(javax.ws.rs.core.Response) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) Versions(io.confluent.kafkarest.Versions) ResourceCollection(io.confluent.kafkarest.entities.v3.ResourceCollection) MediaType(javax.ws.rs.core.MediaType) Resource(io.confluent.kafkarest.entities.v3.Resource) Duration(java.time.Duration) BinaryPartitionProduceRequest(io.confluent.kafkarest.entities.v2.BinaryPartitionProduceRequest) ConsumerLagData(io.confluent.kafkarest.entities.v3.ConsumerLagData) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) Status(javax.ws.rs.core.Response.Status) TestUtils.testWithRetry(io.confluent.kafkarest.TestUtils.testWithRetry) BinaryPartitionProduceRecord(io.confluent.kafkarest.entities.v2.BinaryPartitionProduceRequest.BinaryPartitionProduceRecord) Properties(java.util.Properties) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ConsumerLagDataList(io.confluent.kafkarest.entities.v3.ConsumerLagDataList) Entity(javax.ws.rs.client.Entity) Test(org.junit.jupiter.api.Test) List(java.util.List) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) ClusterTestHarness(io.confluent.kafkarest.integration.ClusterTestHarness) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
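
The expected payload above encodes the usual lag relationship: lag is the log end offset minus the group's committed offset, so topic2 partition 1 (6 records produced, offset 3 committed) is expected to report a lag of 3, while the fully consumed partitions report 0. As a rough illustration, the sketch below derives the same quantity directly from the Kafka consumer API; this is not how the REST Proxy computes lag internally, and the helper method and its already-configured consumer are assumptions.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class LagSketch {

    // Computes lag for a single partition from an already-configured consumer.
    static long lagFor(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
        // Latest offset written to the partition.
        Map<TopicPartition, Long> endOffsets =
            consumer.endOffsets(Collections.singleton(partition));
        // Offset most recently committed by the consumer group (null if none).
        Map<TopicPartition, OffsetAndMetadata> committed =
            consumer.committed(Collections.singleton(partition));
        long logEndOffset = endOffsets.get(partition);
        OffsetAndMetadata offset = committed.get(partition);
        long currentOffset = offset == null ? 0L : offset.offset();
        return logEndOffset - currentOffset;
    }
}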

Aggregations

ListConsumerLagsResponse (io.confluent.kafkarest.entities.v3.ListConsumerLagsResponse) 3
ConsumerLagData (io.confluent.kafkarest.entities.v3.ConsumerLagData) 2
ConsumerLagDataList (io.confluent.kafkarest.entities.v3.ConsumerLagDataList) 2
GetConsumerLagResponse (io.confluent.kafkarest.entities.v3.GetConsumerLagResponse) 2
Resource (io.confluent.kafkarest.entities.v3.Resource) 2
ResourceCollection (io.confluent.kafkarest.entities.v3.ResourceCollection) 2
MediaType (javax.ws.rs.core.MediaType) 2
Test (org.junit.jupiter.api.Test) 2
TestUtils.testWithRetry (io.confluent.kafkarest.TestUtils.testWithRetry) 1
Versions (io.confluent.kafkarest.Versions) 1
ConsumerLagManager (io.confluent.kafkarest.controllers.ConsumerLagManager) 1
ConsumerLag (io.confluent.kafkarest.entities.ConsumerLag) 1
BinaryPartitionProduceRequest (io.confluent.kafkarest.entities.v2.BinaryPartitionProduceRequest) 1
BinaryPartitionProduceRecord (io.confluent.kafkarest.entities.v2.BinaryPartitionProduceRequest.BinaryPartitionProduceRecord) 1
ResourceName (io.confluent.kafkarest.extension.ResourceAccesslistFeature.ResourceName) 1
ClusterTestHarness (io.confluent.kafkarest.integration.ClusterTestHarness) 1
AsyncResponses (io.confluent.kafkarest.resources.AsyncResponses) 1
CrnFactory (io.confluent.kafkarest.response.CrnFactory) 1
FakeAsyncResponse (io.confluent.kafkarest.response.FakeAsyncResponse) 1
UrlFactory (io.confluent.kafkarest.response.UrlFactory) 1