Example 6 with FakeAsyncResponse

Use of io.confluent.kafkarest.response.FakeAsyncResponse in project kafka-rest by confluentinc.

From the class BrokerConfigResourceTest, method updateConfig_nonExistingConfigOrBrokerOrCluster_throwsNotFound:

@Test
public void updateConfig_nonExistingConfigOrBrokerOrCluster_throwsNotFound() {
    expect(brokerConfigManager.updateBrokerConfig(CLUSTER_ID, BROKER_ID, CONFIG_1.getName(), "new-value")).andReturn(failedFuture(new NotFoundException()));
    replay(brokerConfigManager);
    FakeAsyncResponse response = new FakeAsyncResponse();
    brokerConfigsResource.updateBrokerConfig(response, CLUSTER_ID, BROKER_ID, CONFIG_1.getName(), UpdateBrokerConfigRequest.create("new-value"));
    assertEquals(NotFoundException.class, response.getException().getClass());
}
Also used : FakeAsyncResponse(io.confluent.kafkarest.response.FakeAsyncResponse) NotFoundException(javax.ws.rs.NotFoundException) Test(org.junit.jupiter.api.Test)
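
The failure path above relies on a failedFuture(...) helper: instead of throwing from the mocked manager, the test returns an already-failed future, and the exception only surfaces once the resource resumes the async response. A minimal sketch of such a helper, assuming the manager methods return CompletableFuture (the real helper in the kafka-rest test code may differ):

private static <T> CompletableFuture<T> failedFuture(Throwable error) {
    // Illustrative only: build a future that is already completed exceptionally,
    // suitable for returning from expect(...).andReturn(...).
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(error);
    return future;
}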

Example 7 with FakeAsyncResponse

Use of io.confluent.kafkarest.response.FakeAsyncResponse in project kafka-rest by confluentinc.

From the class BrokerConfigResourceTest, method listBrokerConfigs_nonExistingBrokerOrCluster_throwsNotFound:

@Test
public void listBrokerConfigs_nonExistingBrokerOrCluster_throwsNotFound() {
    expect(brokerConfigManager.listBrokerConfigs(CLUSTER_ID, BROKER_ID)).andReturn(failedFuture(new NotFoundException()));
    replay(brokerConfigManager);
    FakeAsyncResponse response = new FakeAsyncResponse();
    brokerConfigsResource.listBrokerConfigs(response, CLUSTER_ID, BROKER_ID);
    assertEquals(NotFoundException.class, response.getException().getClass());
}
Also used : FakeAsyncResponse(io.confluent.kafkarest.response.FakeAsyncResponse) NotFoundException(javax.ws.rs.NotFoundException) Test(org.junit.jupiter.api.Test)
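
Both of these tests assert on response.getException() rather than on an HTTP status, because FakeAsyncResponse simply captures whatever the resource resumes with. As a rough, illustrative stand-in (the real class in io.confluent.kafkarest.response implements the full javax.ws.rs.container.AsyncResponse contract), a capturing test double could look like this:

public final class CapturingAsyncResponse {
    private Object value;
    private Throwable exception;

    // Invoked by the resource on the success path.
    public boolean resume(Object response) {
        this.value = response;
        return true;
    }

    // Invoked by the resource on the failure path.
    public boolean resume(Throwable error) {
        this.exception = error;
        return true;
    }

    public Object getValue() {
        return value;
    }

    public Throwable getException() {
        return exception;
    }
}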

Example 8 with FakeAsyncResponse

Use of io.confluent.kafkarest.response.FakeAsyncResponse in project kafka-rest by confluentinc.

From the class ProduceActionTest, method produceNoLimit:

@Test
public void produceNoLimit() throws Exception {
    // config
    final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
    Properties properties = new Properties();
    properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
    // first record is 25 bytes long
    properties.put(PRODUCE_MAX_BYTES_PER_SECOND, Integer.toString(30));
    properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
    properties.put(PRODUCE_RATE_LIMIT_ENABLED, "false");
    // setup
    ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
    ChunkedOutput<ResultOrError> mockedChunkedOutput = getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
    Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
    Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
    RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
    RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
    replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
    ProduceAction produceAction = getProduceAction(properties, chunkedOutputFactory, 2, countLimitProvider, bytesLimitProvider);
    MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);
    // expected results
    ProduceResponse produceResponse = getProduceResponse(0);
    ResultOrError resultOrErrorOK1 = ResultOrError.result(produceResponse);
    expect(mockedChunkedOutput.isClosed()).andReturn(false);
    // successful first produce
    mockedChunkedOutput.write(resultOrErrorOK1);
    mockedChunkedOutput.close();
    ProduceResponse produceResponse2 = getProduceResponse(1);
    ResultOrError resultOrErrorOK2 = ResultOrError.result(produceResponse2);
    expect(mockedChunkedOutput.isClosed()).andReturn(false);
    // successful second produce
    mockedChunkedOutput.write(resultOrErrorOK2);
    mockedChunkedOutput.close();
    replay(mockedChunkedOutput, chunkedOutputFactory);
    // run test
    FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
    produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", requests);
    FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
    produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", requests);
    // check results
    verify(requests, mockedChunkedOutput, countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
}
Also used : ChunkedOutputFactory(io.confluent.kafkarest.response.ChunkedOutputFactory) ResultOrError(io.confluent.kafkarest.response.StreamingResponse.ResultOrError) RequestRateLimiter(io.confluent.kafkarest.ratelimit.RequestRateLimiter) ProduceRequest(io.confluent.kafkarest.entities.v3.ProduceRequest) ProduceResponse(io.confluent.kafkarest.entities.v3.ProduceResponse) FakeAsyncResponse(io.confluent.kafkarest.response.FakeAsyncResponse) Properties(java.util.Properties) Test(org.junit.jupiter.api.Test)
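
The requests parameter is a Jackson MappingIterator, which is how the streaming produce endpoint consumes a sequence of JSON objects from the request body. The getProduceRequestsMappingIterator(...) helper used above is part of the test class; as a rough sketch of the underlying idea (using JsonNode for brevity, whereas the real endpoint binds to ProduceRequest):

// JsonNode, MappingIterator, and ObjectMapper come from com.fasterxml.jackson.databind.
static MappingIterator<JsonNode> requestsFromJson(String newlineDelimitedJson) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    // readValues(...) parses lazily, yielding one JSON object per next() call.
    return mapper.readerFor(JsonNode.class).readValues(newlineDelimitedJson);
}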

Example 9 with FakeAsyncResponse

Use of io.confluent.kafkarest.response.FakeAsyncResponse in project kafka-rest by confluentinc.

From the class ProduceActionTest, method produceWithCountLimit:

@Test
public void produceWithCountLimit() throws Exception {
    // config
    final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
    Properties properties = new Properties();
    properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
    // first record is 25 bytes long
    properties.put(PRODUCE_MAX_BYTES_PER_SECOND, Integer.toString(30));
    properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
    properties.put(PRODUCE_RATE_LIMIT_ENABLED, "true");
    // setup
    ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
    ChunkedOutput<ResultOrError> mockedChunkedOutput = getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
    Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
    Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
    RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
    RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
    expect(countLimitProvider.get()).andReturn(rateLimiterForCount);
    expect(bytesLimitProvider.get()).andReturn(rateLimiterForBytes);
    rateLimiterForCount.rateLimit(anyInt());
    rateLimiterForBytes.rateLimit(anyInt());
    rateLimiterForCount.rateLimit(anyInt());
    EasyMock.expectLastCall().andThrow(new RateLimitExceededException());
    replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
    ProduceAction produceAction = getProduceAction(properties, chunkedOutputFactory, 1, countLimitProvider, bytesLimitProvider);
    MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);
    // expected results
    ProduceResponse produceResponse = getProduceResponse(0);
    ResultOrError resultOrErrorOK = ResultOrError.result(produceResponse);
    expect(mockedChunkedOutput.isClosed()).andReturn(false);
    // successful first produce
    mockedChunkedOutput.write(resultOrErrorOK);
    mockedChunkedOutput.close();
    ErrorResponse err = ErrorResponse.create(429, "Request rate limit exceeded: The rate limit of requests per second has been exceeded.");
    ResultOrError resultOrErrorFail = ResultOrError.error(err);
    expect(mockedChunkedOutput.isClosed()).andReturn(false);
    // failing second produce
    mockedChunkedOutput.write(resultOrErrorFail);
    // error close
    mockedChunkedOutput.close();
    replay(mockedChunkedOutput, chunkedOutputFactory);
    // run test
    FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
    produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", requests);
    FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
    produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", requests);
    // check results
    verify(requests, mockedChunkedOutput, countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
}
Also used : ChunkedOutputFactory(io.confluent.kafkarest.response.ChunkedOutputFactory) ProduceRequest(io.confluent.kafkarest.entities.v3.ProduceRequest) ProduceResponse(io.confluent.kafkarest.entities.v3.ProduceResponse) FakeAsyncResponse(io.confluent.kafkarest.response.FakeAsyncResponse) RateLimitExceededException(io.confluent.kafkarest.ratelimit.RateLimitExceededException) Properties(java.util.Properties) ErrorResponse(io.confluent.kafkarest.exceptions.v3.ErrorResponse) ResultOrError(io.confluent.kafkarest.response.StreamingResponse.ResultOrError) RequestRateLimiter(io.confluent.kafkarest.ratelimit.RequestRateLimiter) Test(org.junit.jupiter.api.Test)
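
The 429 in this test comes from the EasyMock script: the first rateLimit(...) calls on both limiters are recorded as passing, and expectLastCall().andThrow(...) attaches a RateLimitExceededException to the second call on the count limiter, so the second produce is turned into the error written to the stream. The same record/replay/verify pattern in isolation (an illustrative sketch, not taken from the test class):

RequestRateLimiter limiter = mock(RequestRateLimiter.class);
// Record phase: the first call passes, the second call is scripted to throw.
limiter.rateLimit(anyInt());
limiter.rateLimit(anyInt());
expectLastCall().andThrow(new RateLimitExceededException());
replay(limiter);

// Replay phase: exercise the mock exactly as recorded.
limiter.rateLimit(10);
try {
    limiter.rateLimit(10);
} catch (Exception e) {
    // The RateLimitExceededException scripted above.
}

// Verify phase: fails if any recorded call was not made.
verify(limiter);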

Example 10 with FakeAsyncResponse

Use of io.confluent.kafkarest.response.FakeAsyncResponse in project kafka-rest by confluentinc.

From the class SearchReassignmentsByTopicActionTest, method searchReassignments_existingCluster_returnsReassignments:

@Test
public void searchReassignments_existingCluster_returnsReassignments() {
    expect(reassignmentManager.searchReassignmentsByTopicName(CLUSTER_ID, TOPIC_1)).andReturn(CompletableFuture.completedFuture(asList(REASSIGNMENT_1, REASSIGNMENT_2, REASSIGNMENT_3)));
    replay(reassignmentManager);
    FakeAsyncResponse response = new FakeAsyncResponse();
    listReassignmentsByTopicAction.listReassignmentsByTopic(response, CLUSTER_ID, TOPIC_1);
    SearchReassignmentsByTopicResponse expected =
        SearchReassignmentsByTopicResponse.create(
            ReassignmentDataList.builder()
                .setMetadata(
                    ResourceCollection.Metadata.builder()
                        .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/-/reassignments")
                        .build())
                .setData(
                    Arrays.asList(
                        ReassignmentData.builder()
                            .setMetadata(
                                Resource.Metadata.builder()
                                    .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/1/reassignments")
                                    .setResourceName("crn:///kafka=cluster-1/topic=topic-1/partition=1/reassignments")
                                    .build())
                            .setClusterId(CLUSTER_ID)
                            .setTopicName(TOPIC_1)
                            .setPartitionId(PARTITION_ID_1)
                            .setAddingReplicas(ADDING_REPLICAS_1)
                            .setRemovingReplicas(REMOVING_REPLICAS_1)
                            .setReplicas(Resource.Relationship.create("/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas"))
                            .build(),
                        ReassignmentData.builder()
                            .setMetadata(
                                Resource.Metadata.builder()
                                    .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/2/reassignments")
                                    .setResourceName("crn:///kafka=cluster-1/topic=topic-1/partition=2/reassignments")
                                    .build())
                            .setClusterId(CLUSTER_ID)
                            .setTopicName(TOPIC_1)
                            .setPartitionId(PARTITION_ID_2)
                            .setAddingReplicas(ADDING_REPLICAS_2)
                            .setRemovingReplicas(REMOVING_REPLICAS_2)
                            .setReplicas(Resource.Relationship.create("/v3/clusters/cluster-1/topics/topic-1/partitions/2/replicas"))
                            .build(),
                        ReassignmentData.builder()
                            .setMetadata(
                                Resource.Metadata.builder()
                                    .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/3/reassignments")
                                    .setResourceName("crn:///kafka=cluster-1/topic=topic-1/partition=3/reassignments")
                                    .build())
                            .setClusterId(CLUSTER_ID)
                            .setTopicName(TOPIC_1)
                            .setPartitionId(PARTITION_ID_3)
                            .setAddingReplicas(ADDING_REPLICAS_3)
                            .setRemovingReplicas(REMOVING_REPLICAS_3)
                            .setReplicas(Resource.Relationship.create("/v3/clusters/cluster-1/topics/topic-1/partitions/3/replicas"))
                            .build()))
                .build());
    assertEquals(expected, response.getValue());
}
Also used : FakeAsyncResponse(io.confluent.kafkarest.response.FakeAsyncResponse) SearchReassignmentsByTopicResponse(io.confluent.kafkarest.entities.v3.SearchReassignmentsByTopicResponse) Test(org.junit.jupiter.api.Test)

Aggregations

FakeAsyncResponse (io.confluent.kafkarest.response.FakeAsyncResponse): 107
Test (org.junit.jupiter.api.Test): 107
NotFoundException (javax.ws.rs.NotFoundException): 35
ProduceRequest (io.confluent.kafkarest.entities.v3.ProduceRequest): 6
RequestRateLimiter (io.confluent.kafkarest.ratelimit.RequestRateLimiter): 6
ChunkedOutputFactory (io.confluent.kafkarest.response.ChunkedOutputFactory): 6
Properties (java.util.Properties): 6
ResultOrError (io.confluent.kafkarest.response.StreamingResponse.ResultOrError): 5
CreateTopicResponse (io.confluent.kafkarest.entities.v3.CreateTopicResponse): 4
ProduceResponse (io.confluent.kafkarest.entities.v3.ProduceResponse): 4
ArrayList (java.util.ArrayList): 4
HashMap (java.util.HashMap): 4
ConfigSynonymData (io.confluent.kafkarest.entities.v3.ConfigSynonymData): 3
ListBrokerConfigsResponse (io.confluent.kafkarest.entities.v3.ListBrokerConfigsResponse): 3
ListTopicConfigsResponse (io.confluent.kafkarest.entities.v3.ListTopicConfigsResponse): 3
ErrorResponse (io.confluent.kafkarest.exceptions.v3.ErrorResponse): 3
BrokerConfig (io.confluent.kafkarest.entities.BrokerConfig): 2
TopicConfig (io.confluent.kafkarest.entities.TopicConfig): 2
RateLimitExceededException (io.confluent.kafkarest.ratelimit.RateLimitExceededException): 2
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 2