Example usage of io.confluent.kafkarest.response.FakeAsyncResponse in the confluentinc/kafka-rest project.
Class BrokerConfigResourceTest, method updateConfig_nonExistingConfigOrBrokerOrCluster_throwsNotFound.
@Test
public void updateConfig_nonExistingConfigOrBrokerOrCluster_throwsNotFound() {
  // Arrange: the manager reports the config/broker/cluster as missing.
  expect(
          brokerConfigManager.updateBrokerConfig(
              CLUSTER_ID, BROKER_ID, CONFIG_1.getName(), "new-value"))
      .andReturn(failedFuture(new NotFoundException()));
  replay(brokerConfigManager);

  // Act: issue the update through the resource.
  FakeAsyncResponse asyncResponse = new FakeAsyncResponse();
  brokerConfigsResource.updateBrokerConfig(
      asyncResponse,
      CLUSTER_ID,
      BROKER_ID,
      CONFIG_1.getName(),
      UpdateBrokerConfigRequest.create("new-value"));

  // Assert: the NotFoundException propagates to the async response.
  assertEquals(NotFoundException.class, asyncResponse.getException().getClass());
}
Example usage of io.confluent.kafkarest.response.FakeAsyncResponse in the confluentinc/kafka-rest project.
Class BrokerConfigResourceTest, method listBrokerConfigs_nonExistingBrokerOrCluster_throwsNotFound.
@Test
public void listBrokerConfigs_nonExistingBrokerOrCluster_throwsNotFound() {
  // Arrange: listing configs for an unknown broker/cluster fails with NotFoundException.
  expect(brokerConfigManager.listBrokerConfigs(CLUSTER_ID, BROKER_ID))
      .andReturn(failedFuture(new NotFoundException()));
  replay(brokerConfigManager);

  // Act: request the config list through the resource.
  FakeAsyncResponse asyncResponse = new FakeAsyncResponse();
  brokerConfigsResource.listBrokerConfigs(asyncResponse, CLUSTER_ID, BROKER_ID);

  // Assert: the failure surfaces as the response's exception.
  assertEquals(NotFoundException.class, asyncResponse.getException().getClass());
}
Example usage of io.confluent.kafkarest.response.FakeAsyncResponse in the confluentinc/kafka-rest project.
Class ProduceActionTest, method produceNoLimit.
@Test
public void produceNoLimit() throws Exception {
  // Verifies that when rate limiting is disabled, both produce calls succeed and the
  // rate-limiter providers/limiters are never consulted (no expectations recorded on them).
  // config
  final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
  Properties properties = new Properties();
  properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
  properties.put(
      PRODUCE_MAX_BYTES_PER_SECOND, // first record is 25 bytes long
      Integer.toString(30));
  properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
  // FIX: was the typo "falsse". This test exercises the disabled-limit path (the limiter
  // mocks below have no recorded expectations), so the flag must be a valid "false".
  properties.put(PRODUCE_RATE_LIMIT_ENABLED, "false");

  // setup
  ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
  ChunkedOutput<ResultOrError> mockedChunkedOutput =
      getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
  Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
  Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
  RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
  RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
  // No expect(...) on the limiter mocks: with limiting disabled they must not be touched.
  replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);

  ProduceAction produceAction =
      getProduceAction(properties, chunkedOutputFactory, 2, countLimitProvider, bytesLimitProvider);
  MappingIterator<ProduceRequest> requests =
      getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);

  // expected results
  ProduceResponse produceResponse = getProduceResponse(0);
  ResultOrError resultOrErrorOK1 = ResultOrError.result(produceResponse);
  expect(mockedChunkedOutput.isClosed()).andReturn(false);
  // successful first produce
  mockedChunkedOutput.write(resultOrErrorOK1);
  mockedChunkedOutput.close();

  ProduceResponse produceResponse2 = getProduceResponse(1);
  ResultOrError resultOrErrorOK2 = ResultOrError.result(produceResponse2);
  expect(mockedChunkedOutput.isClosed()).andReturn(false);
  // successful second produce
  mockedChunkedOutput.write(resultOrErrorOK2);
  mockedChunkedOutput.close();
  replay(mockedChunkedOutput, chunkedOutputFactory);

  // run test
  FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
  produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", requests);
  FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
  produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", requests);

  // check results
  verify(
      requests,
      mockedChunkedOutput,
      countLimitProvider,
      bytesLimitProvider,
      rateLimiterForCount,
      rateLimiterForBytes);
}
Example usage of io.confluent.kafkarest.response.FakeAsyncResponse in the confluentinc/kafka-rest project.
Class ProduceActionTest, method produceWithCountLimit.
@Test
public void produceWithCountLimit() throws Exception {
  // With rate limiting enabled and a grace window of 1, the first produce succeeds and the
  // second is rejected with a 429 once the count limiter throws.
  // config
  final int totalProduceCalls = 2;
  Properties properties = new Properties();
  properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
  properties.put(
      PRODUCE_MAX_BYTES_PER_SECOND, // first record is 25 bytes long
      Integer.toString(30));
  properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
  properties.put(PRODUCE_RATE_LIMIT_ENABLED, "true");

  // setup
  ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
  ChunkedOutput<ResultOrError> mockedChunkedOutput =
      getChunkedOutput(chunkedOutputFactory, totalProduceCalls);
  Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
  Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
  RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
  RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);

  // First call passes both limiters; the second count-limit check throws.
  expect(countLimitProvider.get()).andReturn(rateLimiterForCount);
  expect(bytesLimitProvider.get()).andReturn(rateLimiterForBytes);
  rateLimiterForCount.rateLimit(anyInt());
  rateLimiterForBytes.rateLimit(anyInt());
  rateLimiterForCount.rateLimit(anyInt());
  EasyMock.expectLastCall().andThrow(new RateLimitExceededException());
  replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);

  ProduceAction produceAction =
      getProduceAction(properties, chunkedOutputFactory, 1, countLimitProvider, bytesLimitProvider);
  MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(totalProduceCalls);

  // expected results
  ProduceResponse okResponse = getProduceResponse(0);
  ResultOrError okResult = ResultOrError.result(okResponse);
  expect(mockedChunkedOutput.isClosed()).andReturn(false);
  // successful first produce
  mockedChunkedOutput.write(okResult);
  mockedChunkedOutput.close();

  ErrorResponse rateLimitError =
      ErrorResponse.create(
          429,
          "Request rate limit exceeded: The rate limit of requests per second has been exceeded.");
  ResultOrError failedResult = ResultOrError.error(rateLimitError);
  expect(mockedChunkedOutput.isClosed()).andReturn(false);
  // failing second produce
  mockedChunkedOutput.write(failedResult);
  // error close
  mockedChunkedOutput.close();
  replay(mockedChunkedOutput, chunkedOutputFactory);

  // run test
  FakeAsyncResponse firstResponse = new FakeAsyncResponse();
  produceAction.produce(firstResponse, "clusterId", "topicName", requests);
  FakeAsyncResponse secondResponse = new FakeAsyncResponse();
  produceAction.produce(secondResponse, "clusterId", "topicName", requests);

  // check results
  verify(
      requests,
      mockedChunkedOutput,
      countLimitProvider,
      bytesLimitProvider,
      rateLimiterForCount,
      rateLimiterForBytes);
}
Example usage of io.confluent.kafkarest.response.FakeAsyncResponse in the confluentinc/kafka-rest project.
Class SearchReassignmentsByTopicActionTest, method searchReassignments_existingCluster_returnsReassignments.
@Test
public void searchReassignments_existingCluster_returnsReassignments() {
  // Arrange: the manager returns three in-flight reassignments for topic-1.
  expect(reassignmentManager.searchReassignmentsByTopicName(CLUSTER_ID, TOPIC_1))
      .andReturn(
          CompletableFuture.completedFuture(
              asList(REASSIGNMENT_1, REASSIGNMENT_2, REASSIGNMENT_3)));
  replay(reassignmentManager);

  // Act: list reassignments for the topic through the action.
  FakeAsyncResponse asyncResponse = new FakeAsyncResponse();
  listReassignmentsByTopicAction.listReassignmentsByTopic(asyncResponse, CLUSTER_ID, TOPIC_1);

  // Assert: the response carries all three reassignments with fully-populated metadata.
  SearchReassignmentsByTopicResponse expectedResponse =
      SearchReassignmentsByTopicResponse.create(
          ReassignmentDataList.builder()
              .setMetadata(
                  ResourceCollection.Metadata.builder()
                      .setSelf("/v3/clusters/cluster-1/topics/topic-1/partitions/-/reassignments")
                      .build())
              .setData(
                  Arrays.asList(
                      ReassignmentData.builder()
                          .setMetadata(
                              Resource.Metadata.builder()
                                  .setSelf(
                                      "/v3/clusters/cluster-1/topics/topic-1/partitions/1"
                                          + "/reassignments")
                                  .setResourceName(
                                      "crn:///kafka=cluster-1/topic=topic-1/partition=1"
                                          + "/reassignments")
                                  .build())
                          .setClusterId(CLUSTER_ID)
                          .setTopicName(TOPIC_1)
                          .setPartitionId(PARTITION_ID_1)
                          .setAddingReplicas(ADDING_REPLICAS_1)
                          .setRemovingReplicas(REMOVING_REPLICAS_1)
                          .setReplicas(
                              Resource.Relationship.create(
                                  "/v3/clusters/cluster-1/topics/topic-1/partitions/1/replicas"))
                          .build(),
                      ReassignmentData.builder()
                          .setMetadata(
                              Resource.Metadata.builder()
                                  .setSelf(
                                      "/v3/clusters/cluster-1/topics/topic-1/partitions/2"
                                          + "/reassignments")
                                  .setResourceName(
                                      "crn:///kafka=cluster-1/topic=topic-1/partition=2"
                                          + "/reassignments")
                                  .build())
                          .setClusterId(CLUSTER_ID)
                          .setTopicName(TOPIC_1)
                          .setPartitionId(PARTITION_ID_2)
                          .setAddingReplicas(ADDING_REPLICAS_2)
                          .setRemovingReplicas(REMOVING_REPLICAS_2)
                          .setReplicas(
                              Resource.Relationship.create(
                                  "/v3/clusters/cluster-1/topics/topic-1/partitions/2/replicas"))
                          .build(),
                      ReassignmentData.builder()
                          .setMetadata(
                              Resource.Metadata.builder()
                                  .setSelf(
                                      "/v3/clusters/cluster-1/topics/topic-1/partitions/3"
                                          + "/reassignments")
                                  .setResourceName(
                                      "crn:///kafka=cluster-1/topic=topic-1/partition=3"
                                          + "/reassignments")
                                  .build())
                          .setClusterId(CLUSTER_ID)
                          .setTopicName(TOPIC_1)
                          .setPartitionId(PARTITION_ID_3)
                          .setAddingReplicas(ADDING_REPLICAS_3)
                          .setRemovingReplicas(REMOVING_REPLICAS_3)
                          .setReplicas(
                              Resource.Relationship.create(
                                  "/v3/clusters/cluster-1/topics/topic-1/partitions/3/replicas"))
                          .build()))
              .build());
  assertEquals(expectedResponse, asyncResponse.getValue());
}
Aggregations