Use of io.confluent.kafkarest.response.StreamingResponse.ResultOrError in project kafka-rest by confluentinc: class ProduceActionTest, method produceNoLimit.
@Test
public void produceNoLimit() throws Exception {
// config
final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
Properties properties = new Properties();
properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
// first record is 25 bytes long
properties.put(PRODUCE_MAX_BYTES_PER_SECOND, Integer.toString(30));
properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
properties.put(PRODUCE_RATE_LIMIT_ENABLED, "false");
// setup
ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
ProduceAction produceAction = getProduceAction(properties, chunkedOutputFactory, 2, countLimitProvider, bytesLimitProvider);
MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);
// expected results
ProduceResponse produceResponse = getProduceResponse(0);
ResultOrError resultOrErrorOK1 = ResultOrError.result(produceResponse);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// successful first produce
mockedChunkedOutput.write(resultOrErrorOK1);
mockedChunkedOutput.close();
ProduceResponse produceResponse2 = getProduceResponse(1);
ResultOrError resultOrErrorOK2 = ResultOrError.result(produceResponse2);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// successful second produce
mockedChunkedOutput.write(resultOrErrorOK2);
mockedChunkedOutput.close();
replay(mockedChunkedOutput, chunkedOutputFactory);
// run test
FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", requests);
FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", requests);
// check results
verify(requests, mockedChunkedOutput, countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
}
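The getChunkedOutput helper used above is not shown in this excerpt. A minimal sketch of what it presumably does, assuming it simply hands back one mocked ChunkedOutput and expects the factory to be asked for it once per produce call (the project's actual helper may differ):
private static ChunkedOutput<ResultOrError> getChunkedOutput(
    ChunkedOutputFactory chunkedOutputFactory, int totalNumberOfProduceCalls) {
  // hypothetical: a single mocked output is handed out for every produce call
  @SuppressWarnings("unchecked")
  ChunkedOutput<ResultOrError> mockedChunkedOutput = mock(ChunkedOutput.class);
  expect(chunkedOutputFactory.getChunkedOutput())
      .andReturn(mockedChunkedOutput)
      .times(totalNumberOfProduceCalls);
  return mockedChunkedOutput;
}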
Use of io.confluent.kafkarest.response.StreamingResponse.ResultOrError in project kafka-rest by confluentinc: class ProduceActionTest, method produceWithCountLimit.
@Test
public void produceWithCountLimit() throws Exception {
// config
final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
Properties properties = new Properties();
properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
// first record is 25 bytes long
properties.put(PRODUCE_MAX_BYTES_PER_SECOND, Integer.toString(30));
properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
properties.put(PRODUCE_RATE_LIMIT_ENABLED, "true");
// setup
ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
expect(countLimitProvider.get()).andReturn(rateLimiterForCount);
expect(bytesLimitProvider.get()).andReturn(rateLimiterForBytes);
rateLimiterForCount.rateLimit(anyInt());
rateLimiterForBytes.rateLimit(anyInt());
rateLimiterForCount.rateLimit(anyInt());
EasyMock.expectLastCall().andThrow(new RateLimitExceededException());
replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
ProduceAction produceAction = getProduceAction(properties, chunkedOutputFactory, 1, countLimitProvider, bytesLimitProvider);
MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);
// expected results
ProduceResponse produceResponse = getProduceResponse(0);
ResultOrError resultOrErrorOK = ResultOrError.result(produceResponse);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// successful first produce
mockedChunkedOutput.write(resultOrErrorOK);
mockedChunkedOutput.close();
ErrorResponse err =
    ErrorResponse.create(
        429, "Request rate limit exceeded: The rate limit of requests per second has been exceeded.");
ResultOrError resultOrErrorFail = ResultOrError.error(err);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// failing second produce
mockedChunkedOutput.write(resultOrErrorFail);
// error close
mockedChunkedOutput.close();
replay(mockedChunkedOutput, chunkedOutputFactory);
// run test
FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", requests);
FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", requests);
// check results
verify(requests, mockedChunkedOutput, countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
}
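The expectations above encode the limiter contract: rateLimit returns normally while budget remains and throws RateLimitExceededException once it is exhausted. A minimal sketch of such a limiter, assuming a Guava RateLimiter backing; this is illustrative, not the project's actual implementation:
import com.google.common.util.concurrent.RateLimiter;

// Hypothetical non-blocking limiter: rejects immediately instead of waiting for permits.
class GuavaRequestRateLimiter {
  private final RateLimiter delegate;

  GuavaRequestRateLimiter(double permitsPerSecond) {
    this.delegate = RateLimiter.create(permitsPerSecond);
  }

  void rateLimit(int permits) {
    // tryAcquire returns false when the per-second budget is spent
    if (!delegate.tryAcquire(permits)) {
      throw new RateLimitExceededException();
    }
  }
}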
Use of io.confluent.kafkarest.response.StreamingResponse.ResultOrError in project kafka-rest by confluentinc: class StreamingResponseTest, method testHasNextRuntimeException.
@Test
public void testHasNextRuntimeException() throws IOException {
MappingIterator<ProduceRequest> requests = mock(MappingIterator.class);
expect(requests.hasNext()).andThrow(new RuntimeException("IO error thrown by mapping iterator describing problem."));
requests.close();
replay(requests);
ChunkedOutputFactory mockedChunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = mock(ChunkedOutput.class);
ResultOrError resultOrError =
    ResultOrError.error(
        ErrorResponse.create(
            400, "Bad Request: Error processing message: IO error thrown by mapping iterator describing problem."));
expect(mockedChunkedOutputFactory.getChunkedOutput()).andReturn(mockedChunkedOutput);
mockedChunkedOutput.write(resultOrError);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
mockedChunkedOutput.close();
replay(mockedChunkedOutputFactory, mockedChunkedOutput);
StreamingResponseFactory streamingResponseFactory = new StreamingResponseFactory(mockedChunkedOutputFactory);
StreamingResponse<ProduceRequest> streamingResponse = streamingResponseFactory.from(requests);
FakeAsyncResponse response = new FakeAsyncResponse();
streamingResponse.compose(result -> new CompletableFuture<>()).resume(response);
EasyMock.verify(mockedChunkedOutput, mockedChunkedOutputFactory, requests);
}
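What this test pins down is the error mapping: a RuntimeException from the request iterator becomes a single 400 error chunk rather than an unhandled failure. A hedged sketch of that mapping, simplified from whatever StreamingResponse actually does internally:
static ResultOrError toErrorChunk(RuntimeException e) {
  // mirror the message format asserted above; the prefix is taken from the test
  return ResultOrError.error(
      ErrorResponse.create(400, "Bad Request: Error processing message: " + e.getMessage()));
}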
Use of io.confluent.kafkarest.response.StreamingResponse.ResultOrError in project kafka-rest by confluentinc: class StreamingResponseTest, method testWriteToChunkedOutput.
@Test
public void testWriteToChunkedOutput() throws IOException {
String key = "foo";
String value = "bar";
ProduceRequest request =
    ProduceRequest.builder()
        .setKey(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.AVRO)
                .setRawSchema("{\"type\": \"string\"}")
                .setData(TextNode.valueOf(key))
                .build())
        .setValue(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.AVRO)
                .setRawSchema("{\"type\": \"string\"}")
                .setData(TextNode.valueOf(value))
                .build())
        .setOriginalSize(0L)
        .build();
MappingIterator<ProduceRequest> requestsMappingIterator = mock(MappingIterator.class);
expect(requestsMappingIterator.hasNext()).andReturn(true);
expect(requestsMappingIterator.nextValue()).andReturn(request);
expect(requestsMappingIterator.hasNext()).andReturn(false);
requestsMappingIterator.close();
replay(requestsMappingIterator);
ChunkedOutputFactory mockedChunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = mock(ChunkedOutput.class);
ProduceResponse produceResponse =
    ProduceResponse.builder()
        .setClusterId("clusterId")
        .setTopicName("topicName")
        .setPartitionId(1)
        .setOffset(1L)
        .build();
ResultOrError resultOrError = ResultOrError.result(produceResponse);
expect(mockedChunkedOutputFactory.getChunkedOutput()).andReturn(mockedChunkedOutput);
mockedChunkedOutput.write(resultOrError);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
mockedChunkedOutput.close();
replay(mockedChunkedOutput, mockedChunkedOutputFactory);
StreamingResponseFactory streamingResponseFactory = new StreamingResponseFactory(mockedChunkedOutputFactory);
StreamingResponse<ProduceRequest> streamingResponse = streamingResponseFactory.from(requestsMappingIterator);
CompletableFuture<ProduceResponse> produceResponseFuture = new CompletableFuture<>();
produceResponseFuture.complete(produceResponse);
FakeAsyncResponse response = new FakeAsyncResponse();
streamingResponse.compose(result -> produceResponseFuture).resume(response);
EasyMock.verify(mockedChunkedOutput);
EasyMock.verify(mockedChunkedOutputFactory);
EasyMock.verify(requestsMappingIterator);
}
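In production the MappingIterator mocked here would come from Jackson parsing the streamed request body. A sketch of that wiring; requestBodyStream is a hypothetical name for the HTTP entity stream:
import com.fasterxml.jackson.databind.ObjectMapper;

ObjectMapper mapper = new ObjectMapper();
// one ProduceRequest is parsed per JSON document in the stream
MappingIterator<ProduceRequest> requests =
    mapper.readerFor(ProduceRequest.class).readValues(requestBodyStream); // requestBodyStream: hypothetical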
Use of io.confluent.kafkarest.response.StreamingResponse.ResultOrError in project kafka-rest by confluentinc: class StreamingResponseTest, method testGracePeriodExceededExceptionThrown.
@Test
public void testGracePeriodExceededExceptionThrown() throws IOException {
String key = "foo";
String value = "bar";
ProduceRequest request =
    ProduceRequest.builder()
        .setKey(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.AVRO)
                .setRawSchema("{\"type\": \"string\"}")
                .setData(TextNode.valueOf(key))
                .build())
        .setValue(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.AVRO)
                .setRawSchema("{\"type\": \"string\"}")
                .setData(TextNode.valueOf(value))
                .build())
        .setOriginalSize(0L)
        .build();
MappingIterator<ProduceRequest> requests = mock(MappingIterator.class);
expect(requests.hasNext()).andReturn(true);
expect(requests.nextValue()).andReturn(request);
expect(requests.hasNext()).andReturn(false);
requests.close();
replay(requests);
ChunkedOutputFactory mockedChunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = mock(ChunkedOutput.class);
ProduceResponse produceResponse =
    ProduceResponse.builder()
        .setClusterId("clusterId")
        .setTopicName("topicName")
        .setPartitionId(1)
        .setOffset(1L)
        .build();
ResultOrError resultOrError = ResultOrError.result(produceResponse);
expect(mockedChunkedOutputFactory.getChunkedOutput()).andReturn(mockedChunkedOutput);
mockedChunkedOutput.write(resultOrError);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
mockedChunkedOutput.close();
replay(mockedChunkedOutputFactory, mockedChunkedOutput);
StreamingResponseFactory streamingResponseFactory = new StreamingResponseFactory(mockedChunkedOutputFactory);
StreamingResponse<ProduceRequest> streamingResponse = streamingResponseFactory.from(requests);
CompletableFuture<ProduceResponse> produceResponseFuture = new CompletableFuture<>();
produceResponseFuture.complete(produceResponse);
FakeAsyncResponse response = new FakeAsyncResponse();
streamingResponse.compose(result -> produceResponseFuture).resume(response);
EasyMock.verify(mockedChunkedOutput, mockedChunkedOutputFactory, requests);
}
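The compose(...).resume(...) pattern in these tests maps each parsed request to a CompletableFuture and writes its outcome to the chunked output. A hedged sketch of that final write step, with illustrative names and an illustrative failure status code:
static void writeWhenDone(
    CompletableFuture<ProduceResponse> future, ChunkedOutput<ResultOrError> out) {
  future.whenComplete(
      (result, error) -> {
        try {
          // success becomes a result chunk, failure an error chunk
          out.write(
              error == null
                  ? ResultOrError.result(result)
                  : ResultOrError.error(ErrorResponse.create(500, error.getMessage())));
        } catch (IOException e) {
          // a write failure here means the client went away; cleanup happens elsewhere
        }
      });
}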