Use of io.confluent.kafkarest.entities.v3.ProduceResponse in project kafka-rest by confluentinc.
The class ProduceActionIntegrationTest, method produceBinaryWithNullData.
@Test
public void produceBinaryWithNullData() throws Exception {
String clusterId = testEnv.kafkaCluster().getClusterId();
ProduceRequest request =
    ProduceRequest.builder()
        .setKey(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.BINARY)
                .setData(NullNode.getInstance())
                .build())
        .setValue(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.BINARY)
                .setData(NullNode.getInstance())
                .build())
        .setOriginalSize(0L)
        .build();
Response response =
    testEnv
        .kafkaRest()
        .target()
        .path("/v3/clusters/" + clusterId + "/topics/" + TOPIC_NAME + "/records")
        .request()
        .accept(MediaType.APPLICATION_JSON)
        .post(Entity.entity(request, MediaType.APPLICATION_JSON));
assertEquals(Status.OK.getStatusCode(), response.getStatus());
ProduceResponse actual = readProduceResponse(response);
ConsumerRecord<byte[], byte[]> produced =
    testEnv
        .kafkaCluster()
        .getRecord(
            TOPIC_NAME,
            actual.getPartitionId(),
            actual.getOffset(),
            new ByteArrayDeserializer(),
            new ByteArrayDeserializer());
assertNull(produced.key());
assertNull(produced.value());
}
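The readProduceResponse helper used above is not part of this excerpt. A minimal sketch of what such a helper might do, assuming the test client has a Jackson JSON provider registered so the entity can be mapped onto the v3 ProduceResponse type:

private static ProduceResponse readProduceResponse(Response response) {
  // Assumption: buffer the entity so it can be re-read if needed, then deserialize it directly.
  response.bufferEntity();
  return response.readEntity(ProduceResponse.class);
}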
Use of io.confluent.kafkarest.entities.v3.ProduceResponse in project kafka-rest by confluentinc.
The class ProduceActionIntegrationTest, method produceJsonschemaWithRawSchema.
@Test
public void produceJsonschemaWithRawSchema() throws Exception {
String clusterId = testEnv.kafkaCluster().getClusterId();
TextNode key = TextNode.valueOf("foo");
TextNode value = TextNode.valueOf("bar");
ProduceRequest request =
    ProduceRequest.builder()
        .setKey(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.JSONSCHEMA)
                .setRawSchema("{\"type\": \"string\"}")
                .setData(key)
                .build())
        .setValue(
            ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.JSONSCHEMA)
                .setRawSchema("{\"type\": \"string\"}")
                .setData(value)
                .build())
        .setOriginalSize(0L)
        .build();
Response response =
    testEnv
        .kafkaRest()
        .target()
        .path("/v3/clusters/" + clusterId + "/topics/" + TOPIC_NAME + "/records")
        .request()
        .accept(MediaType.APPLICATION_JSON)
        .post(Entity.entity(request, MediaType.APPLICATION_JSON));
assertEquals(Status.OK.getStatusCode(), response.getStatus());
ProduceResponse actual = readProduceResponse(response);
ConsumerRecord<Object, Object> produced =
    testEnv
        .kafkaCluster()
        .getRecord(
            TOPIC_NAME,
            actual.getPartitionId(),
            actual.getOffset(),
            testEnv.schemaRegistry().createJsonSchemaDeserializer(),
            testEnv.schemaRegistry().createJsonSchemaDeserializer());
assertEquals(key, produced.key());
assertEquals(value, produced.value());
}
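Instead of embedding rawSchema in every request, the same string schema could be registered up front and referenced by id, mirroring the protobuf example further down. A sketch under assumptions: it uses a Schema Registry client directly rather than the test environment's helpers, and the URL and subject name are illustrative only.

SchemaRegistryClient schemaRegistryClient =
    new CachedSchemaRegistryClient("http://localhost:8081", 10); // illustrative URL
// Register the raw JSON Schema once and produce by schema id afterwards.
int schemaId =
    schemaRegistryClient.register(TOPIC_NAME + "-value", new JsonSchema("{\"type\": \"string\"}"));
ProduceRequestData valueData =
    ProduceRequestData.builder()
        .setSchemaId(schemaId)
        .setData(TextNode.valueOf("bar"))
        .build();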
Use of io.confluent.kafkarest.entities.v3.ProduceResponse in project kafka-rest by confluentinc.
The class ProduceActionIntegrationTest, method produceProtobufWithSchemaId.
@Test
public void produceProtobufWithSchemaId() throws Exception {
String clusterId = testEnv.kafkaCluster().getClusterId();
ProtobufSchema keySchema = new ProtobufSchema("syntax = \"proto3\"; message MyKey { string foo = 1; }");
SchemaKey keySchemaKey = testEnv.schemaRegistry().createSchema(DEFAULT_KEY_SUBJECT, keySchema);
ProtobufSchema valueSchema = new ProtobufSchema("syntax = \"proto3\"; message MyValue { string bar = 1; }");
SchemaKey valueSchemaKey = testEnv.schemaRegistry().createSchema(DEFAULT_VALUE_SUBJECT, valueSchema);
ObjectNode key = new ObjectNode(JsonNodeFactory.instance);
key.put("foo", "foz");
ObjectNode value = new ObjectNode(JsonNodeFactory.instance);
value.put("bar", "baz");
ProduceRequest request =
    ProduceRequest.builder()
        .setKey(
            ProduceRequestData.builder()
                .setSchemaId(keySchemaKey.getSchemaId())
                .setData(key)
                .build())
        .setValue(
            ProduceRequestData.builder()
                .setSchemaId(valueSchemaKey.getSchemaId())
                .setData(value)
                .build())
        .setOriginalSize(0L)
        .build();
Response response =
    testEnv
        .kafkaRest()
        .target()
        .path("/v3/clusters/" + clusterId + "/topics/" + TOPIC_NAME + "/records")
        .request()
        .accept(MediaType.APPLICATION_JSON)
        .post(Entity.entity(request, MediaType.APPLICATION_JSON));
assertEquals(Status.OK.getStatusCode(), response.getStatus());
ProduceResponse actual = readProduceResponse(response);
ConsumerRecord<Message, Message> produced =
    testEnv
        .kafkaCluster()
        .getRecord(
            TOPIC_NAME,
            actual.getPartitionId(),
            actual.getOffset(),
            testEnv.schemaRegistry().createProtobufDeserializer(),
            testEnv.schemaRegistry().createProtobufDeserializer());
DynamicMessage.Builder expectedKey = DynamicMessage.newBuilder(keySchema.toDescriptor());
expectedKey.setField(keySchema.toDescriptor().findFieldByName("foo"), "foz");
DynamicMessage.Builder expectedValue = DynamicMessage.newBuilder(valueSchema.toDescriptor());
expectedValue.setField(valueSchema.toDescriptor().findFieldByName("bar"), "baz");
assertEquals(expectedKey.build().toByteString(), produced.key().toByteString());
assertEquals(expectedValue.build().toByteString(), produced.value().toByteString());
}
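The deserializers returned by testEnv.schemaRegistry().createProtobufDeserializer() are not shown here. A sketch of the equivalent standalone setup, assuming the standard Confluent KafkaProtobufDeserializer and an illustrative registry URL:

// Assumption: with no specific message type configured, the deserializer yields DynamicMessage
// instances, which is why the test compares serialized bytes rather than message objects.
KafkaProtobufDeserializer<Message> deserializer = new KafkaProtobufDeserializer<>();
deserializer.configure(
    Collections.singletonMap("schema.registry.url", "http://localhost:8081"), /* isKey= */ false);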
Use of io.confluent.kafkarest.entities.v3.ProduceResponse in project kafka-rest by confluentinc.
The class ProduceActionTest, method produceNoLimit.
@Test
public void produceNoLimit() throws Exception {
// config
final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
Properties properties = new Properties();
properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
// first record is 25 bytes long
properties.put(PRODUCE_MAX_BYTES_PER_SECOND, Integer.toString(30));
properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
properties.put(PRODUCE_RATE_LIMIT_ENABLED, "false");
// setup
ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput =
    getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
ProduceAction produceAction =
    getProduceAction(properties, chunkedOutputFactory, 2, countLimitProvider, bytesLimitProvider);
MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);
// expected results
ProduceResponse produceResponse = getProduceResponse(0);
ResultOrError resultOrErrorOK1 = ResultOrError.result(produceResponse);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// successful first produce
mockedChunkedOutput.write(resultOrErrorOK1);
mockedChunkedOutput.close();
ProduceResponse produceResponse2 = getProduceResponse(1);
ResultOrError resultOrErrorOK2 = ResultOrError.result(produceResponse2);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// successful second produce
mockedChunkedOutput.write(resultOrErrorOK2);
mockedChunkedOutput.close();
replay(mockedChunkedOutput, chunkedOutputFactory);
// run test
FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", requests);
FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", requests);
// check results
verify(
    requests, mockedChunkedOutput, countLimitProvider, bytesLimitProvider,
    rateLimiterForCount, rateLimiterForBytes);
}
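The getProduceRequestsMappingIterator helper is not shown in this excerpt. The streaming produce endpoint reads newline-delimited JSON, so a MappingIterator over such a body could be built roughly as below; the payload shape and mapper setup are assumptions, since the REST application configures its own ObjectMapper.

ObjectMapper mapper = new ObjectMapper();
// Illustrative two-record body; BINARY data is base64-encoded on the wire.
String body =
    "{\"value\":{\"type\":\"BINARY\",\"data\":\"Zm9v\"}}\n"
        + "{\"value\":{\"type\":\"BINARY\",\"data\":\"YmFy\"}}\n";
MappingIterator<ProduceRequest> requests =
    mapper.readerFor(ProduceRequest.class).readValues(body);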
Use of io.confluent.kafkarest.entities.v3.ProduceResponse in project kafka-rest by confluentinc.
The class ProduceActionTest, method produceWithCountLimit.
@Test
public void produceWithCountLimit() throws Exception {
// config
final int TOTAL_NUMBER_OF_PRODUCE_CALLS = 2;
Properties properties = new Properties();
properties.put(PRODUCE_MAX_REQUESTS_PER_SECOND, "100");
// first record is 25 bytes long
properties.put(PRODUCE_MAX_BYTES_PER_SECOND, Integer.toString(30));
properties.put(PRODUCE_RATE_LIMIT_CACHE_EXPIRY_MS, "3600000");
properties.put(PRODUCE_RATE_LIMIT_ENABLED, "true");
// setup
ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput =
    getChunkedOutput(chunkedOutputFactory, TOTAL_NUMBER_OF_PRODUCE_CALLS);
Provider<RequestRateLimiter> countLimitProvider = mock(Provider.class);
Provider<RequestRateLimiter> bytesLimitProvider = mock(Provider.class);
RequestRateLimiter rateLimiterForCount = mock(RequestRateLimiter.class);
RequestRateLimiter rateLimiterForBytes = mock(RequestRateLimiter.class);
expect(countLimitProvider.get()).andReturn(rateLimiterForCount);
expect(bytesLimitProvider.get()).andReturn(rateLimiterForBytes);
rateLimiterForCount.rateLimit(anyInt());
rateLimiterForBytes.rateLimit(anyInt());
rateLimiterForCount.rateLimit(anyInt());
EasyMock.expectLastCall().andThrow(new RateLimitExceededException());
replay(countLimitProvider, bytesLimitProvider, rateLimiterForCount, rateLimiterForBytes);
ProduceAction produceAction =
    getProduceAction(properties, chunkedOutputFactory, 1, countLimitProvider, bytesLimitProvider);
MappingIterator<ProduceRequest> requests = getProduceRequestsMappingIterator(TOTAL_NUMBER_OF_PRODUCE_CALLS);
// expected results
ProduceResponse produceResponse = getProduceResponse(0);
ResultOrError resultOrErrorOK = ResultOrError.result(produceResponse);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// successful first produce
mockedChunkedOutput.write(resultOrErrorOK);
mockedChunkedOutput.close();
ErrorResponse err =
    ErrorResponse.create(
        429,
        "Request rate limit exceeded: The rate limit of requests per second has been exceeded.");
ResultOrError resultOrErrorFail = ResultOrError.error(err);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
// failing second produce
mockedChunkedOutput.write(resultOrErrorFail);
// error close
mockedChunkedOutput.close();
replay(mockedChunkedOutput, chunkedOutputFactory);
// run test
FakeAsyncResponse fakeAsyncResponse = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse, "clusterId", "topicName", requests);
FakeAsyncResponse fakeAsyncResponse2 = new FakeAsyncResponse();
produceAction.produce(fakeAsyncResponse2, "clusterId", "topicName", requests);
// check results
verify(
    requests, mockedChunkedOutput, countLimitProvider, bytesLimitProvider,
    rateLimiterForCount, rateLimiterForBytes);
}
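Both rate-limit tests encode the same per-record control flow in their mock expectations: check the limiters, write a success entry, and on RateLimitExceededException write a 429 error entry instead, closing the output either way. A sketch of that flow, offered as an assumption about what ProduceAction does rather than its actual implementation:

private static void produceOneRecord(
    RequestRateLimiter countLimiter,
    RequestRateLimiter bytesLimiter,
    ChunkedOutput<ResultOrError> out,
    ProduceResponse success,
    int recordSizeBytes)
    throws IOException {
  try {
    // Assumption: one permit per record for the count limiter, plus the serialized size in bytes.
    countLimiter.rateLimit(1);
    bytesLimiter.rateLimit(recordSizeBytes);
    out.write(ResultOrError.result(success));
  } catch (RateLimitExceededException e) {
    out.write(
        ResultOrError.error(
            ErrorResponse.create(
                429,
                "Request rate limit exceeded: "
                    + "The rate limit of requests per second has been exceeded.")));
  } finally {
    out.close();
  }
}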