Use of io.confluent.kafkarest.entities.v2.ProduceResponse in project kafka-rest by confluentinc: class StreamingResponseTest, method testWriteToChunkedOutput.
@Test
public void testWriteToChunkedOutput() throws IOException {
String key = "foo";
String value = "bar";
ProduceRequest request = ProduceRequest.builder()
    .setKey(ProduceRequestData.builder().setFormat(EmbeddedFormat.AVRO)
        .setRawSchema("{\"type\": \"string\"}").setData(TextNode.valueOf(key)).build())
    .setValue(ProduceRequestData.builder().setFormat(EmbeddedFormat.AVRO)
        .setRawSchema("{\"type\": \"string\"}").setData(TextNode.valueOf(value)).build())
    .setOriginalSize(0L)
    .build();
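// Mock a request stream that yields exactly one ProduceRequest, is then exhausted, and is closed.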
MappingIterator<ProduceRequest> requestsMappingIterator = mock(MappingIterator.class);
expect(requestsMappingIterator.hasNext()).andReturn(true);
expect(requestsMappingIterator.nextValue()).andReturn(request);
expect(requestsMappingIterator.hasNext()).andReturn(false);
requestsMappingIterator.close();
replay(requestsMappingIterator);
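// The chunked output is expected to receive one successful result, still be open when checked, and be closed at the end.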
ChunkedOutputFactory mockedChunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = mock(ChunkedOutput.class);
ProduceResponse produceResponse = ProduceResponse.builder()
    .setClusterId("clusterId")
    .setTopicName("topicName")
    .setPartitionId(1)
    .setOffset(1L)
    .build();
ResultOrError resultOrError = ResultOrError.result(produceResponse);
expect(mockedChunkedOutputFactory.getChunkedOutput()).andReturn(mockedChunkedOutput);
mockedChunkedOutput.write(resultOrError);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
mockedChunkedOutput.close();
replay(mockedChunkedOutput, mockedChunkedOutputFactory);
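// Stream the request through StreamingResponse, mapping each input to an already-completed produce future, then verify all mock expectations.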
StreamingResponseFactory streamingResponseFactory = new StreamingResponseFactory(mockedChunkedOutputFactory);
StreamingResponse<ProduceRequest> streamingResponse = streamingResponseFactory.from(requestsMappingIterator);
CompletableFuture<ProduceResponse> produceResponseFuture = new CompletableFuture<>();
produceResponseFuture.complete(produceResponse);
FakeAsyncResponse response = new FakeAsyncResponse();
streamingResponse.compose(result -> produceResponseFuture).resume(response);
EasyMock.verify(mockedChunkedOutput);
EasyMock.verify(mockedChunkedOutputFactory);
EasyMock.verify(requestsMappingIterator);
}
Use of io.confluent.kafkarest.entities.v2.ProduceResponse in project kafka-rest by confluentinc: class StreamingResponseTest, method testGracePeriodExceededExceptionThrown.
@Test
public void testGracePeriodExceededExceptionThrown() throws IOException {
String key = "foo";
String value = "bar";
ProduceRequest request = ProduceRequest.builder()
    .setKey(ProduceRequestData.builder().setFormat(EmbeddedFormat.AVRO)
        .setRawSchema("{\"type\": \"string\"}").setData(TextNode.valueOf(key)).build())
    .setValue(ProduceRequestData.builder().setFormat(EmbeddedFormat.AVRO)
        .setRawSchema("{\"type\": \"string\"}").setData(TextNode.valueOf(value)).build())
    .setOriginalSize(0L)
    .build();
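// Mock setup mirrors testWriteToChunkedOutput: a single-request stream and a chunked output that receives one successful result before being closed.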
MappingIterator<ProduceRequest> requests = mock(MappingIterator.class);
expect(requests.hasNext()).andReturn(true);
expect(requests.nextValue()).andReturn(request);
expect(requests.hasNext()).andReturn(false);
requests.close();
replay(requests);
ChunkedOutputFactory mockedChunkedOutputFactory = mock(ChunkedOutputFactory.class);
ChunkedOutput<ResultOrError> mockedChunkedOutput = mock(ChunkedOutput.class);
ProduceResponse produceResponse = ProduceResponse.builder()
    .setClusterId("clusterId")
    .setTopicName("topicName")
    .setPartitionId(1)
    .setOffset(1L)
    .build();
ResultOrError resultOrError = ResultOrError.result(produceResponse);
expect(mockedChunkedOutputFactory.getChunkedOutput()).andReturn(mockedChunkedOutput);
mockedChunkedOutput.write(resultOrError);
expect(mockedChunkedOutput.isClosed()).andReturn(false);
mockedChunkedOutput.close();
replay(mockedChunkedOutputFactory);
replay(mockedChunkedOutput);
StreamingResponseFactory streamingResponseFactory = new StreamingResponseFactory(mockedChunkedOutputFactory);
StreamingResponse<ProduceRequest> streamingResponse = streamingResponseFactory.from(requests);
CompletableFuture<ProduceResponse> produceResponseFuture = new CompletableFuture<>();
produceResponseFuture.complete(produceResponse);
FakeAsyncResponse response = new FakeAsyncResponse();
streamingResponse.compose(result -> produceResponseFuture).resume(response);
EasyMock.verify(mockedChunkedOutput);
EasyMock.verify(mockedChunkedOutputFactory);
EasyMock.verify(requests);
}
Use of io.confluent.kafkarest.entities.v2.ProduceResponse in project kafka-rest by confluentinc: class ProduceAction, method produce.
private CompletableFuture<ProduceResponse> produce(
    String clusterId, String topicName, ProduceRequest request, ProduceController controller) {
try {
produceRateLimiters.rateLimit(clusterId, request.getOriginalSize());
} catch (RateLimitExceededException e) {
// KREST-4356 Use our own CompletionException that will avoid the costly stack trace fill.
throw new StacklessCompletionException(e);
}
Instant requestInstant = Instant.now();
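// Resolve the key and value schemas (if any), determine the effective formats, and serialize key and value.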
Optional<RegisteredSchema> keySchema =
    request.getKey().flatMap(key -> getSchema(topicName, /* isKey= */ true, key));
Optional<EmbeddedFormat> keyFormat = keySchema.map(schema -> Optional.of(schema.getFormat()))
    .orElse(request.getKey().flatMap(ProduceRequestData::getFormat));
Optional<ByteString> serializedKey =
    serialize(topicName, keyFormat, keySchema, request.getKey(), /* isKey= */ true);
Optional<RegisteredSchema> valueSchema =
    request.getValue().flatMap(value -> getSchema(topicName, /* isKey= */ false, value));
Optional<EmbeddedFormat> valueFormat = valueSchema.map(schema -> Optional.of(schema.getFormat()))
    .orElse(request.getValue().flatMap(ProduceRequestData::getFormat));
Optional<ByteString> serializedValue =
    serialize(topicName, valueFormat, valueSchema, request.getValue(), /* isKey= */ false);
recordRequestMetrics(request.getOriginalSize());
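// Produce asynchronously; on failure record error latency and rethrow as a stackless completion exception, otherwise map the result to a ProduceResponse and record response latency.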
CompletableFuture<ProduceResult> produceResult = controller.produce(
    clusterId, topicName, request.getPartitionId(),
    request.getHeaders().stream().collect(PRODUCE_REQUEST_HEADER_COLLECTOR),
    serializedKey, serializedValue, request.getTimestamp().orElse(Instant.now()));
return produceResult.handleAsync((result, error) -> {
if (error != null) {
long latency = Duration.between(requestInstant, Instant.now()).toMillis();
recordErrorMetrics(latency);
throw new StacklessCompletionException(error);
}
return result;
}, executorService).thenApplyAsync(result -> {
ProduceResponse response = toProduceResponse(clusterId, topicName, keyFormat, keySchema, valueFormat, valueSchema, result);
long latency = Duration.between(requestInstant, result.getCompletionTimestamp()).toMillis();
recordResponseMetrics(latency);
return response;
}, executorService);
}
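The KREST-4356 comment above refers to avoiding the cost of capturing a stack trace for an exception thrown purely for control flow. The actual StacklessCompletionException class from kafka-rest is not shown here; a minimal sketch of the usual Java pattern (overriding fillInStackTrace), assuming the class only needs to wrap a cause, could look like this:

import java.util.concurrent.CompletionException;

// Hypothetical sketch, not the kafka-rest implementation: a CompletionException
// whose construction skips the expensive stack-trace capture.
public class StacklessCompletionException extends CompletionException {

  public StacklessCompletionException(Throwable cause) {
    super(cause);
  }

  @Override
  public synchronized Throwable fillInStackTrace() {
    // Skip stack capture; only the wrapped cause is interesting to callers.
    return this;
  }
}

With this pattern callers only pay for the stack trace of the original cause, which matters on hot produce paths such as rate-limit rejections.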
Use of io.confluent.kafkarest.entities.v2.ProduceResponse in project kafka-rest by confluentinc: class AvroProducerTest, method testProduceToPartition.
protected <K, V> void testProduceToPartition(
    List<SchemaPartitionProduceRecord> records,
    List<PartitionOffset> offsetResponse,
    Map<String, String> queryParams) {
SchemaPartitionProduceRequest payload = SchemaPartitionProduceRequest.create(
    records,
    /* keySchema= */ null,
    /* keySchemaId= */ null,
    /* valueSchema= */ valueSchemaStr,
    /* valueSchemaId= */ null);
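// POST the Avro payload to the partition-level produce endpoint and expect an OK response.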
Response response = request("/topics/" + topicName + "/partitions/0", queryParams).post(Entity.entity(payload, Versions.KAFKA_V2_JSON_AVRO));
assertOKResponse(response, Versions.KAFKA_V2_JSON);
final ProduceResponse poffsetResponse = TestUtils.tryReadEntityOrLog(response, ProduceResponse.class);
assertEquals(offsetResponse, poffsetResponse.getOffsets());
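// Consume the topic with Avro deserializers to confirm the records were actually written, then check the id under which the value schema was registered.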
TestUtils.assertTopicContains(plaintextBrokerList, topicName,
    payload.toProduceRequest().getRecords(), 0, KafkaAvroDeserializer.class.getName(),
    KafkaAvroDeserializer.class.getName(), deserializerProps, false);
assertEquals((Integer) 1, poffsetResponse.getValueSchemaId());
}
Use of io.confluent.kafkarest.entities.v2.ProduceResponse in project kafka-rest by confluentinc: class AbstractProducerTest, method testProduceToTopic.
protected <K, V> void testProduceToTopic(
    String topicName, TopicRequestT request, String keyDeserializerClassName,
    String valueDeserializerClassName, List<PartitionOffset> offsetResponses,
    boolean matchPartitions, Map<String, String> queryParams, List<ProduceRecord<K, V>> expected) {
Response response = request("/topics/" + topicName, queryParams).post(Entity.entity(request, getEmbeddedContentType()));
assertOKResponse(response, Versions.KAFKA_V2_JSON);
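// Read the v2 ProduceResponse and compare the returned offsets with the expected ones; when matchPartitions is set, partition assignments are checked as well.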
final ProduceResponse produceResponse = TestUtils.tryReadEntityOrLog(response, ProduceResponse.class);
if (matchPartitions) {
TestUtils.assertPartitionsEqual(offsetResponses, produceResponse.getOffsets());
}
TestUtils.assertPartitionOffsetsEqual(offsetResponses, produceResponse.getOffsets());
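// Finally, consume the topic and verify that the produced records match the expected key/value pairs.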
TestUtils.assertTopicContains(plaintextBrokerList, topicName, expected, null,
    keyDeserializerClassName, valueDeserializerClassName, true);
}