Use of io.confluent.kafkarest.entities.v3.ProduceRequest.ProduceRequestData in project kafka-rest by confluentinc.
From class ProduceActionIntegrationTest, method produceJsonBatch:
@Test
public void produceJsonBatch() throws Exception {
  String clusterId = testEnv.kafkaCluster().getClusterId();
  // Build 1000 JSON-formatted produce requests.
  ArrayList<ProduceRequest> requests = new ArrayList<>();
  for (int i = 0; i < 1000; i++) {
    requests.add(
        ProduceRequest.builder()
            .setKey(ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.JSON)
                .setData(TextNode.valueOf("key-" + i))
                .build())
            .setValue(ProduceRequestData.builder()
                .setFormat(EmbeddedFormat.JSON)
                .setData(TextNode.valueOf("value-" + i))
                .build())
            .setOriginalSize(0L)
            .build());
  }
  // The streaming endpoint accepts concatenated JSON objects as a single request body.
  StringBuilder batch = new StringBuilder();
  ObjectMapper objectMapper = testEnv.kafkaRest().getObjectMapper();
  for (ProduceRequest produceRequest : requests) {
    batch.append(objectMapper.writeValueAsString(produceRequest));
  }
  Response response =
      testEnv.kafkaRest().target()
          .path("/v3/clusters/" + clusterId + "/topics/" + TOPIC_NAME + "/records")
          .request()
          .accept(MediaType.APPLICATION_JSON)
          .post(Entity.entity(batch.toString(), MediaType.APPLICATION_JSON));
  assertEquals(Status.OK.getStatusCode(), response.getStatus());
  // One ProduceResponse comes back per request; verify each record landed in Kafka.
  List<ProduceResponse> actual = readProduceResponses(response);
  KafkaJsonDeserializer<Object> deserializer = new KafkaJsonDeserializer<>();
  deserializer.configure(emptyMap(), /* isKey= */ false);
  for (int i = 0; i < 1000; i++) {
    ConsumerRecord<Object, Object> produced =
        testEnv.kafkaCluster().getRecord(
            TOPIC_NAME, actual.get(i).getPartitionId(), actual.get(i).getOffset(),
            deserializer, deserializer);
    assertEquals(
        requests.get(i).getKey().map(ProduceRequestData::getData).map(JsonNode::asText).orElse(null),
        produced.key());
    assertEquals(
        requests.get(i).getValue().map(ProduceRequestData::getData).map(JsonNode::asText).orElse(null),
        produced.value());
  }
}
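As a side note, each element of the batch serializes to one small JSON object, and the concatenated objects form the streaming request body. A minimal sketch that prints the wire format of a single request (assuming an ObjectMapper configured like the one obtained from testEnv above; the exact output shape shown in the comment is illustrative, not taken from this test):

// Sketch: print the JSON body that one batch element contributes.
static void printWireFormat(ObjectMapper mapper) throws JsonProcessingException {
  ProduceRequest single =
      ProduceRequest.builder()
          .setKey(ProduceRequestData.builder()
              .setFormat(EmbeddedFormat.JSON)
              .setData(TextNode.valueOf("key-0"))
              .build())
          .setValue(ProduceRequestData.builder()
              .setFormat(EmbeddedFormat.JSON)
              .setData(TextNode.valueOf("value-0"))
              .build())
          .setOriginalSize(0L)
          .build();
  // Illustrative shape: {"key":{"type":"JSON","data":"key-0"},"value":{"type":"JSON","data":"value-0"}}
  System.out.println(mapper.writeValueAsString(single));
}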
Use of io.confluent.kafkarest.entities.v3.ProduceRequest.ProduceRequestData in project kafka-rest by confluentinc.
From class ProduceAction, method produce:
private CompletableFuture<ProduceResponse> produce(
    String clusterId, String topicName, ProduceRequest request, ProduceController controller) {
  try {
    produceRateLimiters.rateLimit(clusterId, request.getOriginalSize());
  } catch (RateLimitExceededException e) {
    // KREST-4356 Use our own CompletionException that will avoid the costly stack trace fill.
    throw new StacklessCompletionException(e);
  }
  Instant requestInstant = Instant.now();
  // Resolve the schema (if any) and format for the key, then serialize it.
  Optional<RegisteredSchema> keySchema =
      request.getKey().flatMap(key -> getSchema(topicName, /* isKey= */ true, key));
  Optional<EmbeddedFormat> keyFormat =
      keySchema
          .map(schema -> Optional.of(schema.getFormat()))
          .orElse(request.getKey().flatMap(ProduceRequestData::getFormat));
  Optional<ByteString> serializedKey =
      serialize(topicName, keyFormat, keySchema, request.getKey(), /* isKey= */ true);
  // Same for the value.
  Optional<RegisteredSchema> valueSchema =
      request.getValue().flatMap(value -> getSchema(topicName, /* isKey= */ false, value));
  Optional<EmbeddedFormat> valueFormat =
      valueSchema
          .map(schema -> Optional.of(schema.getFormat()))
          .orElse(request.getValue().flatMap(ProduceRequestData::getFormat));
  Optional<ByteString> serializedValue =
      serialize(topicName, valueFormat, valueSchema, request.getValue(), /* isKey= */ false);
  recordRequestMetrics(request.getOriginalSize());
  CompletableFuture<ProduceResult> produceResult =
      controller.produce(
          clusterId,
          topicName,
          request.getPartitionId(),
          request.getHeaders().stream().collect(PRODUCE_REQUEST_HEADER_COLLECTOR),
          serializedKey,
          serializedValue,
          request.getTimestamp().orElse(Instant.now()));
  // Record error or response latency metrics, then map the result to a response.
  return produceResult
      .handleAsync(
          (result, error) -> {
            if (error != null) {
              long latency = Duration.between(requestInstant, Instant.now()).toMillis();
              recordErrorMetrics(latency);
              throw new StacklessCompletionException(error);
            }
            return result;
          },
          executorService)
      .thenApplyAsync(
          result -> {
            ProduceResponse response =
                toProduceResponse(
                    clusterId, topicName, keyFormat, keySchema, valueFormat, valueSchema, result);
            long latency =
                Duration.between(requestInstant, result.getCompletionTimestamp()).toMillis();
            recordResponseMetrics(latency);
            return response;
          },
          executorService);
}
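The KREST-4356 comment above refers to avoiding the cost of capturing a stack trace on the produce hot path. A minimal sketch of that idea; the actual StacklessCompletionException in kafka-rest may differ in detail:

import java.util.concurrent.CompletionException;

// Sketch only: a CompletionException that skips stack-trace capture.
public class StacklessCompletionException extends CompletionException {
  public StacklessCompletionException(Throwable cause) {
    super(cause);
  }

  @Override
  public synchronized Throwable fillInStackTrace() {
    // Walking the call stack dominates exception-construction cost;
    // returning immediately avoids it. The wrapped cause keeps its own trace.
    return this;
  }
}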
Use of io.confluent.kafkarest.entities.v3.ProduceRequest.ProduceRequestData in project kafka-rest by confluentinc.
From class ProduceActionTest, method getProduceRequestsMappingIteratorWithSchemaNeeded:
private static MappingIterator<ProduceRequest> getProduceRequestsMappingIteratorWithSchemaNeeded()
    throws IOException {
  MappingIterator<ProduceRequest> requests = mock(MappingIterator.class);
  // A single AVRO-keyed request carrying a raw schema, so schema resolution is needed.
  ProduceRequestData key =
      ProduceRequestData.builder()
          .setFormat(EmbeddedFormat.AVRO)
          .setData(TextNode.valueOf("bob"))
          .setRawSchema("bob")
          .build();
  ProduceRequest request = ProduceRequest.builder().setKey(key).setOriginalSize(25L).build();
  // EasyMock expectations: the iterator yields exactly one request, then is closed.
  expect(requests.hasNext()).andReturn(true).times(1);
  expect(requests.nextValue()).andReturn(request).times(1);
  expect(requests.hasNext()).andReturn(false).times(1);
  requests.close();
  replay(requests);
  return requests;
}
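The expectations above encode the exact drain sequence the code under test is expected to perform: hasNext() returns true once, nextValue() yields the single request, hasNext() then returns false, and the iterator is closed. A sketch of a consumer loop that would satisfy them (consumeRequests is a hypothetical helper, not part of ProduceActionTest):

// Hypothetical consumer loop matching the recorded expectations:
// one hasNext()/nextValue() pair, a final false hasNext(), then close().
static void consumeRequests(MappingIterator<ProduceRequest> requests) throws IOException {
  while (requests.hasNext()) {
    ProduceRequest next = requests.nextValue();
    // ... hand `next` to the produce pipeline under test ...
  }
  requests.close();
}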