Use of io.confluent.kafkarest.controllers.ProduceController in project kafka-rest by confluentinc.
Class ProduceActionTest, method getProduceAction.
private static ProduceAction getProduceAction(
    ProduceRateLimiters produceRateLimiters, ChunkedOutputFactory chunkedOutputFactory,
    int times, int producerId, boolean errorSchemaRegistry) {
  Provider<SchemaManager> schemaManagerProvider = mock(Provider.class);
  SchemaManager schemaManagerMock = mock(SchemaManager.class);
  expect(schemaManagerProvider.get()).andReturn(schemaManagerMock);
  expect(schemaManagerMock.getSchema(
          "topicName", Optional.of(EmbeddedFormat.AVRO), Optional.empty(), Optional.empty(),
          Optional.empty(), Optional.empty(), Optional.of("bob"), true))
      .andThrow(Errors.invalidPayloadException(
          "Schema Registry must be configured when using schemas."));
  replay(schemaManagerProvider, schemaManagerMock);

  Provider<ProducerMetrics> producerMetricsProvider = mock(Provider.class);
  getProducerMetricsProvider(producerMetricsProvider);
  Provider<RecordSerializer> recordSerializerProvider =
      getRecordSerializerProvider(errorSchemaRegistry);
  Provider<ProduceController> produceControllerProvider = mock(Provider.class);
  ProduceController produceController = getProduceControllerMock(produceControllerProvider);
  setupExpectsMockCallsForProduce(produceController, times, producerId);
  replay(producerMetricsProvider, produceControllerProvider, produceController);

  StreamingResponseFactory streamingResponseFactory =
      new StreamingResponseFactory(chunkedOutputFactory);

  // Run produce tasks on the calling thread (direct executor) so that EasyMock
  // can observe the call counts.
  ExecutorService executorService = MoreExecutors.newDirectExecutorService();
  ProduceAction produceAction = new ProduceAction(
      schemaManagerProvider, recordSerializerProvider, produceControllerProvider,
      producerMetricsProvider, streamingResponseFactory, produceRateLimiters, executorService);
  produceRateLimiters.clear();
  return produceAction;
}
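For orientation, a hypothetical JUnit-style caller of this helper could look like the sketch below. The ChunkedOutputFactory and ProduceRateLimiters mocks are placeholders for the real fixtures in ProduceActionTest (not shown here), and the sketch assumes both types are mockable with EasyMock's class mocking. Note that clear() is recorded before replay because the helper itself calls it.

// Hypothetical wiring for the helper above; both collaborators are EasyMock mocks.
@Test
public void produceActionIsBuiltWithMockedCollaborators() throws Exception {
  ChunkedOutputFactory chunkedOutputFactory = mock(ChunkedOutputFactory.class);
  ProduceRateLimiters produceRateLimiters = mock(ProduceRateLimiters.class);
  produceRateLimiters.clear(); // record: getProduceAction calls clear() once
  expectLastCall();
  replay(chunkedOutputFactory, produceRateLimiters);

  ProduceAction produceAction = getProduceAction(
      produceRateLimiters, chunkedOutputFactory,
      /* times= */ 1, /* producerId= */ 1, /* errorSchemaRegistry= */ true);

  verify(produceRateLimiters); // clear() was invoked exactly once
}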
Use of io.confluent.kafkarest.controllers.ProduceController in project kafka-rest by confluentinc.
Class ProduceAction, method produce.
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@PerformanceMetric("v3.produce.produce-to-topic")
@ResourceName("api.v3.produce.produce-to-topic")
public void produce(
    @Suspended AsyncResponse asyncResponse,
    @PathParam("clusterId") String clusterId,
    @PathParam("topicName") String topicName,
    MappingIterator<ProduceRequest> requests)
    throws Exception {
  if (requests == null) {
    throw Errors.invalidPayloadException("Null input provided. Data is required.");
  }
  ProduceController controller = produceControllerProvider.get();
  streamingResponseFactory
      .from(requests)
      .compose(request -> produce(clusterId, topicName, request, controller))
      .resume(asyncResponse);
}
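Client-side, this maps to the streaming produce endpoint of the v3 API. Below is a minimal single-record sketch, assuming a REST Proxy listening on localhost:8082 and a cluster id of my-cluster (both placeholders); the endpoint accepts a concatenated stream of JSON produce requests and streams back one result per record.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Minimal client sketch for the endpoint above; host, port, cluster id, and
// topic name are placeholder assumptions.
public class ProduceClientSketch {
  public static void main(String[] args) throws Exception {
    String body = "{\"value\":{\"type\":\"JSON\",\"data\":{\"hello\":\"world\"}}}";
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(
            "http://localhost:8082/v3/clusters/my-cluster/topics/my-topic/records"))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();
    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.statusCode() + " " + response.body());
  }
}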
Use of io.confluent.kafkarest.controllers.ProduceController in project kafka-rest by confluentinc.
Class ProduceAction, method produce (private overload).
private CompletableFuture<ProduceResponse> produce(
    String clusterId, String topicName, ProduceRequest request, ProduceController controller) {
  try {
    produceRateLimiters.rateLimit(clusterId, request.getOriginalSize());
  } catch (RateLimitExceededException e) {
    // KREST-4356: use our own CompletionException that avoids the costly stack-trace fill.
    throw new StacklessCompletionException(e);
  }

  Instant requestInstant = Instant.now();

  Optional<RegisteredSchema> keySchema =
      request.getKey().flatMap(key -> getSchema(topicName, /* isKey= */ true, key));
  Optional<EmbeddedFormat> keyFormat =
      keySchema
          .map(schema -> Optional.of(schema.getFormat()))
          .orElse(request.getKey().flatMap(ProduceRequestData::getFormat));
  Optional<ByteString> serializedKey =
      serialize(topicName, keyFormat, keySchema, request.getKey(), /* isKey= */ true);

  Optional<RegisteredSchema> valueSchema =
      request.getValue().flatMap(value -> getSchema(topicName, /* isKey= */ false, value));
  Optional<EmbeddedFormat> valueFormat =
      valueSchema
          .map(schema -> Optional.of(schema.getFormat()))
          .orElse(request.getValue().flatMap(ProduceRequestData::getFormat));
  Optional<ByteString> serializedValue =
      serialize(topicName, valueFormat, valueSchema, request.getValue(), /* isKey= */ false);

  recordRequestMetrics(request.getOriginalSize());

  CompletableFuture<ProduceResult> produceResult =
      controller.produce(
          clusterId,
          topicName,
          request.getPartitionId(),
          request.getHeaders().stream().collect(PRODUCE_REQUEST_HEADER_COLLECTOR),
          serializedKey,
          serializedValue,
          request.getTimestamp().orElse(Instant.now()));

  return produceResult
      .handleAsync(
          (result, error) -> {
            if (error != null) {
              long latency = Duration.between(requestInstant, Instant.now()).toMillis();
              recordErrorMetrics(latency);
              throw new StacklessCompletionException(error);
            }
            return result;
          },
          executorService)
      .thenApplyAsync(
          result -> {
            ProduceResponse response =
                toProduceResponse(
                    clusterId, topicName, keyFormat, keySchema, valueFormat, valueSchema, result);
            long latency =
                Duration.between(requestInstant, result.getCompletionTimestamp()).toMillis();
            recordResponseMetrics(latency);
            return response;
          },
          executorService);
}
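The KREST-4356 comment refers to skipping Throwable's expensive stack capture on hot paths such as rate-limit rejections. A minimal sketch of how such an exception can be written, assuming that intent (the real StacklessCompletionException in kafka-rest may differ in detail):

import java.util.concurrent.CompletionException;

// Sketch of a stack-trace-free CompletionException, assuming the intent of the
// KREST-4356 comment above.
public class StacklessCompletionException extends CompletionException {
  public StacklessCompletionException(Throwable cause) {
    super(cause);
  }

  @Override
  public synchronized Throwable fillInStackTrace() {
    // Skip the expensive stack capture; the wrapped cause keeps its own trace.
    return this;
  }
}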
Use of io.confluent.kafkarest.controllers.ProduceController in project kafka-rest by confluentinc.
Class ProduceActionTest, method getProduceControllerMock.
private static ProduceController getProduceControllerMock(
    Provider<ProduceController> produceControllerProvider) {
  ProduceController produceController = mock(ProduceController.class);
  expect(produceControllerProvider.get()).andReturn(produceController).anyTimes();
  return produceController;
}
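This helper only records the expectation on the provider; switching to replay mode is left to the caller, as getProduceAction does. For readers unfamiliar with EasyMock, here is a self-contained sketch of the same record/replay/verify cycle, using a plain Supplier as a stand-in for the injected Provider (not part of kafka-rest):

import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.mock;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.function.Supplier;

// Standalone illustration of EasyMock's record/replay pattern used above.
public class RecordReplaySketch {
  public static void main(String[] args) {
    @SuppressWarnings("unchecked")
    Supplier<String> provider = mock(Supplier.class);
    expect(provider.get()).andReturn("hello").anyTimes(); // record phase
    replay(provider);                                     // switch to replay
    System.out.println(provider.get());                   // prints "hello"
    verify(provider);                                     // check expectations
  }
}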