Use of io.confluent.kafkarest.entities.RegisteredSchema in project kafka-rest by confluentinc: class ProduceAction, method produce.
private CompletableFuture<ProduceResponse> produce(
    String clusterId, String topicName, ProduceRequest request, ProduceController controller) {
  try {
    produceRateLimiters.rateLimit(clusterId, request.getOriginalSize());
  } catch (RateLimitExceededException e) {
    // KREST-4356 Use our own CompletionException that will avoid the costly stack trace fill.
    throw new StacklessCompletionException(e);
  }
  Instant requestInstant = Instant.now();

  // Resolve the key schema (if any); the key format comes from the resolved schema,
  // falling back to the format specified in the request.
  Optional<RegisteredSchema> keySchema =
      request.getKey().flatMap(key -> getSchema(topicName, /* isKey= */ true, key));
  Optional<EmbeddedFormat> keyFormat =
      keySchema
          .map(schema -> Optional.of(schema.getFormat()))
          .orElse(request.getKey().flatMap(ProduceRequestData::getFormat));
  Optional<ByteString> serializedKey =
      serialize(topicName, keyFormat, keySchema, request.getKey(), /* isKey= */ true);

  // Same resolution for the value schema and format.
  Optional<RegisteredSchema> valueSchema =
      request.getValue().flatMap(value -> getSchema(topicName, /* isKey= */ false, value));
  Optional<EmbeddedFormat> valueFormat =
      valueSchema
          .map(schema -> Optional.of(schema.getFormat()))
          .orElse(request.getValue().flatMap(ProduceRequestData::getFormat));
  Optional<ByteString> serializedValue =
      serialize(topicName, valueFormat, valueSchema, request.getValue(), /* isKey= */ false);

  recordRequestMetrics(request.getOriginalSize());
  CompletableFuture<ProduceResult> produceResult =
      controller.produce(
          clusterId,
          topicName,
          request.getPartitionId(),
          request.getHeaders().stream().collect(PRODUCE_REQUEST_HEADER_COLLECTOR),
          serializedKey,
          serializedValue,
          request.getTimestamp().orElse(Instant.now()));

  // Record error/success latency off the request thread, on the shared executor.
  return produceResult
      .handleAsync(
          (result, error) -> {
            if (error != null) {
              long latency = Duration.between(requestInstant, Instant.now()).toMillis();
              recordErrorMetrics(latency);
              throw new StacklessCompletionException(error);
            }
            return result;
          },
          executorService)
      .thenApplyAsync(
          result -> {
            ProduceResponse response =
                toProduceResponse(
                    clusterId, topicName, keyFormat, keySchema, valueFormat, valueSchema, result);
            long latency =
                Duration.between(requestInstant, result.getCompletionTimestamp()).toMillis();
            recordResponseMetrics(latency);
            return response;
          },
          executorService);
}
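The produce path touches RegisteredSchema only through getFormat(); the tests below also use the create(subject, schemaId, schemaVersion, schema) factory and getSchemaId(). A minimal sketch of a compatible value type, reconstructed from those call sites alone; the real class in kafka-rest is an AutoValue entity and may differ, and the getFormat() mapping below is an assumption:

import io.confluent.kafka.schemaregistry.ParsedSchema;

final class RegisteredSchemaSketch {
  private final String subject;      // Schema Registry subject, e.g. "my-topic-key"
  private final int schemaId;        // globally unique id assigned by Schema Registry
  private final int schemaVersion;   // version of the schema under its subject
  private final ParsedSchema schema; // parsed Avro / JSON Schema / Protobuf schema

  private RegisteredSchemaSketch(
      String subject, int schemaId, int schemaVersion, ParsedSchema schema) {
    this.subject = subject;
    this.schemaId = schemaId;
    this.schemaVersion = schemaVersion;
    this.schema = schema;
  }

  static RegisteredSchemaSketch create(
      String subject, int schemaId, int schemaVersion, ParsedSchema schema) {
    return new RegisteredSchemaSketch(subject, schemaId, schemaVersion, schema);
  }

  int getSchemaId() {
    return schemaId;
  }

  // Assumed mapping: ParsedSchema.schemaType() returns "AVRO", "JSON", or "PROTOBUF",
  // which here is translated into kafka-rest's EmbeddedFormat enum.
  EmbeddedFormat getFormat() {
    switch (schema.schemaType()) {
      case "AVRO":
        return EmbeddedFormat.AVRO;
      case "JSON":
        return EmbeddedFormat.JSONSCHEMA;
      case "PROTOBUF":
        return EmbeddedFormat.PROTOBUF;
      default:
        throw new IllegalArgumentException("Unknown schema type: " + schema.schemaType());
    }
  }
}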
Use of io.confluent.kafkarest.entities.RegisteredSchema in project kafka-rest by confluentinc: class SchemaManagerImplTest, method getSchema_avro_latestSchema_subject.
@Test
public void getSchema_avro_latestSchema_subject() throws Exception {
  String subject = "my-subject";
  ParsedSchema schema = new AvroSchema("{\"type\": \"int\"}");
  int schemaId = schemaRegistryClient.register(subject, schema);
  int schemaVersion = schemaRegistryClient.getVersion(subject, schema);

  RegisteredSchema actual =
      schemaManager.getSchema(
          TOPIC_NAME,
          /* format= */ Optional.empty(),
          /* subject= */ Optional.of(subject),
          /* subjectNameStrategy= */ Optional.empty(),
          /* schemaId= */ Optional.empty(),
          /* schemaVersion= */ Optional.empty(),
          /* rawSchema= */ Optional.empty(),
          /* isKey= */ true);

  assertEquals(RegisteredSchema.create(subject, schemaId, schemaVersion, schema), actual);
}
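The fixture fields schemaRegistryClient and schemaManager are not shown in this section. A plausible setup, assuming MockSchemaRegistryClient from the schema-registry client library and a SchemaManagerImpl wired with the default TopicNameStrategy; the exact SchemaManagerImpl constructor and the fixture constants are assumptions:

import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.subject.TopicNameStrategy;
import org.junit.jupiter.api.BeforeEach;

// Hypothetical fixture constants; under TopicNameStrategy the key/value subjects used by
// the other tests would be "<topic>-key" and "<topic>-value".
private static final String TOPIC_NAME = "topic-1";
private static final String KEY_SUBJECT = TOPIC_NAME + "-key";
private static final String VALUE_SUBJECT = TOPIC_NAME + "-value";

private SchemaRegistryClient schemaRegistryClient;
private SchemaManager schemaManager;

@BeforeEach
public void setUp() {
  // In-memory Schema Registry stand-in; register() and getVersion() behave like the real client.
  schemaRegistryClient = new MockSchemaRegistryClient();
  // Assumed wiring: the strategy supplies the default subject when none is passed explicitly.
  schemaManager = new SchemaManagerImpl(schemaRegistryClient, new TopicNameStrategy());
}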
Use of io.confluent.kafkarest.entities.RegisteredSchema in project kafka-rest by confluentinc: class SchemaManagerImplTest, method getSchema_jsonschema_rawSchema.
@Test
public void getSchema_jsonschema_rawSchema() throws Exception {
  RegisteredSchema actual =
      schemaManager.getSchema(
          TOPIC_NAME,
          /* format= */ Optional.of(EmbeddedFormat.JSONSCHEMA),
          /* subject= */ Optional.empty(),
          /* subjectNameStrategy= */ Optional.empty(),
          /* schemaId= */ Optional.empty(),
          /* schemaVersion= */ Optional.empty(),
          /* rawSchema= */ Optional.of("{\"type\": \"string\"}"),
          /* isKey= */ true);

  ParsedSchema schema = schemaRegistryClient.getSchemaById(actual.getSchemaId());
  int schemaId = schemaRegistryClient.getId(KEY_SUBJECT, schema);
  int schemaVersion = schemaRegistryClient.getVersion(KEY_SUBJECT, schema);
  assertEquals(RegisteredSchema.create(KEY_SUBJECT, schemaId, schemaVersion, schema), actual);
}
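For reference, the raw string in the request parses into a JsonSchema, which is presumably how the manager handles EmbeddedFormat.JSONSCHEMA before auto-registering the result under the key subject; a short illustration, assuming the JsonSchema class from the schema-registry json module:

import io.confluent.kafka.schemaregistry.json.JsonSchema;

// Parsing the raw schema string the way the manager presumably does. The manager must
// register the parsed schema under the key subject, or the test's getId()/getVersion()
// lookups above would fail.
JsonSchema parsed = new JsonSchema("{\"type\": \"string\"}");
System.out.println(parsed.schemaType());      // "JSON"
System.out.println(parsed.canonicalString()); // {"type":"string"}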
Use of io.confluent.kafkarest.entities.RegisteredSchema in project kafka-rest by confluentinc: class SchemaManagerImplTest, method getSchema_avro_latestSchema_subjectNameStrategy.
@Test
public void getSchema_avro_latestSchema_subjectNameStrategy() throws Exception {
  ParsedSchema schema = new AvroSchema("{\"type\": \"int\"}");
  SubjectNameStrategy strategy = new MySubjectNameStrategy();
  String subject = strategy.subjectName(TOPIC_NAME, /* isKey= */ true, /* schema= */ null);
  int schemaId = schemaRegistryClient.register(subject, schema);
  int schemaVersion = schemaRegistryClient.getVersion(subject, schema);

  RegisteredSchema actual =
      schemaManager.getSchema(
          TOPIC_NAME,
          /* format= */ Optional.empty(),
          /* subject= */ Optional.empty(),
          /* subjectNameStrategy= */ Optional.of(strategy),
          /* schemaId= */ Optional.empty(),
          /* schemaVersion= */ Optional.empty(),
          /* rawSchema= */ Optional.empty(),
          /* isKey= */ true);

  assertEquals(RegisteredSchema.create(subject, schemaId, schemaVersion, schema), actual);
}
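MySubjectNameStrategy is a test helper whose body is not shown in this section. A hypothetical stand-in consistent with the call above, which passes schema = null and therefore requires a strategy that ignores the schema; the configure override assumes the strategy interface extends Kafka's Configurable, as the Confluent serializer strategies do:

import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.serializers.subject.strategy.SubjectNameStrategy;
import java.util.Map;

final class MySubjectNameStrategy implements SubjectNameStrategy {
  @Override
  public String subjectName(String topicName, boolean isKey, ParsedSchema schema) {
    // Derive the subject from the topic and key/value role only; schema may be null.
    return "my-prefix-" + topicName + (isKey ? "-key" : "-value");
  }

  @Override
  public void configure(Map<String, ?> configs) {
    // Nothing to configure in this sketch.
  }
}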
Use of io.confluent.kafkarest.entities.RegisteredSchema in project kafka-rest by confluentinc: class SchemaManagerImplTest, method getSchema_avro_latestSchema_notIsKey.
@Test
public void getSchema_avro_latestSchema_notIsKey() throws Exception {
  ParsedSchema schema = new AvroSchema("{\"type\": \"int\"}");
  int schemaId = schemaRegistryClient.register(VALUE_SUBJECT, schema);
  int schemaVersion = schemaRegistryClient.getVersion(VALUE_SUBJECT, schema);

  RegisteredSchema actual =
      schemaManager.getSchema(
          TOPIC_NAME,
          /* format= */ Optional.empty(),
          /* subject= */ Optional.empty(),
          /* subjectNameStrategy= */ Optional.empty(),
          /* schemaId= */ Optional.empty(),
          /* schemaVersion= */ Optional.empty(),
          /* rawSchema= */ Optional.empty(),
          /* isKey= */ false);

  assertEquals(RegisteredSchema.create(VALUE_SUBJECT, schemaId, schemaVersion, schema), actual);
}
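The latestSchema tests all omit schemaId, schemaVersion, and rawSchema, so the manager resolves the newest schema registered under the derived subject. A minimal sketch of that lookup against the standard SchemaRegistryClient API; the actual SchemaManagerImpl code is not shown in this section, and error handling and format validation are elided:

import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

// Sketch: fetch the latest metadata for a subject and wrap it as a RegisteredSchema.
static RegisteredSchema getLatestSchema(SchemaRegistryClient client, String subject)
    throws Exception {
  SchemaMetadata metadata = client.getLatestSchemaMetadata(subject);
  ParsedSchema schema = client.getSchemaById(metadata.getId());
  return RegisteredSchema.create(subject, metadata.getId(), metadata.getVersion(), schema);
}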