Use of io.confluent.ksql.serde.Format in project ksql by confluentinc.
Class InsertValuesExecutor, method ensureKeySchemasMatch.
/**
* Ensures that the key schema that we generate will be identical
* to the schema that is registered in schema registry, if it exists.
* Otherwise, it is possible that we will publish messages with a new
* schemaID, meaning that logically identical keys might be routed to
* different partitions.
*/
private static void ensureKeySchemasMatch(
    final PersistenceSchema keySchema,
    final DataSource dataSource,
    final ServiceContext serviceContext) {
  final KeyFormat keyFormat = dataSource.getKsqlTopic().getKeyFormat();
  final Format format = FormatFactory.fromName(keyFormat.getFormat());

  // Only formats that support schema inference register key schemas in Schema Registry.
  if (!format.supportsFeature(SerdeFeature.SCHEMA_INFERENCE)) {
    return;
  }

  // Key schema that ksqlDB would generate for this insert.
  final ParsedSchema schema = format
      .getSchemaTranslator(keyFormat.getFormatInfo().getProperties())
      .toParsedSchema(keySchema);

  // Latest key schema currently registered for the topic, if any.
  final Optional<SchemaMetadata> latest;
  try {
    latest = SchemaRegistryUtil.getLatestSchema(
        serviceContext.getSchemaRegistryClient(), dataSource.getKafkaTopicName(), true);
  } catch (final KsqlException e) {
    maybeThrowSchemaRegistryAuthError(
        format, dataSource.getKafkaTopicName(), true, AclOperation.READ, e);
    throw new KsqlException("Could not determine that insert values operations is safe; "
        + "operation potentially overrides existing key schema in schema registry.", e);
  }

  if (latest.isPresent() && !latest.get().getSchema().equals(schema.canonicalString())) {
    throw new KsqlException("Cannot INSERT VALUES into data source " + dataSource.getName()
        + ". ksqlDB generated schema would overwrite existing key schema."
        + "\n\tExisting Schema: " + latest.get().getSchema()
        + "\n\tksqlDB Generated: " + schema.canonicalString());
  }
}
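For context, the standalone sketch below (not part of ksqlDB) reproduces the core of this check directly against a Schema Registry client: register a key schema under the usual "<topic>-key" subject, generate the schema a new insert would use, and fail when the canonical forms differ. The topic name, the schema strings, and the KeySchemaCheckSketch class are illustrative assumptions; MockSchemaRegistryClient, AvroSchema, and SchemaMetadata come from Confluent's schema-registry client library.

// Minimal sketch of the "would this insert overwrite the registered key schema?" check.
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class KeySchemaCheckSketch {

  public static void main(final String[] args) throws Exception {
    final SchemaRegistryClient client = new MockSchemaRegistryClient();
    final String topic = "users";              // hypothetical topic name
    final String keySubject = topic + "-key";  // standard TopicNameStrategy key subject

    // Pretend an earlier statement already registered this key schema.
    final ParsedSchema registered = new AvroSchema(
        "{\"type\":\"record\",\"name\":\"UsersKey\","
            + "\"fields\":[{\"name\":\"ID\",\"type\":\"long\"}]}");
    client.register(keySubject, registered);

    // Key schema a new INSERT VALUES would generate (note the different field type).
    final ParsedSchema generated = new AvroSchema(
        "{\"type\":\"record\",\"name\":\"UsersKey\","
            + "\"fields\":[{\"name\":\"ID\",\"type\":\"string\"}]}");

    // Same comparison as ensureKeySchemasMatch: canonical string equality.
    final SchemaMetadata latest = client.getLatestSchemaMetadata(keySubject);
    if (!latest.getSchema().equals(generated.canonicalString())) {
      throw new IllegalStateException(
          "Generated key schema would overwrite the registered key schema:"
              + "\n\tExisting:  " + latest.getSchema()
              + "\n\tGenerated: " + generated.canonicalString());
    }
  }
}

In this sketch the two schemas disagree on the type of ID, so the check throws, which mirrors why ksqlDB rejects the INSERT VALUES: publishing the key under a new schema ID could route logically identical keys to different partitions.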