Use of io.confluent.kafka.schemaregistry.storage.exceptions.EntryTooLargeException in project schema-registry by confluentinc.
The register method of the KafkaSchemaRegistry class:
@Override
public int register(String subject, Schema schema, boolean normalize)
    throws SchemaRegistryException {
  try {
    checkRegisterMode(subject, schema);

    // Ensure cache is up-to-date before any potential writes
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);

    int schemaId = schema.getId();
    ParsedSchema parsedSchema = canonicalizeSchema(schema, schemaId < 0, normalize);

    // see if the schema to be registered already exists
    SchemaIdAndSubjects schemaIdAndSubjects = this.lookupCache.schemaIdAndSubjects(schema);
    if (schemaIdAndSubjects != null
        && (schemaId < 0 || schemaId == schemaIdAndSubjects.getSchemaId())) {
      if (schemaIdAndSubjects.hasSubject(subject)
          && !isSubjectVersionDeleted(subject, schemaIdAndSubjects.getVersion(subject))) {
        // return only if the schema was previously registered under the input subject
        return schemaIdAndSubjects.getSchemaId();
      } else {
        // need to register schema under the input subject
        schemaId = schemaIdAndSubjects.getSchemaId();
      }
    }

    // determine the latest version of the schema in the subject
    List<SchemaValue> allVersions = getAllSchemaValues(subject);
    Collections.reverse(allVersions);

    List<SchemaValue> deletedVersions = new ArrayList<>();
    List<ParsedSchema> undeletedVersions = new ArrayList<>();
    int newVersion = MIN_VERSION;
    for (SchemaValue schemaValue : allVersions) {
      newVersion = Math.max(newVersion, schemaValue.getVersion() + 1);
      if (schemaValue.isDeleted()) {
        deletedVersions.add(schemaValue);
      } else {
        ParsedSchema undeletedSchema = parseSchema(getSchemaEntityFromSchemaValue(schemaValue));
        if (parsedSchema.references().isEmpty()
            && !undeletedSchema.references().isEmpty()
            && parsedSchema.deepEquals(undeletedSchema)) {
          // This handles the case where a schema is sent with all references resolved
          return schemaValue.getId();
        }
        undeletedVersions.add(undeletedSchema);
      }
    }
    Collections.reverse(undeletedVersions);

    final List<String> compatibilityErrorLogs =
        isCompatibleWithPrevious(subject, parsedSchema, undeletedVersions);
    final boolean isCompatible = compatibilityErrorLogs.isEmpty();

    if (normalize) {
      parsedSchema = parsedSchema.normalize();
    }
    // Allow schema providers to modify the schema during compatibility checks
    schema.setSchema(parsedSchema.canonicalString());
    schema.setReferences(parsedSchema.references());

    if (isCompatible) {
      // save the context key
      QualifiedSubject qs = QualifiedSubject.create(tenant(), subject);
      if (qs != null && !DEFAULT_CONTEXT.equals(qs.getContext())) {
        ContextKey contextKey = new ContextKey(qs.getTenant(), qs.getContext());
        if (kafkaStore.get(contextKey) == null) {
          ContextValue contextValue = new ContextValue(qs.getTenant(), qs.getContext());
          kafkaStore.put(contextKey, contextValue);
        }
      }

      // assign a guid and put the schema in the kafka store
      if (schema.getVersion() <= 0) {
        schema.setVersion(newVersion);
      }
      SchemaKey schemaKey = new SchemaKey(subject, schema.getVersion());
      if (schemaId >= 0) {
        checkIfSchemaWithIdExist(schemaId, schema);
        schema.setId(schemaId);
        kafkaStore.put(schemaKey, new SchemaValue(schema));
      } else {
        int retries = 0;
        while (retries++ < kafkaStoreMaxRetries) {
          int newId = idGenerator.id(new SchemaValue(schema));
          // Verify id is not already in use
          if (lookupCache.schemaKeyById(newId, subject) == null) {
            schema.setId(newId);
            if (retries > 1) {
              log.warn(String.format("Retrying to register the schema with ID %s", newId));
            }
            kafkaStore.put(schemaKey, new SchemaValue(schema));
            break;
          }
        }
        if (retries >= kafkaStoreMaxRetries) {
          throw new SchemaRegistryStoreException(
              "Error while registering the schema due to generating an ID that is already in use.");
        }
      }

      for (SchemaValue deleted : deletedVersions) {
        if (deleted.getId().equals(schema.getId())
            && deleted.getVersion().compareTo(schema.getVersion()) < 0) {
          // Tombstone previous version with the same ID
          SchemaKey key = new SchemaKey(deleted.getSubject(), deleted.getVersion());
          kafkaStore.put(key, null);
        }
      }
      return schema.getId();
    } else {
      throw new IncompatibleSchemaException(compatibilityErrorLogs.toString());
    }
  } catch (EntryTooLargeException e) {
    throw new SchemaTooLargeException("Write failed because schema is too large", e);
  } catch (StoreTimeoutException te) {
    throw new SchemaRegistryTimeoutException(
        "Write to the Kafka store timed out while registering the schema", te);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException(
        "Error while registering the schema in the backend Kafka store", e);
  }
}
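The EntryTooLargeException caught in the last block above is the storage-level signal raised by KafkaStore.put (shown next); register translates it into SchemaTooLargeException so callers see a schema-sized error rather than a generic store failure. Below is a minimal, self-contained sketch of that translation pattern, using hypothetical classes and an assumed 1 MB limit rather than the project's types:

// Hypothetical stand-ins; not the project's classes.
class EntryTooLargeError extends Exception {
  EntryTooLargeError(String msg) {
    super(msg);
  }
}

class SchemaTooLargeError extends Exception {
  SchemaTooLargeError(String msg, Throwable cause) {
    super(msg, cause);
  }
}

class RegistrationSketch {
  // Stand-in for kafkaStore.put(...): rejects payloads above an assumed 1 MB limit.
  private void storePut(byte[] payload) throws EntryTooLargeError {
    if (payload.length > 1_000_000) {
      throw new EntryTooLargeError("Put operation failed because entry is too large");
    }
  }

  // Stand-in for register(...): re-throws the storage-level failure as a
  // schema-level one, mirroring the catch block at the end of register above.
  int register(byte[] schemaPayload) throws SchemaTooLargeError {
    try {
      storePut(schemaPayload);
      return 1; // placeholder id; the real method returns the assigned schema id
    } catch (EntryTooLargeError e) {
      throw new SchemaTooLargeError("Write failed because schema is too large", e);
    }
  }
}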
Use of io.confluent.kafka.schemaregistry.storage.exceptions.EntryTooLargeException in project schema-registry by confluentinc.
The put method of the KafkaStore class:
@Override
public V put(K key, V value) throws StoreTimeoutException, StoreException {
  assertInitialized();
  if (key == null) {
    throw new StoreException("Key should not be null");
  }
  V oldValue = get(key);

  // write to the Kafka topic
  ProducerRecord<byte[], byte[]> producerRecord = null;
  try {
    producerRecord = new ProducerRecord<byte[], byte[]>(
        topic, 0,
        this.serializer.serializeKey(key),
        value == null ? null : this.serializer.serializeValue(value));
  } catch (SerializationException e) {
    throw new StoreException(
        "Error serializing schema while creating the Kafka produce record", e);
  }

  boolean knownSuccessfulWrite = false;
  try {
    log.trace("Sending record to KafkaStore topic: " + producerRecord);
    Future<RecordMetadata> ack = producer.send(producerRecord);
    RecordMetadata recordMetadata = ack.get(timeout, TimeUnit.MILLISECONDS);

    log.trace("Waiting for the local store to catch up to offset " + recordMetadata.offset());
    this.lastWrittenOffset = recordMetadata.offset();
    if (key instanceof SubjectKey) {
      setLastOffset(((SubjectKey) key).getSubject(), recordMetadata.offset());
    }
    waitUntilKafkaReaderReachesOffset(recordMetadata.offset(), timeout);
    knownSuccessfulWrite = true;
  } catch (InterruptedException e) {
    throw new StoreException("Put operation interrupted while waiting for an ack from Kafka", e);
  } catch (ExecutionException e) {
    if (e.getCause() instanceof RecordTooLargeException) {
      throw new EntryTooLargeException("Put operation failed because entry is too large");
    } else {
      throw new StoreException("Put operation failed while waiting for an ack from Kafka", e);
    }
  } catch (TimeoutException e) {
    throw new StoreTimeoutException("Put operation timed out while waiting for an ack from Kafka", e);
  } catch (KafkaException ke) {
    throw new StoreException("Put operation to Kafka failed", ke);
  } finally {
    if (!knownSuccessfulWrite) {
      markLastWrittenOffsetInvalid();
    }
  }
  return oldValue;
}
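A minimal sketch (not the project's code) of the cause-unwrapping pattern that put relies on: when a record exceeds the producer's max.request.size, the failure surfaces from Future.get() as an ExecutionException whose cause is RecordTooLargeException, which is exactly the branch KafkaStore.put maps to EntryTooLargeException. The broker address, the topic name "_schemas", the timeout, and the 2 MB payload below are assumptions for illustration:

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class OversizedRecordSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
    props.put("key.serializer", ByteArraySerializer.class.getName());
    props.put("value.serializer", ByteArraySerializer.class.getName());

    try (Producer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
      // 2 MB value: larger than the producer's default 1 MB max.request.size
      ProducerRecord<byte[], byte[]> record =
          new ProducerRecord<>("_schemas", 0, new byte[] {1}, new byte[2 * 1024 * 1024]);
      try {
        producer.send(record).get(5000, TimeUnit.MILLISECONDS);
      } catch (ExecutionException e) {
        if (e.getCause() instanceof RecordTooLargeException) {
          // The branch KafkaStore.put() translates into EntryTooLargeException
          System.err.println("Entry too large: " + e.getCause().getMessage());
        } else {
          throw e;
        }
      }
    }
  }
}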