Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreException in project schema-registry by confluentinc.
The class KafkaStoreMessageHandler, method validateUpdate:
/**
 * Invoked before every new K,V pair written to the store.
 *
 * @param key       Key associated with the data
 * @param value     Data written to the store
 * @param tp        Topic partition the record was read from
 * @param offset    Offset of the record within the topic partition
 * @param timestamp Record timestamp
 */
@Override
public ValidationStatus validateUpdate(SchemaRegistryKey key, SchemaRegistryValue value,
                                       TopicPartition tp, long offset, long timestamp) {
  if (value == null) {
    return ValidationStatus.SUCCESS;
  }

  // Store the offset and timestamp in the cached value
  value.setOffset(offset);
  value.setTimestamp(timestamp);

  if (key.getKeyType() == SchemaRegistryKeyType.SCHEMA) {
    SchemaValue schemaObj = (SchemaValue) value;
    String schemaType = schemaObj.getSchemaType();
    if (canonicalizeSchemaTypes.contains(schemaType)) {
      SchemaProvider schemaProvider = schemaRegistry.schemaProvider(schemaType);
      if (schemaProvider != null) {
        canonicalize(schemaProvider, schemaObj);
      }
    }
    try {
      SchemaKey oldKey = lookupCache.schemaKeyById(schemaObj.getId(), schemaObj.getSubject());
      if (oldKey != null) {
        SchemaValue oldSchema = (SchemaValue) lookupCache.get(oldKey);
        if (oldSchema != null && !oldSchema.getSchema().equals(schemaObj.getSchema())) {
          log.error("Found a schema with duplicate ID {}. This schema will not be "
              + "registered since a schema already exists with this ID.", schemaObj.getId());
          return schemaRegistry.isLeader()
              ? ValidationStatus.ROLLBACK_FAILURE : ValidationStatus.IGNORE_FAILURE;
        }
      }
    } catch (StoreException e) {
      log.error("Error while retrieving schema", e);
      return schemaRegistry.isLeader()
          ? ValidationStatus.ROLLBACK_FAILURE : ValidationStatus.IGNORE_FAILURE;
    }
  } else if (key.getKeyType() == SchemaRegistryKeyType.CONFIG
      || key.getKeyType() == SchemaRegistryKeyType.MODE) {
    SubjectValue subjectValue = (SubjectValue) value;
    if (subjectValue.getSubject() == null) {
      // Handle legacy values that don't carry the subject in the value
      subjectValue.setSubject(((SubjectKey) key).getSubject());
    }
  }
  return ValidationStatus.SUCCESS;
}
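Both failure paths above return a leader-dependent status: the leader rolls the write back, while replicas merely ignore it. A minimal sketch of how a store lookup can surface as the StoreException caught here, assuming hypothetical helpers readFromBackingStore and deserializeSchema that are not part of schema-registry, and assuming StoreException exposes a (String, Throwable) constructor:

  // Sketch only: wraps a low-level read failure in a StoreException so callers such as
  // validateUpdate can translate it into ROLLBACK_FAILURE or IGNORE_FAILURE.
  private SchemaValue loadSchema(SchemaKey key) throws StoreException {
    try {
      byte[] raw = readFromBackingStore(key);   // hypothetical I/O helper
      return deserializeSchema(raw);            // hypothetical deserialization helper
    } catch (IOException e) {
      throw new StoreException("Error while retrieving schema for key " + key, e);
    }
  }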
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreException in project schema-registry by confluentinc.
The class SchemaRegistryKeysTest, method testStoreKeyOrder:
private void testStoreKeyOrder(SchemaRegistryKey[] orderedKeys) throws StoreInitializationException {
  int numKeys = orderedKeys.length;
  InMemoryCache<SchemaRegistryKey, SchemaRegistryValue> store =
      new InMemoryCache<>(new SchemaRegistrySerializer());
  store.init();
  // Write the keys in reverse order
  while (--numKeys >= 0) {
    try {
      store.put(orderedKeys[numKeys], toValue(orderedKeys[numKeys]));
    } catch (StoreException e) {
      fail("Error writing key " + orderedKeys[numKeys].toString() + " to the in-memory store");
    }
  }
  // Test key order
  try (CloseableIterator<SchemaRegistryKey> keys = store.getAllKeys()) {
    SchemaRegistryKey[] retrievedKeyOrder = new SchemaRegistryKey[orderedKeys.length];
    int keyIndex = 0;
    while (keys.hasNext()) {
      retrievedKeyOrder[keyIndex++] = keys.next();
    }
    assertArrayEquals(orderedKeys, retrievedKeyOrder);
  } catch (StoreException e) {
    fail("Error reading keys back from the in-memory store");
  }
}
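The helper exercises the store's natural key ordering: keys are written in reverse and must come back in the order of orderedKeys. A hypothetical invocation, assuming a SchemaKey(subject, version) constructor and that keys for a single subject sort by ascending version:

  // Sketch only: drive testStoreKeyOrder with schema keys for one subject.
  SchemaRegistryKey[] orderedKeys = {
      new SchemaKey("payments-value", 1),
      new SchemaKey("payments-value", 2),
      new SchemaKey("payments-value", 3)
  };
  testStoreKeyOrder(orderedKeys);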