Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreException in project schema-registry by confluentinc.
In class KafkaSchemaRegistry, method lookUpSchemaUnderSubject.
/**
 * Checks if the given schema was ever registered under a subject. If so, returns the schema
 * together with its version and ID under that subject; otherwise returns null.
 */
public Schema lookUpSchemaUnderSubject(
    String subject, Schema schema, boolean normalize, boolean lookupDeletedSchema)
    throws SchemaRegistryException {
  try {
    ParsedSchema parsedSchema = canonicalizeSchema(schema, false, normalize);
    SchemaIdAndSubjects schemaIdAndSubjects = this.lookupCache.schemaIdAndSubjects(schema);
    if (schemaIdAndSubjects != null) {
      if (schemaIdAndSubjects.hasSubject(subject)
          && (lookupDeletedSchema
              || !isSubjectVersionDeleted(subject, schemaIdAndSubjects.getVersion(subject)))) {
        Schema matchingSchema = new Schema(subject,
            schemaIdAndSubjects.getVersion(subject),
            schemaIdAndSubjects.getSchemaId(),
            schema.getSchemaType(),
            schema.getReferences(),
            schema.getSchema());
        return matchingSchema;
      }
    }
    List<SchemaValue> allVersions = getAllSchemaValues(subject);
    Collections.reverse(allVersions);
    for (SchemaValue schemaValue : allVersions) {
      if ((lookupDeletedSchema || !schemaValue.isDeleted())
          && parsedSchema.references().isEmpty()
          && !schemaValue.getReferences().isEmpty()) {
        Schema prev = getSchemaEntityFromSchemaValue(schemaValue);
        ParsedSchema prevSchema = parseSchema(prev);
        if (parsedSchema.deepEquals(prevSchema)) {
          // This handles the case where a schema is sent with all references resolved
          return prev;
        }
      }
    }
    return null;
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Error from the backend Kafka store", e);
  }
}
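A hypothetical usage sketch of this lookup (the subject name, schema string, and the schemaRegistry handle are assumptions, not part of the project code; the Schema constructor arguments follow the snippet above):
// Hypothetical caller: check whether a client-supplied schema is already
// registered under "payments-value". A null result means it never was.
Schema candidate = new Schema("payments-value", 0, -1, "AVRO",
    Collections.emptyList(), schemaString);
Schema match = schemaRegistry.lookUpSchemaUnderSubject(
    "payments-value", candidate, /* normalize= */ false, /* lookupDeletedSchema= */ false);
if (match != null) {
  int registeredVersion = match.getVersion();
  int registeredId = match.getId();
}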
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreException in project schema-registry by confluentinc.
In class KafkaSchemaRegistry, method getMode.
public Mode getMode(String subject) throws SchemaRegistryStoreException {
  try {
    Mode globalMode = lookupCache.mode(null, false, defaultMode);
    Mode subjectMode = lookupCache.mode(subject, false, defaultMode);
    return globalMode == Mode.READONLY_OVERRIDE ? globalMode : subjectMode;
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Failed to read mode from the store", e);
  }
}
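The precedence rule can be read off directly from the ternary; a minimal illustration (hypothetical helper, not project code):
// Sketch of the precedence implemented by getMode: a global READONLY_OVERRIDE
// wins; any other global mode defers to the subject-level mode.
static Mode effectiveMode(Mode globalMode, Mode subjectMode) {
  return globalMode == Mode.READONLY_OVERRIDE ? globalMode : subjectMode;
}
// effectiveMode(Mode.READONLY_OVERRIDE, Mode.READWRITE) -> READONLY_OVERRIDE
// effectiveMode(Mode.READONLY, Mode.READWRITE)          -> READWRITE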
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreException in project schema-registry by confluentinc.
In class KafkaSchemaRegistry, method updateCompatibilityLevel.
public void updateCompatibilityLevel(String subject, CompatibilityLevel newCompatibilityLevel)
    throws SchemaRegistryStoreException, OperationNotPermittedException, UnknownLeaderException {
  if (isReadOnlyMode(subject)) {
    throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
  }
  ConfigKey configKey = new ConfigKey(subject);
  try {
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    kafkaStore.put(configKey, new ConfigValue(subject, newCompatibilityLevel));
    log.debug("Wrote new compatibility level: " + newCompatibilityLevel.name
        + " to the Kafka data store with key " + configKey.toString());
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Failed to write new config value to the store", e);
  }
}
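A hypothetical caller (subject name and target level are assumptions), handling the read-only case the method guards against:
// Sketch: tighten a subject's compatibility before a schema migration.
try {
  schemaRegistry.updateCompatibilityLevel("orders-value", CompatibilityLevel.FULL_TRANSITIVE);
} catch (OperationNotPermittedException e) {
  // the subject is in read-only mode; reject the request
} catch (SchemaRegistryStoreException | UnknownLeaderException e) {
  // the store write failed or this node is not the leader; retry or surface the error
}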
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreException in project schema-registry by confluentinc.
In class KafkaSchemaRegistry, method allVersions.
private CloseableIterator<SchemaRegistryValue> allVersions(
    String subjectOrPrefix, boolean isPrefix) throws SchemaRegistryException {
  try {
    String start;
    String end;
    int idx = subjectOrPrefix.indexOf(CONTEXT_WILDCARD);
    if (idx >= 0) {
      // Context wildcard match: scan the full range of context-qualified keys
      String prefix = subjectOrPrefix.substring(0, idx);
      start = prefix + CONTEXT_PREFIX + CONTEXT_DELIMITER;
      end = prefix + CONTEXT_PREFIX + Character.MAX_VALUE + CONTEXT_DELIMITER;
    } else {
      start = subjectOrPrefix;
      end = isPrefix ? subjectOrPrefix + Character.MAX_VALUE : subjectOrPrefix;
    }
    SchemaKey key1 = new SchemaKey(start, MIN_VERSION);
    SchemaKey key2 = new SchemaKey(end, MAX_VERSION);
    return kafkaStore.getAll(key1, key2);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Error from the backend Kafka store", e);
  }
}
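A worked trace of the wildcard branch; the constant values here are assumptions about QualifiedSubject in this project, shown only for illustration:
// Hypothetical trace, assuming CONTEXT_WILDCARD = ":*:", CONTEXT_PREFIX = ":."
// and CONTEXT_DELIMITER = ":". For a bare wildcard the range brackets every
// context-qualified subject in the store.
String subjectOrPrefix = ":*:";
int idx = subjectOrPrefix.indexOf(":*:");               // 0 -> wildcard branch
String prefix = subjectOrPrefix.substring(0, idx);      // ""
String start = prefix + ":." + ":";                     // ":.:"
String end = prefix + ":." + Character.MAX_VALUE + ":"; // sorts after any context name
// kafkaStore.getAll(new SchemaKey(start, MIN_VERSION), new SchemaKey(end, MAX_VERSION))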
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreException in project schema-registry by confluentinc.
In class KafkaStore, method getLatestOffset.
/**
 * Return the latest offset of the store topic.
 *
 * <p>The most reliable way to do so in the face of potential Kafka broker failure is to produce
 * successfully to the Kafka topic and read the offset from the returned metadata.
 *
 * <p>If the most recent write to Kafka was successful (signaled by lastWrittenOffset >= 0),
 * immediately return that offset. Otherwise write a "noop key" to Kafka in order to find the
 * latest offset.
 */
private long getLatestOffset(int timeoutMs) throws StoreException {
  ProducerRecord<byte[], byte[]> producerRecord = null;
  if (this.lastWrittenOffset >= 0) {
    return this.lastWrittenOffset;
  }
  try {
    producerRecord =
        new ProducerRecord<>(topic, 0, this.serializer.serializeKey(noopKey), null);
  } catch (SerializationException e) {
    throw new StoreException("Failed to serialize noop key.", e);
  }
  try {
    log.trace("Sending Noop record to KafkaStore to find last offset.");
    Future<RecordMetadata> ack = producer.send(producerRecord);
    RecordMetadata metadata = ack.get(timeoutMs, TimeUnit.MILLISECONDS);
    this.lastWrittenOffset = metadata.offset();
    log.trace("Noop record's offset is " + this.lastWrittenOffset);
    return this.lastWrittenOffset;
  } catch (Exception e) {
    throw new StoreException("Failed to write Noop record to kafka store.", e);
  }
}
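The same produce-and-read-offset trick can be sketched standalone with a plain KafkaProducer; the topic name, timeout, and bootstrap address below are assumptions, and this is not the store's actual wiring:
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

// Hypothetical helper: find the latest offset of partition 0 by producing an
// empty marker record and reading the offset from the acked metadata.
static long latestOffsetViaNoop(String topic) throws Exception {
  Properties props = new Properties();
  props.put("bootstrap.servers", "localhost:9092");
  props.put("key.serializer",
      "org.apache.kafka.common.serialization.ByteArraySerializer");
  props.put("value.serializer",
      "org.apache.kafka.common.serialization.ByteArraySerializer");
  try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
    RecordMetadata metadata = producer
        .send(new ProducerRecord<>(topic, 0, new byte[0], null))
        .get(10, TimeUnit.SECONDS);
    return metadata.offset();
  }
}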