Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method deleteSchemaVersion.
@Override
public void deleteSchemaVersion(String subject, Schema schema, boolean permanentDelete)
    throws SchemaRegistryException {
  try {
    if (isReadOnlyMode(subject)) {
      throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
    }
    SchemaKey key = new SchemaKey(subject, schema.getVersion());
    if (!lookupCache.referencesSchema(key).isEmpty()) {
      throw new ReferenceExistsException(key.toString());
    }
    SchemaValue schemaValue = (SchemaValue) lookupCache.get(key);
    // A version may only be permanently deleted after it has been soft-deleted
    if (permanentDelete && schemaValue != null && !schemaValue.isDeleted()) {
      throw new SchemaVersionNotSoftDeletedException(subject, schema.getVersion().toString());
    }
    // Ensure cache is up-to-date before any potential writes
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    if (!permanentDelete) {
      // Soft delete: rewrite the schema value with the deleted flag set
      schemaValue = new SchemaValue(schema);
      schemaValue.setDeleted(true);
      kafkaStore.put(key, schemaValue);
      // If this was the last version, drop the subject-level mode and compatibility configs
      if (!getAllVersions(subject, false).hasNext()) {
        if (getMode(subject) != null) {
          deleteMode(subject);
        }
        if (getCompatibilityLevel(subject) != null) {
          deleteCompatibility(subject);
        }
      }
    } else {
      // Permanent delete: write a tombstone for the key
      kafkaStore.put(key, null);
    }
  } catch (StoreTimeoutException te) {
    throw new SchemaRegistryTimeoutException(
        "Write to the Kafka store timed out while deleting the schema for subject '"
            + subject + "'", te);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException(
        "Error while deleting the schema for subject '" + subject
            + "' in the backend Kafka store", e);
  }
}
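The permanentDelete branch refuses to run until the version has been soft-deleted, so a full purge is a two-step call. A minimal caller sketch, assuming a KafkaSchemaRegistry instance is already at hand (purgeVersion is an illustrative name, not project code):

// Illustrative helper (not in the project): the two-phase delete that
// deleteSchemaVersion enforces. Calling with permanentDelete=true before the
// soft delete would throw SchemaVersionNotSoftDeletedException.
static void purgeVersion(KafkaSchemaRegistry registry, String subject, Schema schema)
    throws SchemaRegistryException {
  registry.deleteSchemaVersion(subject, schema, false); // soft delete: marks the value deleted
  registry.deleteSchemaVersion(subject, schema, true);  // permanent delete: writes a tombstone
}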
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method deleteSubjectMode.
public void deleteSubjectMode(String subject)
    throws SchemaRegistryStoreException, OperationNotPermittedException {
  if (!allowModeChanges) {
    throw new OperationNotPermittedException("Mode changes are not allowed");
  }
  try {
    // Make sure the local reader has caught up with the log before deleting
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    deleteMode(subject);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Failed to delete subject config value from store", e);
  }
}
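A hedged sketch of how a caller might distinguish the two declared failure modes (registry and clearMode are illustrative names, not project code):

void clearMode(KafkaSchemaRegistry registry, String subject) {
  try {
    registry.deleteSubjectMode(subject);
  } catch (OperationNotPermittedException e) {
    // Mode changes are disabled on this node (allowModeChanges is false);
    // typically mapped to a client error.
  } catch (SchemaRegistryStoreException e) {
    // The backend Kafka store failed; typically mapped to a server error.
  }
}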
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method init.
@Override
public void init() throws SchemaRegistryException {
  try {
    kafkaStore.init();
  } catch (StoreInitializationException e) {
    throw new SchemaRegistryInitializationException(
        "Error initializing kafka store while initializing schema registry", e);
  }
  try {
    config.checkBootstrapServers();
    log.info("Joining schema registry with Kafka-based coordination");
    leaderElector = new KafkaGroupLeaderElector(config, myIdentity, this);
    leaderElector.init();
  } catch (SchemaRegistryStoreException e) {
    throw new SchemaRegistryInitializationException(
        "Error electing leader while initializing schema registry", e);
  } catch (SchemaRegistryTimeoutException e) {
    throw new SchemaRegistryInitializationException(e);
  }
}
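Because init wraps both store and election failures in SchemaRegistryInitializationException (a SchemaRegistryException), a startup path can fail fast with a single catch. A sketch under that assumption, with the surrounding bootstrap code elided:

try {
  schemaRegistry.init();
} catch (SchemaRegistryException e) {
  // Covers the initialization failures wrapped above, from both the store
  // init and leader election paths; abort startup rather than serve requests.
  log.error("Schema registry failed to initialize", e);
  throw new RuntimeException(e);
}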
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method setLeader.
/**
 * 'Inform' this SchemaRegistry instance which SchemaRegistry is the current leader.
 * If this instance is set as the new leader, ensure it is up-to-date with data in
 * the kafka store.
 *
 * @param newLeader Identity of the current leader. null means no leader is alive.
 */
@Override
public void setLeader(@Nullable SchemaRegistryIdentity newLeader)
    throws SchemaRegistryTimeoutException, SchemaRegistryStoreException, IdGenerationException {
  log.debug("Setting the leader to " + newLeader);
  // Only schema registry instances eligible for leader election can be set as leader
  if (newLeader != null && !newLeader.getLeaderEligibility()) {
    throw new IllegalStateException("Tried to set an ineligible node to leader: " + newLeader);
  }
  kafkaStore.leaderLock().lock();
  try {
    SchemaRegistryIdentity previousLeader = leaderIdentity;
    leaderIdentity = newLeader;
    if (leaderIdentity == null) {
      leaderRestService = null;
    } else {
      leaderRestService = new RestService(leaderIdentity.getUrl());
      if (sslFactory != null && sslFactory.sslContext() != null) {
        leaderRestService.setSslSocketFactory(sslFactory.sslContext().getSocketFactory());
        leaderRestService.setHostnameVerifier(getHostnameVerifier());
      }
    }
    if (leaderIdentity != null && !leaderIdentity.equals(previousLeader) && isLeader()) {
      // The new leader may not know the exact last offset in the Kafka log, so mark the
      // last offset invalid here
      kafkaStore.markLastWrittenOffsetInvalid();
      // Catch up with the end of the log before acting as leader
      try {
        kafkaStore.waitUntilKafkaReaderReachesLastOffset(initTimeout);
      } catch (StoreException e) {
        throw new SchemaRegistryStoreException("Exception getting latest offset", e);
      }
      idGenerator.init();
    }
    metricsContainer.getLeaderNode().set(isLeader() ? 1 : 0);
  } finally {
    kafkaStore.leaderLock().unlock();
  }
}
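setLeader is not normally called directly; the elector created in init invokes it when leadership changes. A speculative sketch of that callback shape (onLeaderChange and schemaRegistry are illustrative names, not the project's elector API):

// Illustrative callback: the elected identity (or null when no leader is
// alive) is handed to the registry, which re-syncs with the log and
// re-initializes its id generator if this node itself became leader.
void onLeaderChange(@Nullable SchemaRegistryIdentity elected) {
  try {
    schemaRegistry.setLeader(elected);
  } catch (SchemaRegistryTimeoutException | SchemaRegistryStoreException
      | IdGenerationException e) {
    // Leadership could not be applied; the elector may retry or resign.
    log.error("Failed to apply leader change to " + elected, e);
  }
}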
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method listVersionsForId.
public List<SubjectVersion> listVersionsForId(int id, String subject, boolean lookupDeleted)
    throws SchemaRegistryException {
  SchemaValue schema = null;
  try {
    SchemaKey subjectVersionKey = getSchemaKeyUsingContexts(id, subject);
    if (subjectVersionKey == null) {
      return null;
    }
    schema = (SchemaValue) kafkaStore.get(subjectVersionKey);
    if (schema == null) {
      return null;
    }
    // Rebuild the REST entity for the stored schema so the lookup cache can
    // resolve every (subject, version) pair that references this schema id
    Schema schemaEntity = new Schema(
        schema.getSubject(),
        schema.getVersion(),
        schema.getId(),
        schema.getSchemaType(),
        schema.getReferences().stream()
            .map(ref -> new io.confluent.kafka.schemaregistry.client.rest.entities.SchemaReference(
                ref.getName(), ref.getSubject(), ref.getVersion()))
            .collect(Collectors.toList()),
        schema.getSchema());
    return lookupCache.schemaIdAndSubjects(schemaEntity)
        .allSubjectVersions()
        .entrySet()
        .stream()
        .flatMap(e -> {
          try {
            SchemaValue schemaValue =
                (SchemaValue) kafkaStore.get(new SchemaKey(e.getKey(), e.getValue()));
            // Skip soft-deleted versions unless the caller asked for them
            if ((schemaValue != null && !schemaValue.isDeleted()) || lookupDeleted) {
              return Stream.of(new SubjectVersion(e.getKey(), e.getValue()));
            } else {
              return Stream.empty();
            }
          } catch (StoreException ex) {
            return Stream.empty();
          }
        })
        .collect(Collectors.toList());
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException(
        "Error while retrieving schema with id " + id + " from the backend Kafka store", e);
  }
}
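Callers must distinguish a null return (the id does not resolve for the given subject or context) from an empty list (it resolves, but every referencing version is soft-deleted and lookupDeleted is false). A small illustrative sketch, assuming a KafkaSchemaRegistry instance named registry:

List<SubjectVersion> versions = registry.listVersionsForId(id, subject, false);
if (versions == null) {
  // Unknown id for this subject/context
} else {
  for (SubjectVersion sv : versions) {
    // Each entry names one (subject, version) pair that points at this schema id
  }
}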