Example use of io.confluent.kafka.schemaregistry.exceptions.SubjectNotSoftDeletedException in the schema-registry project by confluentinc.
Shown below: the deleteSubject method of the KafkaSchemaRegistry class.
/**
 * Deletes all versions of the given subject from the registry.
 *
 * <p>For a soft delete ({@code permanentDelete == false}) a {@code DeleteSubjectValue}
 * marker carrying the highest deleted version is written to the Kafka store and any
 * per-subject mode and compatibility overrides are removed. For a permanent delete,
 * every schema record of the subject is tombstoned ({@code null} value) in the store;
 * this requires that the subject was soft-deleted first.
 *
 * @param subject the subject whose versions are to be deleted
 * @param permanentDelete {@code true} for a hard (permanent) delete, {@code false} for
 *     a soft delete
 * @return the list of version numbers that were deleted
 * @throws OperationNotPermittedException if the subject is in read-only mode
 * @throws ReferenceExistsException if any version is still referenced by another schema
 * @throws SubjectNotSoftDeletedException if a permanent delete is requested but a
 *     version has not been soft-deleted yet
 * @throws SchemaRegistryTimeoutException if the write to the Kafka store times out
 * @throws SchemaRegistryStoreException on any other backing-store failure
 */
@Override
public List<Integer> deleteSubject(String subject, boolean permanentDelete)
    throws SchemaRegistryException {
  try {
    if (isReadOnlyMode(subject)) {
      throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
    }
    // Ensure the local cache has caught up with the store before any potential writes.
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    List<Integer> deletedVersions = new ArrayList<>();
    int deleteWatermarkVersion = 0;
    // Validate every version before mutating anything, so the delete fails fast and
    // atomically from the caller's point of view: no version may still be referenced,
    // and for a permanent delete every version must already be soft-deleted.
    Iterator<Schema> schemasToBeDeleted = getAllVersions(subject, permanentDelete);
    while (schemasToBeDeleted.hasNext()) {
      deleteWatermarkVersion = schemasToBeDeleted.next().getVersion();
      SchemaKey key = new SchemaKey(subject, deleteWatermarkVersion);
      if (!lookupCache.referencesSchema(key).isEmpty()) {
        throw new ReferenceExistsException(key.toString());
      }
      if (permanentDelete) {
        SchemaValue schemaValue = (SchemaValue) lookupCache.get(key);
        if (schemaValue != null && !schemaValue.isDeleted()) {
          // Hard delete is only allowed after a prior soft delete of the subject.
          throw new SubjectNotSoftDeletedException(subject);
        }
      }
      deletedVersions.add(deleteWatermarkVersion);
    }
    if (!permanentDelete) {
      // Soft delete: record a marker with the highest deleted version
      // (deleteWatermarkVersion holds the last version seen by the iterator),
      // then drop any per-subject mode and compatibility overrides.
      DeleteSubjectKey key = new DeleteSubjectKey(subject);
      DeleteSubjectValue value = new DeleteSubjectValue(subject, deleteWatermarkVersion);
      kafkaStore.put(key, value);
      if (getMode(subject) != null) {
        deleteMode(subject);
      }
      if (getCompatibilityLevel(subject) != null) {
        deleteCompatibility(subject);
      }
    } else {
      // Permanent delete: tombstone each schema record in the backing Kafka store.
      for (Integer version : deletedVersions) {
        kafkaStore.put(new SchemaKey(subject, version), null);
      }
    }
    return deletedVersions;
  } catch (StoreTimeoutException te) {
    // The original message was truncated ("... timed out while"); complete it and
    // include the subject so the timeout is diagnosable from the exception alone.
    throw new SchemaRegistryTimeoutException(
        "Write to the Kafka store timed out while deleting subject " + subject, te);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException(
        "Error while deleting the subject in the backend Kafka store", e);
  }
}
Related aggregations: