Use of io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method updateCompatibilityLevel:
public void updateCompatibilityLevel(String subject, CompatibilityLevel newCompatibilityLevel)
    throws SchemaRegistryStoreException, OperationNotPermittedException, UnknownLeaderException {
  if (isReadOnlyMode(subject)) {
    throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
  }
  ConfigKey configKey = new ConfigKey(subject);
  try {
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    kafkaStore.put(configKey, new ConfigValue(subject, newCompatibilityLevel));
    log.debug("Wrote new compatibility level: " + newCompatibilityLevel.name
        + " to the Kafka data store with key " + configKey.toString());
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Failed to write new config value to the store", e);
  }
}
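When a subject is in read-only mode, the guard above surfaces to REST clients as an HTTP 422 "operation not permitted" error. A minimal client-side sketch of triggering this code path over the standard PUT /config/{subject} endpoint, assuming a registry at http://localhost:8081 and an illustrative subject name (neither comes from the snippet above):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class UpdateCompatibilityExample {
  public static void main(String[] args) throws Exception {
    // PUT /config/{subject} sets the subject-level compatibility; the
    // registry rejects the write if the subject is in read-only mode.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:8081/config/my-subject"))
        .header("Content-Type", "application/vnd.schemaregistry.v1+json")
        .PUT(HttpRequest.BodyPublishers.ofString("{\"compatibility\": \"BACKWARD\"}"))
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    if (response.statusCode() == 422) {
      // This is how the OperationNotPermittedException above reaches the client
      System.err.println("Not permitted: " + response.body());
    } else {
      System.out.println(response.body());
    }
  }
}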
Use of io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException in project schema-registry by confluentinc.
The class SubjectVersionsResource, method register:
@POST
@PerformanceMetric("subjects.versions.register")
@Operation(summary = "Register a new schema under the specified subject. If successfully "
    + "registered, this returns the unique identifier of this schema in the registry. The "
    + "returned identifier should be used to retrieve this schema from the schemas resource and "
    + "is different from the schema's version which is associated with the subject. If the same "
    + "schema is registered under a different subject, the same identifier will be returned. "
    + "However, the version of the schema may be different under different subjects.\n"
    + "A schema should be compatible with the previously registered schema or schemas (if there "
    + "are any) as per the configured compatibility level. The configured compatibility level "
    + "can be obtained by issuing a GET http:get:: /config/(string: subject). If that returns "
    + "null, then GET http:get:: /config\n"
    + "When there are multiple instances of Schema Registry running in the same cluster, the "
    + "schema registration request will be forwarded to one of the instances designated as "
    + "the primary. If the primary is not available, the client will get an error code "
    + "indicating that the forwarding has failed.",
    responses = {
        @ApiResponse(content = @Content(schema = @io.swagger.v3.oas.annotations.media.Schema(
            implementation = RegisterSchemaResponse.class))),
        @ApiResponse(responseCode = "409", description = "Incompatible schema"),
        @ApiResponse(responseCode = "422", description = "Error code 42201 -- Invalid schema or "
            + "schema type"),
        @ApiResponse(responseCode = "500", description =
            "Error code 50001 -- Error in the backend data store\n"
            + "Error code 50002 -- Operation timed out\n"
            + "Error code 50003 -- Error while forwarding the request to the primary")
    })
public void register(
    @Suspended final AsyncResponse asyncResponse,
    @Context HttpHeaders headers,
    @Parameter(description = "Name of the subject", required = true)
    @PathParam("subject") String subjectName,
    @Parameter(description = "Whether to register the normalized schema")
    @QueryParam("normalize") boolean normalize,
    @Parameter(description = "Schema", required = true) @NotNull RegisterSchemaRequest request) {
  log.info("Registering new schema: subject {}, version {}, id {}, type {}, schema size {}",
      subjectName, request.getVersion(), request.getId(), request.getSchemaType(),
      request.getSchema() == null ? 0 : request.getSchema().length());
  if (subjectName != null
      && (CharMatcher.javaIsoControl().matchesAnyOf(subjectName)
          || QualifiedSubject.create(this.schemaRegistry.tenant(), subjectName)
              .getSubject().equals(GLOBAL_RESOURCE_NAME))) {
    throw Errors.invalidSubjectException(subjectName);
  }
  subjectName = QualifiedSubject.normalize(schemaRegistry.tenant(), subjectName);
  Map<String, String> headerProperties = requestHeaderBuilder.buildRequestHeaders(
      headers, schemaRegistry.config().whitelistHeaders());
  Schema schema = new Schema(
      subjectName,
      request.getVersion() != null ? request.getVersion() : 0,
      request.getId() != null ? request.getId() : -1,
      request.getSchemaType() != null ? request.getSchemaType() : AvroSchema.TYPE,
      request.getReferences(),
      request.getSchema());
  int id;
  try {
    id = schemaRegistry.registerOrForward(subjectName, schema, normalize, headerProperties);
  } catch (IdDoesNotMatchException e) {
    throw Errors.idDoesNotMatchException(e);
  } catch (InvalidSchemaException e) {
    throw Errors.invalidSchemaException(e);
  } catch (OperationNotPermittedException e) {
    throw Errors.operationNotPermittedException(e.getMessage());
  } catch (SchemaRegistryTimeoutException e) {
    throw Errors.operationTimeoutException("Register operation timed out", e);
  } catch (SchemaRegistryStoreException e) {
    throw Errors.storeException("Register schema operation failed while writing"
        + " to the Kafka store", e);
  } catch (SchemaRegistryRequestForwardingException e) {
    throw Errors.requestForwardingFailedException("Error while forwarding register schema request"
        + " to the leader", e);
  } catch (IncompatibleSchemaException e) {
    throw Errors.incompatibleSchemaException("Schema being registered is incompatible with"
        + " an earlier schema for subject \"" + subjectName + "\", details: " + e.getMessage(), e);
  } catch (UnknownLeaderException e) {
    throw Errors.unknownLeaderException("Leader not known.", e);
  } catch (SchemaRegistryException e) {
    throw Errors.schemaRegistryException("Error while registering schema", e);
  }
  RegisterSchemaResponse registerSchemaResponse = new RegisterSchemaResponse();
  registerSchemaResponse.setId(id);
  asyncResponse.resume(registerSchemaResponse);
}
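For reference, a minimal registration call against this endpoint. The local registry URL, the subject name, and the trivial Avro schema are illustrative choices of this sketch, not part of the resource code:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RegisterSchemaExample {
  public static void main(String[] args) throws Exception {
    // The request body wraps the schema string; "schemaType" defaults to AVRO,
    // matching the AvroSchema.TYPE fallback in the resource method above.
    String body = "{\"schema\": \"{\\\"type\\\": \\\"string\\\"}\"}";
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:8081/subjects/my-subject/versions"))
        .header("Content-Type", "application/vnd.schemaregistry.v1+json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    // On success the body is {"id": <n>}; 409 signals an incompatible schema,
    // 422 an invalid schema or an operation that is not permitted.
    System.out.println(response.statusCode() + " " + response.body());
  }
}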
Use of io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method deleteSubject:
@Override
public List<Integer> deleteSubject(String subject, boolean permanentDelete)
    throws SchemaRegistryException {
  try {
    if (isReadOnlyMode(subject)) {
      throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
    }
    // Ensure cache is up-to-date before any potential writes
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    List<Integer> deletedVersions = new ArrayList<>();
    int deleteWatermarkVersion = 0;
    Iterator<Schema> schemasToBeDeleted = getAllVersions(subject, permanentDelete);
    while (schemasToBeDeleted.hasNext()) {
      deleteWatermarkVersion = schemasToBeDeleted.next().getVersion();
      SchemaKey key = new SchemaKey(subject, deleteWatermarkVersion);
      if (!lookupCache.referencesSchema(key).isEmpty()) {
        throw new ReferenceExistsException(key.toString());
      }
      if (permanentDelete) {
        SchemaValue schemaValue = (SchemaValue) lookupCache.get(key);
        if (schemaValue != null && !schemaValue.isDeleted()) {
          throw new SubjectNotSoftDeletedException(subject);
        }
      }
      deletedVersions.add(deleteWatermarkVersion);
    }
    if (!permanentDelete) {
      DeleteSubjectKey key = new DeleteSubjectKey(subject);
      DeleteSubjectValue value = new DeleteSubjectValue(subject, deleteWatermarkVersion);
      kafkaStore.put(key, value);
      if (getMode(subject) != null) {
        deleteMode(subject);
      }
      if (getCompatibilityLevel(subject) != null) {
        deleteCompatibility(subject);
      }
    } else {
      for (Integer version : deletedVersions) {
        kafkaStore.put(new SchemaKey(subject, version), null);
      }
    }
    return deletedVersions;
  } catch (StoreTimeoutException te) {
    throw new SchemaRegistryTimeoutException(
        "Write to the Kafka store timed out while deleting the subject", te);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Error while deleting the subject in the"
        + " backend Kafka store", e);
  }
}
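The two-phase delete above (soft delete by default, permanent delete only for already soft-deleted versions) maps onto two DELETE calls in the REST API. A sketch, again assuming a local registry and an illustrative subject name:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeleteSubjectExample {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    // Soft delete: versions are tombstoned but the schema records remain in
    // the store until a permanent delete.
    HttpResponse<String> soft = client.send(
        HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:8081/subjects/my-subject"))
            .DELETE().build(),
        HttpResponse.BodyHandlers.ofString());
    System.out.println("Soft delete: " + soft.body());
    // Permanent delete: only allowed after the soft delete, otherwise the
    // registry rejects it (SubjectNotSoftDeletedException in the code above).
    HttpResponse<String> hard = client.send(
        HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:8081/subjects/my-subject?permanent=true"))
            .DELETE().build(),
        HttpResponse.BodyHandlers.ofString());
    System.out.println("Permanent delete: " + hard.body());
  }
}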
Use of io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException in project schema-registry by confluentinc.
The class ConfigResource, method deleteTopLevelConfig:
@DELETE
@Operation(summary = "Deletes the Global-level compatibility level config and " + "revert to the global default.", responses = { @ApiResponse(content = @Content(schema = @Schema(implementation = CompatibilityLevel.class))), @ApiResponse(responseCode = "500", description = "Error code 50001 -- Error in the backend " + "datastore") })
public void deleteTopLevelConfig(
    @Suspended final AsyncResponse asyncResponse, @Context HttpHeaders headers) {
  log.info("Deleting Global compatibility setting and reverting back to default");
  Config deletedConfig;
  try {
    CompatibilityLevel currentCompatibility = schemaRegistry.getCompatibilityLevel(null);
    Map<String, String> headerProperties = requestHeaderBuilder.buildRequestHeaders(
        headers, schemaRegistry.config().whitelistHeaders());
    schemaRegistry.deleteCompatibilityConfigOrForward(null, headerProperties);
    deletedConfig = new Config(currentCompatibility.name);
  } catch (OperationNotPermittedException e) {
    throw Errors.operationNotPermittedException(e.getMessage());
  } catch (SchemaRegistryStoreException e) {
    throw Errors.storeException("Failed to delete compatibility level", e);
  } catch (UnknownLeaderException e) {
    throw Errors.unknownLeaderException("Failed to delete compatibility level", e);
  } catch (SchemaRegistryRequestForwardingException e) {
    throw Errors.requestForwardingFailedException("Error while forwarding delete config request"
        + " to the leader", e);
  }
  asyncResponse.resume(deletedConfig);
}
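A minimal call against this endpoint; the local registry URL is an assumption of this sketch:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeleteGlobalConfigExample {
  public static void main(String[] args) throws Exception {
    // DELETE /config removes the top-level compatibility override; the
    // response echoes the compatibility level that was just deleted.
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create("http://localhost:8081/config"))
        .DELETE()
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.body());
  }
}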
Use of io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method deleteSchemaVersion:
@Override
public void deleteSchemaVersion(String subject, Schema schema, boolean permanentDelete)
    throws SchemaRegistryException {
  try {
    if (isReadOnlyMode(subject)) {
      throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
    }
    SchemaKey key = new SchemaKey(subject, schema.getVersion());
    if (!lookupCache.referencesSchema(key).isEmpty()) {
      throw new ReferenceExistsException(key.toString());
    }
    SchemaValue schemaValue = (SchemaValue) lookupCache.get(key);
    if (permanentDelete && schemaValue != null && !schemaValue.isDeleted()) {
      throw new SchemaVersionNotSoftDeletedException(subject, schema.getVersion().toString());
    }
    // Ensure cache is up-to-date before any potential writes
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    if (!permanentDelete) {
      schemaValue = new SchemaValue(schema);
      schemaValue.setDeleted(true);
      kafkaStore.put(key, schemaValue);
      if (!getAllVersions(subject, false).hasNext()) {
        if (getMode(subject) != null) {
          deleteMode(subject);
        }
        if (getCompatibilityLevel(subject) != null) {
          deleteCompatibility(subject);
        }
      }
    } else {
      kafkaStore.put(key, null);
    }
  } catch (StoreTimeoutException te) {
    throw new SchemaRegistryTimeoutException(
        "Write to the Kafka store timed out while deleting the schema version", te);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Error while deleting the schema for subject '"
        + subject + "' in the backend Kafka store", e);
  }
}
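As with subject deletion, a single version is soft-deleted first and only then eligible for permanent deletion. A sketch against the REST API, with an illustrative subject and version number:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeleteSchemaVersionExample {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    String base = "http://localhost:8081/subjects/my-subject/versions/1";
    // Soft delete of version 1; rejected if another schema still references
    // this version (ReferenceExistsException in the code above).
    HttpResponse<String> soft = client.send(
        HttpRequest.newBuilder().uri(URI.create(base)).DELETE().build(),
        HttpResponse.BodyHandlers.ofString());
    System.out.println("Soft delete: " + soft.body());
    // Permanent delete requires the prior soft delete.
    HttpResponse<String> hard = client.send(
        HttpRequest.newBuilder().uri(URI.create(base + "?permanent=true")).DELETE().build(),
        HttpResponse.BodyHandlers.ofString());
    System.out.println("Permanent delete: " + hard.body());
  }
}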