Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException in project schema-registry by confluentinc.
The class SubjectVersionsResource, method deleteSchemaVersion.
@DELETE
@Path("/{version}")
@PerformanceMetric("subjects.versions.deleteSchemaVersion-schema")
@Operation(summary = "Deletes a specific version of the schema registered under this subject. "
    + "This only deletes the version and the schema ID remains intact making it still possible "
    + "to decode data using the schema ID. This API is recommended to be used only in "
    + "development environments or under extreme circumstances wherein it is required to delete "
    + "a previously registered schema for compatibility purposes or to re-register a previously "
    + "registered schema.",
    responses = {
        @ApiResponse(content = @Content(schema = @io.swagger.v3.oas.annotations.media.Schema(
            implementation = int.class))),
        @ApiResponse(responseCode = "404", description = "Error code 40401 -- Subject not found\n"
            + "Error code 40402 -- Version not found"),
        @ApiResponse(responseCode = "422", description = "Error code 42202 -- Invalid version"),
        @ApiResponse(responseCode = "500", description = "Error code 50001 -- Error in the backend "
            + "data store")
    })
public void deleteSchemaVersion(
    @Suspended final AsyncResponse asyncResponse,
    @Context HttpHeaders headers,
    @Parameter(description = "Name of the subject", required = true)
    @PathParam("subject") String subject,
    @Parameter(description = VERSION_PARAM_DESC, required = true)
    @PathParam("version") String version,
    @Parameter(description = "Whether to perform a permanent delete")
    @QueryParam("permanent") boolean permanentDelete) {
  log.info("Deleting schema version {} from subject {}", version, subject);
  subject = QualifiedSubject.normalize(schemaRegistry.tenant(), subject);
  VersionId versionId;
  try {
    versionId = new VersionId(version);
  } catch (InvalidVersionException e) {
    throw Errors.invalidVersionException(e.getMessage());
  }
  Schema schema;
  String errorMessage = "Error while retrieving schema for subject " + subject
      + " with version " + version + " from the schema registry";
  try {
    if (schemaRegistry.schemaVersionExists(subject, versionId, true)) {
      if (!permanentDelete
          && !schemaRegistry.schemaVersionExists(subject, versionId, false)) {
        throw Errors.schemaVersionSoftDeletedException(subject, version);
      }
    }
    schema = schemaRegistry.get(subject, versionId.getVersionId(), true);
    if (schema == null) {
      if (!schemaRegistry.hasSubjects(subject, true)) {
        throw Errors.subjectNotFoundException(subject);
      } else {
        throw Errors.versionNotFoundException(versionId.getVersionId());
      }
    }
  } catch (SchemaRegistryStoreException e) {
    log.debug(errorMessage, e);
    throw Errors.storeException(errorMessage, e);
  } catch (InvalidVersionException e) {
    throw Errors.invalidVersionException(e.getMessage());
  } catch (SchemaRegistryException e) {
    throw Errors.schemaRegistryException(errorMessage, e);
  }
  try {
    Map<String, String> headerProperties = requestHeaderBuilder.buildRequestHeaders(
        headers, schemaRegistry.config().whitelistHeaders());
    schemaRegistry.deleteSchemaVersionOrForward(headerProperties, subject, schema, permanentDelete);
  } catch (SchemaVersionNotSoftDeletedException e) {
    throw Errors.schemaVersionNotSoftDeletedException(e.getSubject(), e.getVersion());
  } catch (SchemaRegistryTimeoutException e) {
    throw Errors.operationTimeoutException("Delete Schema Version operation timed out", e);
  } catch (SchemaRegistryStoreException e) {
    throw Errors.storeException("Delete Schema Version operation failed while writing"
        + " to the Kafka store", e);
  } catch (SchemaRegistryRequestForwardingException e) {
    throw Errors.requestForwardingFailedException("Error while forwarding delete schema version"
        + " request to the leader", e);
  } catch (ReferenceExistsException e) {
    throw Errors.referenceExistsException(e.getMessage());
  } catch (UnknownLeaderException e) {
    throw Errors.unknownLeaderException("Leader not known.", e);
  } catch (SchemaRegistryException e) {
    throw Errors.schemaRegistryException("Error while deleting Schema Version", e);
  }
  asyncResponse.resume(schema.getVersion());
}
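For context, callers usually exercise this endpoint through the Java client rather than raw HTTP. A minimal sketch, assuming a registry at http://localhost:8081 and a subject named "test-subject" (both hypothetical):

import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class DeleteVersionExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical registry URL and cache capacity, for illustration only.
    SchemaRegistryClient client =
        new CachedSchemaRegistryClient("http://localhost:8081", 10);
    // Soft-deletes version 1 of the subject; the schema ID stays resolvable,
    // matching the behavior documented in the @Operation summary above.
    Integer deleted = client.deleteSchemaVersion("test-subject", "1");
    System.out.println("Deleted version: " + deleted);
  }
}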
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException in project schema-registry by confluentinc.
The class SubjectVersionsResource, method register.
@POST
@PerformanceMetric("subjects.versions.register")
@Operation(summary = "Register a new schema under the specified subject. If successfully "
    + "registered, this returns the unique identifier of this schema in the registry. The "
    + "returned identifier should be used to retrieve this schema from the schemas resource and "
    + "is different from the schema's version which is associated with the subject. If the same "
    + "schema is registered under a different subject, the same identifier will be returned. "
    + "However, the version of the schema may be different under different subjects.\n"
    + "A schema should be compatible with the previously registered schema or schemas (if there "
    + "are any) as per the configured compatibility level. The configured compatibility level "
    + "can be obtained by issuing a GET http:get:: /config/(string: subject). If that returns "
    + "null, then GET http:get:: /config\n"
    + "When there are multiple instances of Schema Registry running in the same cluster, the "
    + "schema registration request will be forwarded to one of the instances designated as "
    + "the primary. If the primary is not available, the client will get an error code "
    + "indicating that the forwarding has failed.",
    responses = {
        @ApiResponse(content = @Content(schema = @io.swagger.v3.oas.annotations.media.Schema(
            implementation = RegisterSchemaResponse.class))),
        @ApiResponse(responseCode = "409", description = "Incompatible schema"),
        @ApiResponse(responseCode = "422", description = "Error code 42201 -- Invalid schema or "
            + "schema type"),
        @ApiResponse(responseCode = "500",
            description = "Error code 50001 -- Error in the backend data store\n"
                + "Error code 50002 -- Operation timed out\n"
                + "Error code 50003 -- Error while forwarding the request to the primary")
    })
public void register(
    @Suspended final AsyncResponse asyncResponse,
    @Context HttpHeaders headers,
    @Parameter(description = "Name of the subject", required = true)
    @PathParam("subject") String subjectName,
    @Parameter(description = "Whether to register the normalized schema")
    @QueryParam("normalize") boolean normalize,
    @Parameter(description = "Schema", required = true) @NotNull RegisterSchemaRequest request) {
  log.info("Registering new schema: subject {}, version {}, id {}, type {}, schema size {}",
      subjectName, request.getVersion(), request.getId(), request.getSchemaType(),
      request.getSchema() == null ? 0 : request.getSchema().length());
  if (subjectName != null && (CharMatcher.javaIsoControl().matchesAnyOf(subjectName)
      || QualifiedSubject.create(this.schemaRegistry.tenant(), subjectName)
          .getSubject().equals(GLOBAL_RESOURCE_NAME))) {
    throw Errors.invalidSubjectException(subjectName);
  }
  subjectName = QualifiedSubject.normalize(schemaRegistry.tenant(), subjectName);
  Map<String, String> headerProperties = requestHeaderBuilder.buildRequestHeaders(
      headers, schemaRegistry.config().whitelistHeaders());
  Schema schema = new Schema(
      subjectName,
      request.getVersion() != null ? request.getVersion() : 0,
      request.getId() != null ? request.getId() : -1,
      request.getSchemaType() != null ? request.getSchemaType() : AvroSchema.TYPE,
      request.getReferences(),
      request.getSchema());
  int id;
  try {
    id = schemaRegistry.registerOrForward(subjectName, schema, normalize, headerProperties);
  } catch (IdDoesNotMatchException e) {
    throw Errors.idDoesNotMatchException(e);
  } catch (InvalidSchemaException e) {
    throw Errors.invalidSchemaException(e);
  } catch (OperationNotPermittedException e) {
    throw Errors.operationNotPermittedException(e.getMessage());
  } catch (SchemaRegistryTimeoutException e) {
    throw Errors.operationTimeoutException("Register operation timed out", e);
  } catch (SchemaRegistryStoreException e) {
    throw Errors.storeException("Register schema operation failed while writing"
        + " to the Kafka store", e);
  } catch (SchemaRegistryRequestForwardingException e) {
    throw Errors.requestForwardingFailedException("Error while forwarding register schema request"
        + " to the leader", e);
  } catch (IncompatibleSchemaException e) {
    throw Errors.incompatibleSchemaException("Schema being registered is incompatible with"
        + " an earlier schema for subject \"" + subjectName + "\", details: "
        + e.getMessage(), e);
  } catch (UnknownLeaderException e) {
    throw Errors.unknownLeaderException("Leader not known.", e);
  } catch (SchemaRegistryException e) {
    throw Errors.schemaRegistryException("Error while registering schema", e);
  }
  RegisterSchemaResponse registerSchemaResponse = new RegisterSchemaResponse();
  registerSchemaResponse.setId(id);
  asyncResponse.resume(registerSchemaResponse);
}
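The corresponding client call returns the registry-wide schema ID described in the @Operation summary. A minimal sketch, again assuming a hypothetical registry URL and subject name:

import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class RegisterExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical registry URL, for illustration only.
    SchemaRegistryClient client =
        new CachedSchemaRegistryClient("http://localhost:8081", 10);
    // Register an Avro schema; the returned ID is registry-wide, while the
    // version is per subject, as the endpoint documentation above explains.
    int id = client.register("test-subject", new AvroSchema("{\"type\":\"string\"}"));
    System.out.println("Registered schema id: " + id);
  }
}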
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method deleteSubject.
@Override
public List<Integer> deleteSubject(String subject, boolean permanentDelete)
    throws SchemaRegistryException {
  // Ensure cache is up-to-date before any potential writes
  try {
    if (isReadOnlyMode(subject)) {
      throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
    }
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    List<Integer> deletedVersions = new ArrayList<>();
    int deleteWatermarkVersion = 0;
    Iterator<Schema> schemasToBeDeleted = getAllVersions(subject, permanentDelete);
    while (schemasToBeDeleted.hasNext()) {
      deleteWatermarkVersion = schemasToBeDeleted.next().getVersion();
      SchemaKey key = new SchemaKey(subject, deleteWatermarkVersion);
      if (!lookupCache.referencesSchema(key).isEmpty()) {
        throw new ReferenceExistsException(key.toString());
      }
      if (permanentDelete) {
        SchemaValue schemaValue = (SchemaValue) lookupCache.get(key);
        if (schemaValue != null && !schemaValue.isDeleted()) {
          throw new SubjectNotSoftDeletedException(subject);
        }
      }
      deletedVersions.add(deleteWatermarkVersion);
    }
    if (!permanentDelete) {
      DeleteSubjectKey key = new DeleteSubjectKey(subject);
      DeleteSubjectValue value = new DeleteSubjectValue(subject, deleteWatermarkVersion);
      kafkaStore.put(key, value);
      if (getMode(subject) != null) {
        deleteMode(subject);
      }
      if (getCompatibilityLevel(subject) != null) {
        deleteCompatibility(subject);
      }
    } else {
      for (Integer version : deletedVersions) {
        kafkaStore.put(new SchemaKey(subject, version), null);
      }
    }
    return deletedVersions;
  } catch (StoreTimeoutException te) {
    throw new SchemaRegistryTimeoutException("Write to the Kafka store timed out", te);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Error while deleting the subject in the"
        + " backend Kafka store", e);
  }
}
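Note the two-phase contract enforced above: a permanent delete of a version that was never soft-deleted throws SubjectNotSoftDeletedException. A minimal client-side sketch of the expected sequence, assuming a hypothetical registry URL and subject name:

import java.util.List;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class DeleteSubjectExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical registry URL and subject name, for illustration only.
    SchemaRegistryClient client =
        new CachedSchemaRegistryClient("http://localhost:8081", 10);
    // Soft delete first: this maps to the !permanentDelete branch above,
    // which writes a DeleteSubjectKey marker instead of tombstoning schemas.
    List<Integer> softDeleted = client.deleteSubject("test-subject");
    System.out.println("Soft-deleted versions: " + softDeleted);
    // Permanent delete is only permitted afterwards; otherwise the method
    // above throws SubjectNotSoftDeletedException.
    List<Integer> hardDeleted = client.deleteSubject("test-subject", true);
    System.out.println("Hard-deleted versions: " + hardDeleted);
  }
}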
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException in project schema-registry by confluentinc.
The class KafkaSchemaRegistry, method register.
@Override
public int register(String subject, Schema schema, boolean normalize)
    throws SchemaRegistryException {
  try {
    checkRegisterMode(subject, schema);
    // Ensure cache is up-to-date before any potential writes
    kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
    int schemaId = schema.getId();
    ParsedSchema parsedSchema = canonicalizeSchema(schema, schemaId < 0, normalize);
    // see if the schema to be registered already exists
    SchemaIdAndSubjects schemaIdAndSubjects = this.lookupCache.schemaIdAndSubjects(schema);
    if (schemaIdAndSubjects != null) {
      if (schemaId >= 0 && schemaId != schemaIdAndSubjects.getSchemaId()) {
        throw new IdDoesNotMatchException(schemaIdAndSubjects.getSchemaId(), schema.getId());
      }
      if (schemaIdAndSubjects.hasSubject(subject)
          && !isSubjectVersionDeleted(subject, schemaIdAndSubjects.getVersion(subject))) {
        // return only if the schema was previously registered under the input subject
        return schemaIdAndSubjects.getSchemaId();
      } else {
        // need to register schema under the input subject
        schemaId = schemaIdAndSubjects.getSchemaId();
      }
    }
    // determine the latest version of the schema in the subject
    List<SchemaValue> allVersions = getAllSchemaValues(subject);
    Collections.reverse(allVersions);
    List<SchemaValue> deletedVersions = new ArrayList<>();
    List<ParsedSchema> undeletedVersions = new ArrayList<>();
    int newVersion = MIN_VERSION;
    for (SchemaValue schemaValue : allVersions) {
      newVersion = Math.max(newVersion, schemaValue.getVersion() + 1);
      if (schemaValue.isDeleted()) {
        deletedVersions.add(schemaValue);
      } else {
        ParsedSchema undeletedSchema = parseSchema(getSchemaEntityFromSchemaValue(schemaValue));
        if (parsedSchema.references().isEmpty()
            && !undeletedSchema.references().isEmpty()
            && parsedSchema.deepEquals(undeletedSchema)) {
          // This handles the case where a schema is sent with all references resolved
          return schemaValue.getId();
        }
        undeletedVersions.add(undeletedSchema);
      }
    }
    Collections.reverse(undeletedVersions);
    final List<String> compatibilityErrorLogs =
        isCompatibleWithPrevious(subject, parsedSchema, undeletedVersions);
    final boolean isCompatible = compatibilityErrorLogs.isEmpty();
    if (normalize) {
      parsedSchema = parsedSchema.normalize();
    }
    // Allow schema providers to modify the schema during compatibility checks
    schema.setSchema(parsedSchema.canonicalString());
    schema.setReferences(parsedSchema.references());
    if (isCompatible) {
      // save the context key
      QualifiedSubject qs = QualifiedSubject.create(tenant(), subject);
      if (qs != null && !DEFAULT_CONTEXT.equals(qs.getContext())) {
        ContextKey contextKey = new ContextKey(qs.getTenant(), qs.getContext());
        if (kafkaStore.get(contextKey) == null) {
          ContextValue contextValue = new ContextValue(qs.getTenant(), qs.getContext());
          kafkaStore.put(contextKey, contextValue);
        }
      }
      // assign a guid and put the schema in the kafka store
      if (schema.getVersion() <= 0) {
        schema.setVersion(newVersion);
      }
      SchemaKey schemaKey = new SchemaKey(subject, schema.getVersion());
      if (schemaId >= 0) {
        checkIfSchemaWithIdExist(schemaId, schema);
        schema.setId(schemaId);
        kafkaStore.put(schemaKey, new SchemaValue(schema));
      } else {
        int retries = 0;
        while (retries++ < kafkaStoreMaxRetries) {
          int newId = idGenerator.id(new SchemaValue(schema));
          // Verify id is not already in use
          if (lookupCache.schemaKeyById(newId, subject) == null) {
            schema.setId(newId);
            if (retries > 1) {
              log.warn(String.format("Retrying to register the schema with ID %s", newId));
            }
            kafkaStore.put(schemaKey, new SchemaValue(schema));
            break;
          }
        }
        if (retries >= kafkaStoreMaxRetries) {
          throw new SchemaRegistryStoreException("Error while registering the schema due"
              + " to generating an ID that is already in use.");
        }
      }
      for (SchemaValue deleted : deletedVersions) {
        if (deleted.getId().equals(schema.getId())
            && deleted.getVersion().compareTo(schema.getVersion()) < 0) {
          // Tombstone previous version with the same ID
          SchemaKey key = new SchemaKey(deleted.getSubject(), deleted.getVersion());
          kafkaStore.put(key, null);
        }
      }
      return schema.getId();
    } else {
      throw new IncompatibleSchemaException(compatibilityErrorLogs.toString());
    }
  } catch (StoreTimeoutException te) {
    throw new SchemaRegistryTimeoutException("Write to the Kafka store timed out", te);
  } catch (StoreException e) {
    throw new SchemaRegistryStoreException("Error while registering the schema in the"
        + " backend Kafka store", e);
  }
}
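When this method surfaces a StoreTimeoutException as SchemaRegistryTimeoutException, the REST layer above maps it to error code 50002 (per the @ApiResponse documentation in the register endpoint). A minimal sketch of client-side retry handling built on that mapping; the registry URL, subject name, and retry budget are illustrative:

import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;

public class RegisterWithRetry {
  // Error code the REST layer returns for operation timeouts,
  // per the @ApiResponse documentation above.
  private static final int OPERATION_TIMEOUT = 50002;

  public static void main(String[] args) throws Exception {
    SchemaRegistryClient client =
        new CachedSchemaRegistryClient("http://localhost:8081", 10);
    AvroSchema schema = new AvroSchema("{\"type\":\"string\"}");
    for (int attempt = 1; attempt <= 3; attempt++) {  // illustrative retry budget
      try {
        System.out.println("id = " + client.register("test-subject", schema));
        return;
      } catch (RestClientException e) {
        // Retry only timeouts; registering the identical schema again is safe
        // because an existing registration returns the same ID.
        if (e.getErrorCode() != OPERATION_TIMEOUT || attempt == 3) {
          throw e;
        }
      }
    }
  }
}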
Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException in project schema-registry by confluentinc.
The class KafkaGroupLeaderElector, method init.
@Override
public void init() throws SchemaRegistryTimeoutException, SchemaRegistryStoreException {
  log.debug("Initializing schema registry group member");
  executor = Executors.newSingleThreadExecutor();
  executor.submit(new Runnable() {
    @Override
    public void run() {
      try {
        while (!stopped.get()) {
          coordinator.poll(Integer.MAX_VALUE);
        }
      } catch (Throwable t) {
        log.error("Unexpected exception in schema registry group processing thread", t);
      }
    }
  });
  try {
    if (!joinedLatch.await(initTimeout, TimeUnit.MILLISECONDS)) {
      throw new SchemaRegistryTimeoutException("Timed out waiting for join group to complete");
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new SchemaRegistryStoreException("Interrupted while waiting for join group to"
        + " complete", e);
  }
  log.debug("Schema registry group member initialized and joined group");
}
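The timeout here comes from the bounded CountDownLatch wait, not from the poll loop itself. A self-contained sketch of the same pattern, with all names illustrative rather than taken from the project:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class LatchTimeoutSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch joined = new CountDownLatch(1);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    // Background task stands in for the coordinator poll loop; it signals
    // the latch the way a completed group join would.
    executor.submit(joined::countDown);
    // Bounded wait mirroring joinedLatch.await(initTimeout, MILLISECONDS).
    if (!joined.await(500, TimeUnit.MILLISECONDS)) {
      throw new TimeoutException("Timed out waiting for join group to complete");
    }
    executor.shutdown();
  }
}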