Example 11 with SchemaRegistryStoreException

Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.

From the class KafkaSchemaRegistry, the deleteSubject method:

@Override
public List<Integer> deleteSubject(String subject, boolean permanentDelete) throws SchemaRegistryException {
    // Ensure cache is up-to-date before any potential writes
    try {
        if (isReadOnlyMode(subject)) {
            throw new OperationNotPermittedException("Subject " + subject + " is in read-only mode");
        }
        kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
        List<Integer> deletedVersions = new ArrayList<>();
        int deleteWatermarkVersion = 0;
        Iterator<Schema> schemasToBeDeleted = getAllVersions(subject, permanentDelete);
        while (schemasToBeDeleted.hasNext()) {
            deleteWatermarkVersion = schemasToBeDeleted.next().getVersion();
            SchemaKey key = new SchemaKey(subject, deleteWatermarkVersion);
            if (!lookupCache.referencesSchema(key).isEmpty()) {
                throw new ReferenceExistsException(key.toString());
            }
            if (permanentDelete) {
                SchemaValue schemaValue = (SchemaValue) lookupCache.get(key);
                if (schemaValue != null && !schemaValue.isDeleted()) {
                    throw new SubjectNotSoftDeletedException(subject);
                }
            }
            deletedVersions.add(deleteWatermarkVersion);
        }
        if (!permanentDelete) {
            DeleteSubjectKey key = new DeleteSubjectKey(subject);
            DeleteSubjectValue value = new DeleteSubjectValue(subject, deleteWatermarkVersion);
            kafkaStore.put(key, value);
            if (getMode(subject) != null) {
                deleteMode(subject);
            }
            if (getCompatibilityLevel(subject) != null) {
                deleteCompatibility(subject);
            }
        } else {
            for (Integer version : deletedVersions) {
                kafkaStore.put(new SchemaKey(subject, version), null);
            }
        }
        return deletedVersions;
    } catch (StoreTimeoutException te) {
        throw new SchemaRegistryTimeoutException("Write to the Kafka store timed out while", te);
    } catch (StoreException e) {
        throw new SchemaRegistryStoreException("Error while deleting the subject in the" + " backend Kafka store", e);
    }
}
Also used :
ReferenceExistsException (io.confluent.kafka.schemaregistry.exceptions.ReferenceExistsException)
ParsedSchema (io.confluent.kafka.schemaregistry.ParsedSchema)
Schema (io.confluent.kafka.schemaregistry.client.rest.entities.Schema)
AvroSchema (io.confluent.kafka.schemaregistry.avro.AvroSchema)
ArrayList (java.util.ArrayList)
SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException)
SubjectNotSoftDeletedException (io.confluent.kafka.schemaregistry.exceptions.SubjectNotSoftDeletedException)
StoreException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreException)
StoreTimeoutException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException)
OperationNotPermittedException (io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException)
SchemaRegistryTimeoutException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException)
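
A minimal client-side sketch of this delete path, for context. This is not from the project: the registry URL and subject name are assumptions, and it leans on the standard SchemaRegistryClient deleteSubject overloads (the boolean "permanent" overload is assumed to be available in the client version in use).

import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import java.util.List;

public class DeleteSubjectSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical registry URL; point this at a real deployment.
        SchemaRegistryClient client =
            new CachedSchemaRegistryClient("http://localhost:8081", 100);
        // Soft delete: versions are marked deleted but stay in the store.
        List<Integer> softDeleted = client.deleteSubject("my-subject");
        // Permanent delete is only legal after a soft delete; otherwise the
        // server-side check above throws SubjectNotSoftDeletedException.
        List<Integer> hardDeleted = client.deleteSubject("my-subject", true);
        System.out.println("soft: " + softDeleted + ", hard: " + hardDeleted);
    }
}

The two-step order mirrors the permanentDelete branch above: a permanent delete of a version that was never soft-deleted is rejected.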

Example 12 with SchemaRegistryStoreException

Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.

From the class KafkaSchemaRegistry, the register method:

@Override
public int register(String subject, Schema schema, boolean normalize) throws SchemaRegistryException {
    try {
        checkRegisterMode(subject, schema);
        // Ensure cache is up-to-date before any potential writes
        kafkaStore.waitUntilKafkaReaderReachesLastOffset(subject, kafkaStoreTimeoutMs);
        int schemaId = schema.getId();
        ParsedSchema parsedSchema = canonicalizeSchema(schema, schemaId < 0, normalize);
        // see if the schema to be registered already exists
        SchemaIdAndSubjects schemaIdAndSubjects = this.lookupCache.schemaIdAndSubjects(schema);
        if (schemaIdAndSubjects != null) {
            if (schemaId >= 0 && schemaId != schemaIdAndSubjects.getSchemaId()) {
                throw new IdDoesNotMatchException(schemaIdAndSubjects.getSchemaId(), schema.getId());
            }
            if (schemaIdAndSubjects.hasSubject(subject) && !isSubjectVersionDeleted(subject, schemaIdAndSubjects.getVersion(subject))) {
                // return only if the schema was previously registered under the input subject
                return schemaIdAndSubjects.getSchemaId();
            } else {
                // need to register schema under the input subject
                schemaId = schemaIdAndSubjects.getSchemaId();
            }
        }
        // determine the latest version of the schema in the subject
        List<SchemaValue> allVersions = getAllSchemaValues(subject);
        Collections.reverse(allVersions);
        List<SchemaValue> deletedVersions = new ArrayList<>();
        List<ParsedSchema> undeletedVersions = new ArrayList<>();
        int newVersion = MIN_VERSION;
        for (SchemaValue schemaValue : allVersions) {
            newVersion = Math.max(newVersion, schemaValue.getVersion() + 1);
            if (schemaValue.isDeleted()) {
                deletedVersions.add(schemaValue);
            } else {
                ParsedSchema undeletedSchema = parseSchema(getSchemaEntityFromSchemaValue(schemaValue));
                if (parsedSchema.references().isEmpty() && !undeletedSchema.references().isEmpty() && parsedSchema.deepEquals(undeletedSchema)) {
                    // This handles the case where a schema is sent with all references resolved
                    return schemaValue.getId();
                }
                undeletedVersions.add(undeletedSchema);
            }
        }
        Collections.reverse(undeletedVersions);
        final List<String> compatibilityErrorLogs = isCompatibleWithPrevious(subject, parsedSchema, undeletedVersions);
        final boolean isCompatible = compatibilityErrorLogs.isEmpty();
        if (normalize) {
            parsedSchema = parsedSchema.normalize();
        }
        // Allow schema providers to modify the schema during compatibility checks
        schema.setSchema(parsedSchema.canonicalString());
        schema.setReferences(parsedSchema.references());
        if (isCompatible) {
            // save the context key
            QualifiedSubject qs = QualifiedSubject.create(tenant(), subject);
            if (qs != null && !DEFAULT_CONTEXT.equals(qs.getContext())) {
                ContextKey contextKey = new ContextKey(qs.getTenant(), qs.getContext());
                if (kafkaStore.get(contextKey) == null) {
                    ContextValue contextValue = new ContextValue(qs.getTenant(), qs.getContext());
                    kafkaStore.put(contextKey, contextValue);
                }
            }
            // assign a guid and put the schema in the kafka store
            if (schema.getVersion() <= 0) {
                schema.setVersion(newVersion);
            }
            SchemaKey schemaKey = new SchemaKey(subject, schema.getVersion());
            if (schemaId >= 0) {
                checkIfSchemaWithIdExist(schemaId, schema);
                schema.setId(schemaId);
                kafkaStore.put(schemaKey, new SchemaValue(schema));
            } else {
                int retries = 0;
                while (retries++ < kafkaStoreMaxRetries) {
                    int newId = idGenerator.id(new SchemaValue(schema));
                    // Verify id is not already in use
                    if (lookupCache.schemaKeyById(newId, subject) == null) {
                        schema.setId(newId);
                        if (retries > 1) {
                            log.warn(String.format("Retrying to register the schema with ID %s", newId));
                        }
                        kafkaStore.put(schemaKey, new SchemaValue(schema));
                        break;
                    }
                }
                if (retries >= kafkaStoreMaxRetries) {
                    throw new SchemaRegistryStoreException("Error while registering the schema due " + "to generating an ID that is already in use.");
                }
            }
            for (SchemaValue deleted : deletedVersions) {
                if (deleted.getId().equals(schema.getId()) && deleted.getVersion().compareTo(schema.getVersion()) < 0) {
                    // Tombstone previous version with the same ID
                    SchemaKey key = new SchemaKey(deleted.getSubject(), deleted.getVersion());
                    kafkaStore.put(key, null);
                }
            }
            return schema.getId();
        } else {
            throw new IncompatibleSchemaException(compatibilityErrorLogs.toString());
        }
    } catch (StoreTimeoutException te) {
        throw new SchemaRegistryTimeoutException("Write to the Kafka store timed out while", te);
    } catch (StoreException e) {
        throw new SchemaRegistryStoreException("Error while registering the schema in the" + " backend Kafka store", e);
    }
}
Also used :
QualifiedSubject (io.confluent.kafka.schemaregistry.utils.QualifiedSubject)
ArrayList (java.util.ArrayList)
SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException)
SchemaString (io.confluent.kafka.schemaregistry.client.rest.entities.SchemaString)
StoreException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreException)
IncompatibleSchemaException (io.confluent.kafka.schemaregistry.exceptions.IncompatibleSchemaException)
IdDoesNotMatchException (io.confluent.kafka.schemaregistry.exceptions.IdDoesNotMatchException)
StoreTimeoutException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException)
ParsedSchema (io.confluent.kafka.schemaregistry.ParsedSchema)
SchemaRegistryTimeoutException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException)
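
Seen from a client, the schemaIdAndSubjects lookup above makes register idempotent: submitting the same schema twice returns the same ID instead of writing a new version. A minimal hedged sketch, assuming a local registry and an illustrative subject and schema:

import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class RegisterSketch {
    public static void main(String[] args) throws Exception {
        SchemaRegistryClient client =
            new CachedSchemaRegistryClient("http://localhost:8081", 100);
        AvroSchema schema = new AvroSchema(
            "{\"type\":\"record\",\"name\":\"User\","
            + "\"fields\":[{\"name\":\"id\",\"type\":\"long\"}]}");
        // First call takes the register() path above and assigns a fresh ID.
        int id1 = client.register("users-value", schema);
        // Second call hits the schemaIdAndSubjects cache lookup and returns
        // the previously assigned ID without writing a new version.
        int id2 = client.register("users-value", schema);
        System.out.println(id1 + " == " + id2);
    }
}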

Example 13 with SchemaRegistryStoreException

Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.

From the class CompatibilityResource, the testCompatibilityForSubject method:

@POST
@Path("/subjects/{subject}/versions")
@Operation(summary = "Test input schema against a subject's schemas for compatibility, " + "based on the compatibility level of the subject configured. In other word, " + "it will perform the same compatibility check as register for that subject", description = "the compatibility level applied for the check is the configured compatibility level " + "for the subject (http:get:: /config/(string: subject)). If this subject's " + "compatibility level was never changed, then the global compatibility level " + "applies (http:get:: /config).", responses = { @ApiResponse(content = @Content(schema = @io.swagger.v3.oas.annotations.media.Schema(implementation = CompatibilityCheckResponse.class))), @ApiResponse(responseCode = "422", description = "Error code 42201 -- Invalid schema or schema type\n" + "Error code 42202 -- Invalid version"), @ApiResponse(responseCode = "500", description = "Error code 50001 -- Error in the " + "backend data store") })
@PerformanceMetric("compatibility.subjects.versions.verify")
public void testCompatibilityForSubject(@Suspended final AsyncResponse asyncResponse, @Parameter(description = "Subject of the schema version against which compatibility is to " + "be tested", required = true) @PathParam("subject") String subject, @Parameter(description = "Schema", required = true) @NotNull RegisterSchemaRequest request, @Parameter(description = "Whether to return detailed error messages") @QueryParam("verbose") boolean verbose) {
    log.info("Testing schema subject {} compatibility with specified version {}, id {}, type {}", subject, request.getVersion(), request.getId(), request.getSchemaType());
    subject = QualifiedSubject.normalize(schemaRegistry.tenant(), subject);
    // returns true if posted schema is compatible with the specified subject.
    List<String> errorMessages;
    List<Schema> previousSchemas = new ArrayList<>();
    try {
        // Don't check compatibility against deleted schema
        schemaRegistry.getAllVersions(subject, false).forEachRemaining(previousSchemas::add);
    } catch (SchemaRegistryException e) {
        throw Errors.storeException("Error while retrieving schema for subject " + subject, e);
    }
    Schema schema = new Schema(subject, 0, -1, request.getSchemaType() != null ? request.getSchemaType() : AvroSchema.TYPE, request.getReferences(), request.getSchema());
    try {
        errorMessages = schemaRegistry.isCompatible(subject, schema, previousSchemas);
    } catch (InvalidSchemaException e) {
        if (verbose) {
            errorMessages = Collections.singletonList(e.getMessage());
        } else {
            throw Errors.invalidSchemaException(e);
        }
    } catch (SchemaRegistryStoreException e) {
        throw Errors.storeException("Error while getting compatibility level for subject " + subject, e);
    } catch (SchemaRegistryException e) {
        throw Errors.schemaRegistryException("Error while getting compatibility level for subject " + subject, e);
    }
    CompatibilityCheckResponse compatibilityCheckResponse = createCompatiblityCheckResponse(errorMessages, verbose);
    asyncResponse.resume(compatibilityCheckResponse);
}
Also used :
CompatibilityCheckResponse (io.confluent.kafka.schemaregistry.client.rest.entities.requests.CompatibilityCheckResponse)
InvalidSchemaException (io.confluent.kafka.schemaregistry.exceptions.InvalidSchemaException)
Schema (io.confluent.kafka.schemaregistry.client.rest.entities.Schema)
AvroSchema (io.confluent.kafka.schemaregistry.avro.AvroSchema)
ArrayList (java.util.ArrayList)
SchemaRegistryException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryException)
SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException)
Path (javax.ws.rs.Path)
PerformanceMetric (io.confluent.rest.annotations.PerformanceMetric)
POST (javax.ws.rs.POST)
Operation (io.swagger.v3.oas.annotations.Operation)
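
The endpoint above can be exercised directly over HTTP. A hedged sketch using java.net.http (JDK 11+); the host, subject, and example schema are assumptions, and verbose=true surfaces the errorMessages collected in the handler:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CheckAllVersionsSketch {
    public static void main(String[] args) throws Exception {
        // Path from the @Path annotation above; host and subject are assumed.
        String url = "http://localhost:8081/compatibility/subjects/"
            + "users-value/versions?verbose=true";
        // A RegisterSchemaRequest body with an escaped Avro schema string.
        String body = "{\"schema\": \"{\\\"type\\\": \\\"string\\\"}\"}";
        HttpRequest request = HttpRequest.newBuilder(URI.create(url))
            .header("Content-Type", "application/vnd.schemaregistry.v1+json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}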

Example 14 with SchemaRegistryStoreException

Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.

From the class CompatibilityResource, the testCompatibilityBySubjectName method:

@POST
@Path("/subjects/{subject}/versions/{version}")
@Operation(summary = "Test input schema against a particular version of a subject's schema for " + "compatibility.", description = "the compatibility level applied for the check is the configured compatibility level " + "for the subject (http:get:: /config/(string: subject)). If this subject's " + "compatibility level was never changed, then the global compatibility level " + "applies (http:get:: /config).", responses = { @ApiResponse(content = @Content(schema = @io.swagger.v3.oas.annotations.media.Schema(implementation = CompatibilityCheckResponse.class))), @ApiResponse(responseCode = "404", description = "Error code 40401 -- Subject not found\n" + "Error code 40402 -- Version not found"), @ApiResponse(responseCode = "422", description = "Error code 42201 -- Invalid schema or schema type\n" + "Error code 42202 -- Invalid version"), @ApiResponse(responseCode = "500", description = "Error code 50001 -- Error in the " + "backend data store") })
@PerformanceMetric("compatibility.subjects.versions.verify")
public void testCompatibilityBySubjectName(@Suspended final AsyncResponse asyncResponse, @Parameter(description = "Subject of the schema version against which compatibility is to " + "be tested", required = true) @PathParam("subject") String subject, @Parameter(description = "Version of the subject's schema against which compatibility is to be " + "tested. Valid values for versionId are between [1,2^31-1] or the string " + "\"latest\"." + "\"latest\" checks compatibility of the input schema with the last registered " + "schema " + "under the specified subject", required = true) @PathParam("version") String version, @Parameter(description = "Schema", required = true) @NotNull RegisterSchemaRequest request, @Parameter(description = "Whether to return detailed error messages") @QueryParam("verbose") boolean verbose) {
    log.info("Testing schema subject {} compatibility between existing version {} and " + "specified version {}, id {}, type {}", subject, version, request.getVersion(), request.getId(), request.getSchemaType());
    subject = QualifiedSubject.normalize(schemaRegistry.tenant(), subject);
    // returns true if posted schema is compatible with the specified version. "latest" is
    // a special version
    List<String> errorMessages;
    VersionId versionId = parseVersionId(version);
    Schema schemaForSpecifiedVersion;
    try {
        // Don't check compatibility against deleted schema
        schemaForSpecifiedVersion = schemaRegistry.get(subject, versionId.getVersionId(), false);
    } catch (InvalidVersionException e) {
        throw Errors.invalidVersionException(e.getMessage());
    } catch (SchemaRegistryException e) {
        throw Errors.storeException("Error while retrieving schema for subject " + subject + " and version " + versionId.getVersionId(), e);
    }
    if (schemaForSpecifiedVersion == null && !versionId.isLatest()) {
        throw Errors.versionNotFoundException(versionId.getVersionId());
    }
    Schema schema = new Schema(subject, 0, -1, request.getSchemaType() != null ? request.getSchemaType() : AvroSchema.TYPE, request.getReferences(), request.getSchema());
    try {
        errorMessages = schemaRegistry.isCompatible(subject, schema, schemaForSpecifiedVersion != null ? Collections.singletonList(schemaForSpecifiedVersion) : Collections.emptyList());
    } catch (InvalidSchemaException e) {
        if (verbose) {
            errorMessages = Collections.singletonList(e.getMessage());
        } else {
            throw Errors.invalidSchemaException(e);
        }
    } catch (SchemaRegistryStoreException e) {
        throw Errors.storeException("Error while getting compatibility level for subject " + subject, e);
    } catch (SchemaRegistryException e) {
        throw Errors.schemaRegistryException("Error while getting compatibility level for subject " + subject, e);
    }
    CompatibilityCheckResponse compatibilityCheckResponse = createCompatiblityCheckResponse(errorMessages, verbose);
    asyncResponse.resume(compatibilityCheckResponse);
}
Also used :
VersionId (io.confluent.kafka.schemaregistry.rest.VersionId)
CompatibilityCheckResponse (io.confluent.kafka.schemaregistry.client.rest.entities.requests.CompatibilityCheckResponse)
InvalidSchemaException (io.confluent.kafka.schemaregistry.exceptions.InvalidSchemaException)
InvalidVersionException (io.confluent.kafka.schemaregistry.exceptions.InvalidVersionException)
Schema (io.confluent.kafka.schemaregistry.client.rest.entities.Schema)
AvroSchema (io.confluent.kafka.schemaregistry.avro.AvroSchema)
SchemaRegistryException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryException)
SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException)
Path (javax.ws.rs.Path)
PerformanceMetric (io.confluent.rest.annotations.PerformanceMetric)
POST (javax.ws.rs.POST)
Operation (io.swagger.v3.oas.annotations.Operation)
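
With version set to "latest", this endpoint backs the client's testCompatibility convenience method. A minimal hedged sketch; the registry URL, subject, and candidate schema are assumptions:

import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;

public class LatestCompatibilitySketch {
    public static void main(String[] args) throws Exception {
        SchemaRegistryClient client =
            new CachedSchemaRegistryClient("http://localhost:8081", 100);
        // A candidate that adds an optional field with a default, which is
        // backward compatible under the usual Avro rules.
        AvroSchema candidate = new AvroSchema(
            "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"long\"},"
            + "{\"name\":\"email\",\"type\":[\"null\",\"string\"],"
            + "\"default\":null}]}");
        // Tests against the latest registered version of the subject, i.e.
        // this resource method invoked with version = "latest".
        boolean ok = client.testCompatibility("users-value", candidate);
        System.out.println("compatible with latest: " + ok);
    }
}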

Example 15 with SchemaRegistryStoreException

Use of io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException in project schema-registry by confluentinc.

From the class ConfigResource, the deleteTopLevelConfig method:

@DELETE
@Operation(summary = "Deletes the Global-level compatibility level config and " + "revert to the global default.", responses = { @ApiResponse(content = @Content(schema = @Schema(implementation = CompatibilityLevel.class))), @ApiResponse(responseCode = "500", description = "Error code 50001 -- Error in the backend " + "datastore") })
public void deleteTopLevelConfig(@Suspended final AsyncResponse asyncResponse, @Context HttpHeaders headers) {
    log.info("Deleting Global compatibility setting and reverting back to default");
    Config deletedConfig;
    try {
        CompatibilityLevel currentCompatibility = schemaRegistry.getCompatibilityLevel(null);
        Map<String, String> headerProperties = requestHeaderBuilder.buildRequestHeaders(headers, schemaRegistry.config().whitelistHeaders());
        schemaRegistry.deleteCompatibilityConfigOrForward(null, headerProperties);
        deletedConfig = new Config(currentCompatibility.name);
    } catch (OperationNotPermittedException e) {
        throw Errors.operationNotPermittedException(e.getMessage());
    } catch (SchemaRegistryStoreException e) {
        throw Errors.storeException("Failed to delete compatibility level", e);
    } catch (UnknownLeaderException e) {
        throw Errors.unknownLeaderException("Failed to delete compatibility level", e);
    } catch (SchemaRegistryRequestForwardingException e) {
        throw Errors.requestForwardingFailedException("Error while forwarding delete config request" + " to the leader", e);
    }
    asyncResponse.resume(deletedConfig);
}
Also used :
CompatibilityLevel (io.confluent.kafka.schemaregistry.CompatibilityLevel)
UnknownLeaderException (io.confluent.kafka.schemaregistry.exceptions.UnknownLeaderException)
SchemaRegistryRequestForwardingException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryRequestForwardingException)
Config (io.confluent.kafka.schemaregistry.client.rest.entities.Config)
SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException)
OperationNotPermittedException (io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException)
DELETE (javax.ws.rs.DELETE)
Operation (io.swagger.v3.oas.annotations.Operation)
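
Deleting the global config is a plain HTTP DELETE against /config. A hedged sketch with java.net.http; the host is an assumption:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeleteGlobalConfigSketch {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest
            .newBuilder(URI.create("http://localhost:8081/config"))
            .DELETE()
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        // On success the body echoes the compatibility level that was
        // deleted, matching deletedConfig in the handler above.
        System.out.println(response.statusCode() + " " + response.body());
    }
}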

Aggregations

SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException): 37
Operation (io.swagger.v3.oas.annotations.Operation): 19
StoreException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreException): 16
OperationNotPermittedException (io.confluent.kafka.schemaregistry.exceptions.OperationNotPermittedException): 14
Path (javax.ws.rs.Path): 13
SchemaRegistryException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryException): 12
Schema (io.confluent.kafka.schemaregistry.client.rest.entities.Schema): 11
AvroSchema (io.confluent.kafka.schemaregistry.avro.AvroSchema): 10
SchemaRegistryRequestForwardingException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryRequestForwardingException): 9
UnknownLeaderException (io.confluent.kafka.schemaregistry.exceptions.UnknownLeaderException): 9
GET (javax.ws.rs.GET): 9
SchemaString (io.confluent.kafka.schemaregistry.client.rest.entities.SchemaString): 8
SchemaRegistryTimeoutException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException): 8
PerformanceMetric (io.confluent.rest.annotations.PerformanceMetric): 8
ArrayList (java.util.ArrayList): 8
CompatibilityLevel (io.confluent.kafka.schemaregistry.CompatibilityLevel): 7
ParsedSchema (io.confluent.kafka.schemaregistry.ParsedSchema): 5
VersionId (io.confluent.kafka.schemaregistry.rest.VersionId): 5
Config (io.confluent.kafka.schemaregistry.client.rest.entities.Config): 4
ReferenceExistsException (io.confluent.kafka.schemaregistry.exceptions.ReferenceExistsException): 4