
Example 1 with SerializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.

From the class KafkaStore, method getLatestOffset.

/**
 * Return the latest offset of the store topic.
 *
 * <p>The most reliable way to do so in the face of potential Kafka broker failure is to produce
 * successfully to the Kafka topic and get the offset of the returned metadata.
 *
 * <p>If the most recent write to Kafka was successful (signaled by lastWrittenOffset >= 0),
 * immediately return that offset. Otherwise write a "Noop key" to Kafka in order to find the
 * latest offset.
 */
private long getLatestOffset(int timeoutMs) throws StoreException {
    ProducerRecord<byte[], byte[]> producerRecord = null;
    if (this.lastWrittenOffset >= 0) {
        return this.lastWrittenOffset;
    }
    try {
        producerRecord = new ProducerRecord<byte[], byte[]>(topic, 0, this.serializer.serializeKey(noopKey), null);
    } catch (SerializationException e) {
        throw new StoreException("Failed to serialize noop key.", e);
    }
    try {
        log.trace("Sending Noop record to KafkaStore to find last offset.");
        Future<RecordMetadata> ack = producer.send(producerRecord);
        RecordMetadata metadata = ack.get(timeoutMs, TimeUnit.MILLISECONDS);
        this.lastWrittenOffset = metadata.offset();
        log.trace("Noop record's offset is " + this.lastWrittenOffset);
        return this.lastWrittenOffset;
    } catch (Exception e) {
        throw new StoreException("Failed to write Noop record to kafka store.", e);
    }
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) SerializationException(io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException) KafkaException(org.apache.kafka.common.KafkaException) TimeoutException(java.util.concurrent.TimeoutException) SchemaRegistryException(io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryException) StoreTimeoutException(io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException) ExecutionException(java.util.concurrent.ExecutionException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) StoreException(io.confluent.kafka.schemaregistry.storage.exceptions.StoreException) StoreInitializationException(io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException)
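
The noop-write technique can be exercised outside the registry. Below is a minimal, self-contained sketch against a plain producer: write a record to partition 0, block on the acknowledgment, and read the topic's latest offset from the returned metadata. The broker address, timeout, the topic name "_schemas", and the empty-byte-array key are assumptions for illustration; KafkaStore serializes a typed NoopKey instead.

import java.util.Properties;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class LatestOffsetProbe {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("acks", "all"); // wait for all in-sync replicas before acking

        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            // A no-op record: empty key, null value. Its acknowledged offset is,
            // by construction, the latest offset of the partition at write time.
            byte[] noopKey = new byte[0];
            ProducerRecord<byte[], byte[]> noop =
                new ProducerRecord<>("_schemas", 0, noopKey, null);
            Future<RecordMetadata> ack = producer.send(noop);
            RecordMetadata metadata = ack.get(5000, TimeUnit.MILLISECONDS);
            System.out.println("Latest offset of partition 0: " + metadata.offset());
        }
    }
}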

Example 2 with SerializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.

From the class KafkaStoreReaderThread, method doWork.

@Override
public void doWork() {
    try {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Long.MAX_VALUE);
        storeUpdateHandler.startBatch(records.count());
        for (ConsumerRecord<byte[], byte[]> record : records) {
            K messageKey = null;
            try {
                messageKey = this.serializer.deserializeKey(record.key());
            } catch (SerializationException e) {
                log.error("Failed to deserialize the schema or config key at offset " + record.offset(), e);
                continue;
            }
            if (messageKey.equals(noopKey)) {
                // If it's a noop, update local offset counter and do nothing else
                try {
                    offsetUpdateLock.lock();
                    offsetInSchemasTopic = record.offset();
                    offsetReachedThreshold.signalAll();
                } finally {
                    offsetUpdateLock.unlock();
                }
            } else {
                V message = null;
                try {
                    message = record.value() == null ? null : serializer.deserializeValue(messageKey, record.value());
                } catch (SerializationException e) {
                    log.error("Failed to deserialize a schema or config update at offset " + record.offset(), e);
                    continue;
                }
                try {
                    log.trace("Applying update (" + messageKey + "," + message + ") to the local store");
                    TopicPartition tp = new TopicPartition(record.topic(), record.partition());
                    long offset = record.offset();
                    long timestamp = record.timestamp();
                    ValidationStatus status = this.storeUpdateHandler.validateUpdate(messageKey, message, tp, offset, timestamp);
                    V oldMessage;
                    switch(status) {
                        case SUCCESS:
                            if (message == null) {
                                oldMessage = localStore.delete(messageKey);
                            } else {
                                oldMessage = localStore.put(messageKey, message);
                            }
                            this.storeUpdateHandler.handleUpdate(messageKey, message, oldMessage, tp, offset, timestamp);
                            break;
                        case ROLLBACK_FAILURE:
                            oldMessage = localStore.get(messageKey);
                            try {
                                ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(topic, record.key(), oldMessage == null ? null : serializer.serializeValue(oldMessage));
                                producer.send(producerRecord);
                                log.warn("Rollback invalid update to key {}", messageKey);
                            } catch (KafkaException | SerializationException ke) {
                                log.error("Failed to recover from invalid update to key {}", messageKey, ke);
                            }
                            break;
                        case IGNORE_FAILURE:
                        default:
                            log.warn("Ignore invalid update to key {}", messageKey);
                            break;
                    }
                    try {
                        offsetUpdateLock.lock();
                        offsetInSchemasTopic = record.offset();
                        offsetReachedThreshold.signalAll();
                    } finally {
                        offsetUpdateLock.unlock();
                    }
                } catch (Exception se) {
                    log.error("Failed to add record from the Kafka topic" + topic + " the local store", se);
                }
            }
        }
        if (localStore.isPersistent() && initialized.get()) {
            try {
                localStore.flush();
                Map<TopicPartition, Long> offsets = storeUpdateHandler.checkpoint(records.count());
                checkpointOffsets(offsets);
            } catch (StoreException se) {
                log.warn("Failed to flush", se);
            }
        }
        storeUpdateHandler.endBatch(records.count());
    } catch (WakeupException we) {
    // do nothing because the thread is closing -- see shutdown()
    } catch (RecordTooLargeException rtle) {
        throw new IllegalStateException("Consumer threw RecordTooLargeException. A schema has been written that " + "exceeds the default maximum fetch size.", rtle);
    } catch (RuntimeException e) {
        log.error("KafkaStoreReader thread has died for an unknown reason.", e);
        throw new RuntimeException(e);
    }
}
Also used : SerializationException(io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException) WakeupException(org.apache.kafka.common.errors.WakeupException) KafkaException(org.apache.kafka.common.KafkaException) StoreTimeoutException(io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException) IOException(java.io.IOException) StoreException(io.confluent.kafka.schemaregistry.storage.exceptions.StoreException) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) ValidationStatus(io.confluent.kafka.schemaregistry.storage.StoreUpdateHandler.ValidationStatus) TopicPartition(org.apache.kafka.common.TopicPartition) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord)
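
Stripped of the store-specific handling, the core of doWork is a poll loop that treats a deserialization failure as a per-record problem rather than a fatal one: log, skip, keep consuming. The sketch below shows that skip-and-continue shape with a plain consumer; the broker address, group id, topic name, and the apply stub are assumptions for illustration.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SkippingReaderLoop {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        props.put("group.id", "reader-sketch");           // assumption: any group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("_schemas")); // hypothetical topic
            while (true) {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    try {
                        apply(record.key(), record.value());
                    } catch (RuntimeException e) {
                        // One bad record must not kill the reader thread:
                        // log it and continue, exactly as doWork() does.
                        System.err.println("Skipping record at offset " + record.offset() + ": " + e);
                    }
                }
            }
        }
    }

    static void apply(byte[] key, byte[] value) { /* update a local store here */ }
}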

Example 3 with SerializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.

From the class SchemaRegistrySerializer, method deserializeKey.

@Override
public SchemaRegistryKey deserializeKey(byte[] key) throws SerializationException {
    SchemaRegistryKey schemaKey = null;
    SchemaRegistryKeyType keyType = null;
    try {
        try {
            Map<Object, Object> keyObj = null;
            keyObj = JacksonMapper.INSTANCE.readValue(key, new TypeReference<Map<Object, Object>>() {
            });
            keyType = SchemaRegistryKeyType.forName((String) keyObj.get("keytype"));
            if (keyType == SchemaRegistryKeyType.CONFIG) {
                schemaKey = JacksonMapper.INSTANCE.readValue(key, ConfigKey.class);
            } else if (keyType == SchemaRegistryKeyType.MODE) {
                schemaKey = JacksonMapper.INSTANCE.readValue(key, ModeKey.class);
            } else if (keyType == SchemaRegistryKeyType.NOOP) {
                schemaKey = JacksonMapper.INSTANCE.readValue(key, NoopKey.class);
            } else if (keyType == SchemaRegistryKeyType.CONTEXT) {
                schemaKey = JacksonMapper.INSTANCE.readValue(key, ContextKey.class);
            } else if (keyType == SchemaRegistryKeyType.DELETE_SUBJECT) {
                schemaKey = JacksonMapper.INSTANCE.readValue(key, DeleteSubjectKey.class);
            } else if (keyType == SchemaRegistryKeyType.CLEAR_SUBJECT) {
                schemaKey = JacksonMapper.INSTANCE.readValue(key, ClearSubjectKey.class);
            } else if (keyType == SchemaRegistryKeyType.SCHEMA) {
                schemaKey = JacksonMapper.INSTANCE.readValue(key, SchemaKey.class);
                validateMagicByte((SchemaKey) schemaKey);
            }
        } catch (JsonProcessingException e) {
            String type = "unknown";
            if (keyType != null) {
                type = keyType.name();
            }
            throw new SerializationException("Failed to deserialize " + type + " key", e);
        }
    } catch (IOException e) {
        throw new SerializationException("Error while deserializing schema key", e);
    }
    return schemaKey;
}
Also used : ConfigKey(io.confluent.kafka.schemaregistry.storage.ConfigKey) SerializationException(io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException) SchemaRegistryKeyType(io.confluent.kafka.schemaregistry.storage.SchemaRegistryKeyType) IOException(java.io.IOException) SchemaKey(io.confluent.kafka.schemaregistry.storage.SchemaKey) DeleteSubjectKey(io.confluent.kafka.schemaregistry.storage.DeleteSubjectKey) SchemaRegistryKey(io.confluent.kafka.schemaregistry.storage.SchemaRegistryKey) TypeReference(com.fasterxml.jackson.core.type.TypeReference) NoopKey(io.confluent.kafka.schemaregistry.storage.NoopKey) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException)
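
The method above is an instance of two-pass JSON dispatch: bind the bytes to a generic Map first, read the "keytype" discriminator, then re-bind the same bytes to the concrete class. A minimal sketch of the pattern follows, with hypothetical ConfigMsg and SchemaMsg payload classes standing in for the registry's key types.

import java.io.IOException;
import java.util.Map;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TwoPassDispatch {
    static final ObjectMapper MAPPER = new ObjectMapper();

    // Hypothetical payload types standing in for ConfigKey, SchemaKey, etc.
    public static class ConfigMsg { public String keytype; public String subject; }
    public static class SchemaMsg { public String keytype; public String subject; public int version; }

    static Object deserialize(byte[] bytes) throws IOException {
        // Pass 1: bind to a generic map just to read the discriminator field.
        Map<Object, Object> generic =
            MAPPER.readValue(bytes, new TypeReference<Map<Object, Object>>() {});
        String keytype = (String) generic.get("keytype");
        // Pass 2: bind the same bytes to the concrete class for that type.
        switch (keytype) {
            case "CONFIG": return MAPPER.readValue(bytes, ConfigMsg.class);
            case "SCHEMA": return MAPPER.readValue(bytes, SchemaMsg.class);
            default: throw new IOException("Unknown keytype: " + keytype);
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] json = "{\"keytype\":\"SCHEMA\",\"subject\":\"orders\",\"version\":1}".getBytes();
        System.out.println(deserialize(json).getClass().getSimpleName()); // prints SchemaMsg
    }
}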

Example 4 with SerializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.

From the class SchemaRegistrySerializer, method deserializeValue.

/**
 * @param key   Typed key corresponding to this value
 * @param value Bytes of the serialized value
 * @return Typed deserialized value. Must be one of
 *     {@link io.confluent.kafka.schemaregistry.storage.ConfigValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.ModeValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.SchemaValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.DeleteSubjectValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.ClearSubjectValue}
 */
@Override
public SchemaRegistryValue deserializeValue(SchemaRegistryKey key, byte[] value) throws SerializationException {
    SchemaRegistryValue schemaRegistryValue = null;
    if (key.getKeyType().equals(SchemaRegistryKeyType.CONFIG)) {
        try {
            schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ConfigValue.class);
        } catch (IOException e) {
            throw new SerializationException("Error while deserializing config", e);
        }
    } else if (key.getKeyType().equals(SchemaRegistryKeyType.MODE)) {
        try {
            schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ModeValue.class);
        } catch (IOException e) {
            throw new SerializationException("Error while deserializing schema", e);
        }
    } else if (key.getKeyType().equals(SchemaRegistryKeyType.SCHEMA)) {
        try {
            validateMagicByte((SchemaKey) key);
            schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, SchemaValue.class);
        } catch (IOException e) {
            throw new SerializationException("Error while deserializing schema", e);
        }
    } else if (key.getKeyType().equals(SchemaRegistryKeyType.CONTEXT)) {
        try {
            schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ContextValue.class);
        } catch (IOException e) {
            throw new SerializationException("Error while deserializing Delete Subject message", e);
        }
    } else if (key.getKeyType().equals(SchemaRegistryKeyType.DELETE_SUBJECT)) {
        try {
            schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, DeleteSubjectValue.class);
        } catch (IOException e) {
            throw new SerializationException("Error while deserializing Delete Subject message", e);
        }
    } else if (key.getKeyType().equals(SchemaRegistryKeyType.CLEAR_SUBJECT)) {
        try {
            schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ClearSubjectValue.class);
        } catch (IOException e) {
            throw new SerializationException("Error while deserializing Clear Subject message", e);
        }
    } else {
        throw new SerializationException("Unrecognized key type. Must be one of schema or config");
    }
    return schemaRegistryValue;
}
Also used : ConfigValue(io.confluent.kafka.schemaregistry.storage.ConfigValue) SerializationException(io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException) SchemaValue(io.confluent.kafka.schemaregistry.storage.SchemaValue) IOException(java.io.IOException) SchemaRegistryValue(io.confluent.kafka.schemaregistry.storage.SchemaRegistryValue) DeleteSubjectValue(io.confluent.kafka.schemaregistry.storage.DeleteSubjectValue)
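
The chain of key-type branches above could equally be expressed as a lookup table from key type to target class, which the sketch below does. This is an alternative structuring, not the registry's actual code; the enum, value classes, and JSON fields are invented for illustration.

import java.io.IOException;
import java.util.EnumMap;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TableDrivenValues {
    enum KeyType { CONFIG, MODE, SCHEMA } // subset for illustration

    // Hypothetical value classes standing in for ConfigValue, ModeValue, SchemaValue.
    public static class ConfigVal { public String compatibilityLevel; }
    public static class ModeVal { public String mode; }
    public static class SchemaVal { public String schema; public int version; }

    static final ObjectMapper MAPPER = new ObjectMapper();
    static final Map<KeyType, Class<?>> VALUE_TYPES = new EnumMap<>(KeyType.class);
    static {
        VALUE_TYPES.put(KeyType.CONFIG, ConfigVal.class);
        VALUE_TYPES.put(KeyType.MODE, ModeVal.class);
        VALUE_TYPES.put(KeyType.SCHEMA, SchemaVal.class);
    }

    static Object deserializeValue(KeyType keyType, byte[] value) throws IOException {
        // One lookup replaces the if/else chain; unknown types fail uniformly.
        Class<?> target = VALUE_TYPES.get(keyType);
        if (target == null) {
            throw new IOException("Unrecognized key type: " + keyType);
        }
        return MAPPER.readValue(value, target);
    }

    public static void main(String[] args) throws IOException {
        byte[] json = "{\"mode\":\"READWRITE\"}".getBytes();
        System.out.println(deserializeValue(KeyType.MODE, json).getClass().getSimpleName());
    }
}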

Example 5 with SerializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.

From the class KafkaStore, method put.

@Override
public V put(K key, V value) throws StoreTimeoutException, StoreException {
    assertInitialized();
    if (key == null) {
        throw new StoreException("Key should not be null");
    }
    V oldValue = get(key);
    // write to the Kafka topic
    ProducerRecord<byte[], byte[]> producerRecord = null;
    try {
        producerRecord = new ProducerRecord<byte[], byte[]>(topic, 0, this.serializer.serializeKey(key), value == null ? null : this.serializer.serializeValue(value));
    } catch (SerializationException e) {
        throw new StoreException("Error serializing schema while creating the Kafka produce " + "record", e);
    }
    boolean knownSuccessfulWrite = false;
    try {
        log.trace("Sending record to KafkaStore topic: " + producerRecord);
        Future<RecordMetadata> ack = producer.send(producerRecord);
        RecordMetadata recordMetadata = ack.get(timeout, TimeUnit.MILLISECONDS);
        log.trace("Waiting for the local store to catch up to offset " + recordMetadata.offset());
        this.lastWrittenOffset = recordMetadata.offset();
        if (key instanceof SubjectKey) {
            setLastOffset(((SubjectKey) key).getSubject(), recordMetadata.offset());
        }
        waitUntilKafkaReaderReachesOffset(recordMetadata.offset(), timeout);
        knownSuccessfulWrite = true;
    } catch (InterruptedException e) {
        throw new StoreException("Put operation interrupted while waiting for an ack from Kafka", e);
    } catch (ExecutionException e) {
        throw new StoreException("Put operation failed while waiting for an ack from Kafka", e);
    } catch (TimeoutException e) {
        throw new StoreTimeoutException("Put operation timed out while waiting for an ack from Kafka", e);
    } catch (KafkaException ke) {
        throw new StoreException("Put operation to Kafka failed", ke);
    } finally {
        if (!knownSuccessfulWrite) {
            markLastWrittenOffsetInvalid();
        }
    }
    return oldValue;
}
Also used : SerializationException(io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException) StoreException(io.confluent.kafka.schemaregistry.storage.exceptions.StoreException) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) StoreTimeoutException(io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException) KafkaException(org.apache.kafka.common.KafkaException) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException)
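
The heart of put is a synchronous write: send, block on the returned Future with a timeout, and translate each failure mode into a distinct exception. A condensed sketch of that triage follows, collapsing the registry's Store*Exception hierarchy into plain Exception for brevity; restoring the interrupt flag is a small addition over the code above.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.KafkaException;

public class SyncPut {
    // Returns the offset of the acknowledged write, translating each failure
    // mode separately, as KafkaStore.put does with its Store*Exception types.
    static long putSync(KafkaProducer<byte[], byte[]> producer, String topic,
                        byte[] key, byte[] value, long timeoutMs) throws Exception {
        try {
            Future<RecordMetadata> ack =
                producer.send(new ProducerRecord<>(topic, 0, key, value));
            return ack.get(timeoutMs, TimeUnit.MILLISECONDS).offset();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the flag before rethrowing
            throw new Exception("Put interrupted while waiting for an ack", e);
        } catch (ExecutionException e) {
            throw new Exception("Put failed while waiting for an ack", e);
        } catch (TimeoutException e) {
            throw new Exception("Put timed out while waiting for an ack", e);
        } catch (KafkaException e) {
            throw new Exception("Put to Kafka failed", e);
        }
    }
}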

Aggregations

SerializationException (io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException): 5 uses
StoreException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreException): 3 uses
StoreTimeoutException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException): 3 uses
IOException (java.io.IOException): 3 uses
KafkaException (org.apache.kafka.common.KafkaException): 3 uses
ExecutionException (java.util.concurrent.ExecutionException): 2 uses
TimeoutException (java.util.concurrent.TimeoutException): 2 uses
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 2 uses
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 1 use
TypeReference (com.fasterxml.jackson.core.type.TypeReference): 1 use
SchemaRegistryException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryException): 1 use
ConfigKey (io.confluent.kafka.schemaregistry.storage.ConfigKey): 1 use
ConfigValue (io.confluent.kafka.schemaregistry.storage.ConfigValue): 1 use
DeleteSubjectKey (io.confluent.kafka.schemaregistry.storage.DeleteSubjectKey): 1 use
DeleteSubjectValue (io.confluent.kafka.schemaregistry.storage.DeleteSubjectValue): 1 use
NoopKey (io.confluent.kafka.schemaregistry.storage.NoopKey): 1 use
SchemaKey (io.confluent.kafka.schemaregistry.storage.SchemaKey): 1 use
SchemaRegistryKey (io.confluent.kafka.schemaregistry.storage.SchemaRegistryKey): 1 use
SchemaRegistryKeyType (io.confluent.kafka.schemaregistry.storage.SchemaRegistryKeyType): 1 use
SchemaRegistryValue (io.confluent.kafka.schemaregistry.storage.SchemaRegistryValue): 1 use