
Example 1 with ValidationStatus

Use of io.confluent.kafka.schemaregistry.storage.StoreUpdateHandler.ValidationStatus in the schema-registry project by confluentinc.

The doWork method of the KafkaStoreReaderThread class:

@Override
public void doWork() {
    try {
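        // Block until records arrive on the schemas topic; the long-valued poll waits up to Long.MAX_VALUE ms.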
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Long.MAX_VALUE);
        storeUpdateHandler.startBatch(records.count());
        for (ConsumerRecord<byte[], byte[]> record : records) {
            K messageKey = null;
            try {
                messageKey = this.serializer.deserializeKey(record.key());
            } catch (SerializationException e) {
                log.error("Failed to deserialize the schema or config key at offset " + record.offset(), e);
                continue;
            }
            if (messageKey.equals(noopKey)) {
                // If it's a noop, update local offset counter and do nothing else
                try {
                    offsetUpdateLock.lock();
                    offsetInSchemasTopic = record.offset();
                    offsetReachedThreshold.signalAll();
                } finally {
                    offsetUpdateLock.unlock();
                }
            } else {
                V message = null;
                try {
                    message = record.value() == null ? null : serializer.deserializeValue(messageKey, record.value());
                } catch (SerializationException e) {
                    log.error("Failed to deserialize a schema or config update at offset " + record.offset(), e);
                    continue;
                }
                try {
                    log.trace("Applying update (" + messageKey + "," + message + ") to the local store");
                    TopicPartition tp = new TopicPartition(record.topic(), record.partition());
                    long offset = record.offset();
                    long timestamp = record.timestamp();
                    ValidationStatus status = this.storeUpdateHandler.validateUpdate(messageKey, message, tp, offset, timestamp);
                    V oldMessage;
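                    // SUCCESS applies the update locally; ROLLBACK_FAILURE re-publishes the previous value; IGNORE_FAILURE (and any unknown status) drops the record.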
                    switch(status) {
                        case SUCCESS:
                            if (message == null) {
                                oldMessage = localStore.delete(messageKey);
                            } else {
                                oldMessage = localStore.put(messageKey, message);
                            }
                            this.storeUpdateHandler.handleUpdate(messageKey, message, oldMessage, tp, offset, timestamp);
                            break;
                        case ROLLBACK_FAILURE:
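                            // Re-publish the previous value (a null value acts as a delete) so replicas replaying the topic converge on the pre-update state.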
                            oldMessage = localStore.get(messageKey);
                            try {
                                ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(topic, record.key(), oldMessage == null ? null : serializer.serializeValue(oldMessage));
                                producer.send(producerRecord);
                                log.warn("Rollback invalid update to key {}", messageKey);
                            } catch (KafkaException | SerializationException ke) {
                                log.error("Failed to recover from invalid update to key {}", messageKey, ke);
                            }
                            break;
                        case IGNORE_FAILURE:
                        default:
                            log.warn("Ignore invalid update to key {}", messageKey);
                            break;
                    }
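                    // As with noop records, advance the local offset and wake any threads waiting for the reader to catch up.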
                    try {
                        offsetUpdateLock.lock();
                        offsetInSchemasTopic = record.offset();
                        offsetReachedThreshold.signalAll();
                    } finally {
                        offsetUpdateLock.unlock();
                    }
                } catch (Exception se) {
                    log.error("Failed to add record from the Kafka topic" + topic + " the local store", se);
                }
            }
        }
        if (localStore.isPersistent() && initialized.get()) {
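            // For persistent stores, flush pending writes and let the handler checkpoint the offsets it has durably applied.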
            try {
                localStore.flush();
                Map<TopicPartition, Long> offsets = storeUpdateHandler.checkpoint(records.count());
                checkpointOffsets(offsets);
            } catch (StoreException se) {
                log.warn("Failed to flush", se);
            }
        }
        storeUpdateHandler.endBatch(records.count());
    } catch (WakeupException we) {
        // do nothing because the thread is closing -- see shutdown()
    } catch (RecordTooLargeException rtle) {
        throw new IllegalStateException("Consumer threw RecordTooLargeException. A schema has been written that exceeds the default maximum fetch size.", rtle);
    } catch (RuntimeException e) {
        log.error("KafkaStoreReader thread has died for an unknown reason.", e);
        throw new RuntimeException(e);
    }
}
Also used:
ValidationStatus (io.confluent.kafka.schemaregistry.storage.StoreUpdateHandler.ValidationStatus)
SerializationException (io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException)
StoreException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreException)
StoreTimeoutException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException)
IOException (java.io.IOException)
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)
KafkaException (org.apache.kafka.common.KafkaException)
TopicPartition (org.apache.kafka.common.TopicPartition)
RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException)
WakeupException (org.apache.kafka.common.errors.WakeupException)
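
The switch in doWork shows the three outcomes a StoreUpdateHandler can signal. Below is a minimal handler sketch, inferred from the call sites above rather than from the actual interface definition: only the five methods doWork invokes are implemented, the String key/value types and the AuditingUpdateHandler name are illustrative assumptions, and the real interface may declare additional methods (possibly with defaults).

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;

import io.confluent.kafka.schemaregistry.storage.StoreUpdateHandler;
import io.confluent.kafka.schemaregistry.storage.StoreUpdateHandler.ValidationStatus;

// Hypothetical handler over String keys and values; the registry itself uses
// dedicated schema/config key and value types.
public class AuditingUpdateHandler implements StoreUpdateHandler<String, String> {

    @Override
    public void startBatch(int count) {
        // Called once per poll, before any record in the batch is applied.
    }

    @Override
    public ValidationStatus validateUpdate(String key, String value, TopicPartition tp, long offset, long timestamp) {
        // Treat an empty, non-tombstone value as invalid and ask the reader
        // thread to re-publish the previous value (the ROLLBACK_FAILURE branch).
        if (value != null && value.isEmpty()) {
            return ValidationStatus.ROLLBACK_FAILURE;
        }
        return ValidationStatus.SUCCESS;
    }

    @Override
    public void handleUpdate(String key, String value, String oldValue, TopicPartition tp, long offset, long timestamp) {
        // Reached only after SUCCESS, once localStore.put/delete has run;
        // oldValue is whatever the local store held before this update.
    }

    @Override
    public Map<TopicPartition, Long> checkpoint(int count) {
        // Assumption: an empty map signals that there is nothing extra to checkpoint.
        return Collections.emptyMap();
    }

    @Override
    public void endBatch(int count) {
        // Called once per poll, after the local store has been flushed and checkpointed.
    }
}

Returning ROLLBACK_FAILURE rather than IGNORE_FAILURE matters for replicated deployments: the reader re-publishes the prior value to the topic, so every node that replays it converges on the same state instead of silently diverging.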
