Use of io.confluent.kafka.schemaregistry.storage.StoreUpdateHandler.ValidationStatus in project schema-registry by confluentinc.
The example below is the doWork method of the KafkaStoreReaderThread class.
@Override
public void doWork() {
  try {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(Long.MAX_VALUE);
    storeUpdateHandler.startBatch(records.count());
    for (ConsumerRecord<byte[], byte[]> record : records) {
      K messageKey = null;
      try {
        messageKey = this.serializer.deserializeKey(record.key());
      } catch (SerializationException e) {
        log.error("Failed to deserialize the schema or config key at offset "
                  + record.offset(), e);
        continue;
      }

      if (messageKey.equals(noopKey)) {
        // If it's a noop, update local offset counter and do nothing else
        try {
          offsetUpdateLock.lock();
          offsetInSchemasTopic = record.offset();
          offsetReachedThreshold.signalAll();
        } finally {
          offsetUpdateLock.unlock();
        }
      } else {
        V message = null;
        try {
          message = record.value() == null
                    ? null
                    : serializer.deserializeValue(messageKey, record.value());
        } catch (SerializationException e) {
          log.error("Failed to deserialize a schema or config update at offset "
                    + record.offset(), e);
          continue;
        }
        try {
          log.trace("Applying update (" + messageKey + "," + message + ") to the local store");
          TopicPartition tp = new TopicPartition(record.topic(), record.partition());
          long offset = record.offset();
          long timestamp = record.timestamp();
          // Ask the handler to validate the update before touching the local store
          ValidationStatus status =
              this.storeUpdateHandler.validateUpdate(messageKey, message, tp, offset, timestamp);
          V oldMessage;
          switch (status) {
            case SUCCESS:
              // A null value is a tombstone: delete the key, otherwise upsert it
              if (message == null) {
                oldMessage = localStore.delete(messageKey);
              } else {
                oldMessage = localStore.put(messageKey, message);
              }
              this.storeUpdateHandler.handleUpdate(
                  messageKey, message, oldMessage, tp, offset, timestamp);
              break;
            case ROLLBACK_FAILURE:
              // Undo the invalid update by re-producing the previous value for this key
              oldMessage = localStore.get(messageKey);
              try {
                ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
                    topic,
                    record.key(),
                    oldMessage == null ? null : serializer.serializeValue(oldMessage));
                producer.send(producerRecord);
                log.warn("Rollback invalid update to key {}", messageKey);
              } catch (KafkaException | SerializationException ke) {
                log.error("Failed to recover from invalid update to key {}", messageKey, ke);
              }
              break;
            case IGNORE_FAILURE:
            default:
              log.warn("Ignore invalid update to key {}", messageKey);
              break;
          }
          try {
            offsetUpdateLock.lock();
            offsetInSchemasTopic = record.offset();
            offsetReachedThreshold.signalAll();
          } finally {
            offsetUpdateLock.unlock();
          }
        } catch (Exception se) {
          log.error("Failed to add record from the Kafka topic " + topic + " to the local store", se);
        }
      }
    }
    if (localStore.isPersistent() && initialized.get()) {
      try {
        localStore.flush();
        Map<TopicPartition, Long> offsets = storeUpdateHandler.checkpoint(records.count());
        checkpointOffsets(offsets);
      } catch (StoreException se) {
        log.warn("Failed to flush", se);
      }
    }
    storeUpdateHandler.endBatch(records.count());
  } catch (WakeupException we) {
    // do nothing because the thread is closing -- see shutdown()
  } catch (RecordTooLargeException rtle) {
    throw new IllegalStateException("Consumer threw RecordTooLargeException. A schema has been "
        + "written that exceeds the default maximum fetch size.", rtle);
  } catch (RuntimeException e) {
    log.error("KafkaStoreReader thread has died for an unknown reason.", e);
    throw new RuntimeException(e);
  }
}
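
Every branch in doWork() above is driven by the ValidationStatus returned from storeUpdateHandler.validateUpdate(...). The class below is a minimal, hypothetical sketch of the producing side of that contract: its name and method shapes are inferred from the calls visible in the snippet (validateUpdate, handleUpdate, checkpoint), not copied from the real StoreUpdateHandler interface, which may declare further methods.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.kafka.common.TopicPartition;

import io.confluent.kafka.schemaregistry.storage.StoreUpdateHandler.ValidationStatus;

// Hypothetical handler sketch; method shapes mirror the calls made in doWork() above.
public class LoggingUpdateHandler<K, V> {

  // Last offset applied per partition, returned from checkpoint() so the reader
  // thread can persist it alongside the flushed local store.
  private final Map<TopicPartition, Long> appliedOffsets = new ConcurrentHashMap<>();

  public ValidationStatus validateUpdate(K key, V value, TopicPartition tp,
                                         long offset, long timestamp) {
    if (key == null) {
      // Drop the record; for IGNORE_FAILURE the reader thread only logs a warning.
      return ValidationStatus.IGNORE_FAILURE;
    }
    if (value != null && !isWellFormed(value)) {
      // Ask the reader thread to re-produce the previous value for this key.
      return ValidationStatus.ROLLBACK_FAILURE;
    }
    return ValidationStatus.SUCCESS;
  }

  public void handleUpdate(K key, V value, V oldValue, TopicPartition tp,
                           long offset, long timestamp) {
    // Invoked only after the local store has been updated for a SUCCESS status.
    appliedOffsets.put(tp, offset);
  }

  public Map<TopicPartition, Long> checkpoint(int batchSize) {
    return new HashMap<>(appliedOffsets);
  }

  // Placeholder check; a real handler would validate the schema or config value here.
  private boolean isWellFormed(V value) {
    return true;
  }
}

Returning ROLLBACK_FAILURE is only useful when re-publishing the key's previous value to the topic is an acceptable remedy, since that is exactly what the reader thread does in that branch; otherwise IGNORE_FAILURE simply skips the record.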