Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.
Class KafkaStore, method getLatestOffset.
/**
 * Return the latest offset of the store topic.
 *
 * <p>The most reliable way to do so in the face of potential Kafka broker failure is to produce
 * successfully to the Kafka topic and get the offset of the returned metadata.
 *
 * <p>If the most recent write to Kafka was successful (signaled by lastWrittenOffset >= 0),
 * immediately return that offset. Otherwise write a "Noop key" to Kafka in order to find the
 * latest offset.
 */
private long getLatestOffset(int timeoutMs) throws StoreException {
  ProducerRecord<byte[], byte[]> producerRecord = null;

  if (this.lastWrittenOffset >= 0) {
    return this.lastWrittenOffset;
  }

  try {
    producerRecord =
        new ProducerRecord<byte[], byte[]>(topic, 0, this.serializer.serializeKey(noopKey), null);
  } catch (SerializationException e) {
    throw new StoreException("Failed to serialize noop key.", e);
  }

  try {
    log.trace("Sending Noop record to KafkaStore to find last offset.");
    Future<RecordMetadata> ack = producer.send(producerRecord);
    RecordMetadata metadata = ack.get(timeoutMs, TimeUnit.MILLISECONDS);
    this.lastWrittenOffset = metadata.offset();
    log.trace("Noop record's offset is " + this.lastWrittenOffset);
    return this.lastWrittenOffset;
  } catch (Exception e) {
    throw new StoreException("Failed to write Noop record to kafka store.", e);
  }
}
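The technique documented above, producing a marker record and reading the offset back from the acknowledged RecordMetadata, can be reproduced with a plain Kafka producer. Below is a minimal, self-contained sketch; the topic name, bootstrap server, key bytes, and timeout are placeholders rather than values taken from the project.

import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class LatestOffsetProbe {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");  // placeholder broker address
    props.put("key.serializer", ByteArraySerializer.class.getName());
    props.put("value.serializer", ByteArraySerializer.class.getName());
    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
      // A no-op marker record: partition 0, empty key bytes standing in for the
      // serialized noop key, and a null value.
      ProducerRecord<byte[], byte[]> noop =
          new ProducerRecord<>("_schemas", 0, new byte[0], null);
      // The offset of the acknowledged write is the latest offset of the topic.
      RecordMetadata md = producer.send(noop).get(60, TimeUnit.SECONDS);
      System.out.println("Latest offset: " + md.offset());
    }
  }
}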
Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.
Class KafkaStoreReaderThread, method doWork.
@Override
public void doWork() {
  try {
    ConsumerRecords<byte[], byte[]> records = consumer.poll(Long.MAX_VALUE);
    storeUpdateHandler.startBatch(records.count());
    for (ConsumerRecord<byte[], byte[]> record : records) {
      K messageKey = null;
      try {
        messageKey = this.serializer.deserializeKey(record.key());
      } catch (SerializationException e) {
        log.error("Failed to deserialize the schema or config key at offset "
            + record.offset(), e);
        continue;
      }
      if (messageKey.equals(noopKey)) {
        // If it's a noop, update the local offset counter and do nothing else.
        try {
          offsetUpdateLock.lock();
          offsetInSchemasTopic = record.offset();
          offsetReachedThreshold.signalAll();
        } finally {
          offsetUpdateLock.unlock();
        }
      } else {
        V message = null;
        try {
          message = record.value() == null
              ? null
              : serializer.deserializeValue(messageKey, record.value());
        } catch (SerializationException e) {
          log.error("Failed to deserialize a schema or config update at offset "
              + record.offset(), e);
          continue;
        }
        try {
          log.trace("Applying update (" + messageKey + "," + message + ") to the local store");
          TopicPartition tp = new TopicPartition(record.topic(), record.partition());
          long offset = record.offset();
          long timestamp = record.timestamp();
          ValidationStatus status =
              this.storeUpdateHandler.validateUpdate(messageKey, message, tp, offset, timestamp);
          V oldMessage;
          switch (status) {
            case SUCCESS:
              if (message == null) {
                oldMessage = localStore.delete(messageKey);
              } else {
                oldMessage = localStore.put(messageKey, message);
              }
              this.storeUpdateHandler.handleUpdate(
                  messageKey, message, oldMessage, tp, offset, timestamp);
              break;
            case ROLLBACK_FAILURE:
              oldMessage = localStore.get(messageKey);
              try {
                ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
                    topic,
                    record.key(),
                    oldMessage == null ? null : serializer.serializeValue(oldMessage));
                producer.send(producerRecord);
                log.warn("Rollback invalid update to key {}", messageKey);
              } catch (KafkaException | SerializationException ke) {
                log.error("Failed to recover from invalid update to key {}", messageKey, ke);
              }
              break;
            case IGNORE_FAILURE:
            default:
              log.warn("Ignore invalid update to key {}", messageKey);
              break;
          }
          try {
            offsetUpdateLock.lock();
            offsetInSchemasTopic = record.offset();
            offsetReachedThreshold.signalAll();
          } finally {
            offsetUpdateLock.unlock();
          }
        } catch (Exception se) {
          log.error("Failed to add record from the Kafka topic " + topic + " to the local store", se);
        }
      }
    }
    if (localStore.isPersistent() && initialized.get()) {
      try {
        localStore.flush();
        Map<TopicPartition, Long> offsets = storeUpdateHandler.checkpoint(records.count());
        checkpointOffsets(offsets);
      } catch (StoreException se) {
        log.warn("Failed to flush", se);
      }
    }
    storeUpdateHandler.endBatch(records.count());
  } catch (WakeupException we) {
    // Do nothing because the thread is closing -- see shutdown().
  } catch (RecordTooLargeException rtle) {
    throw new IllegalStateException("Consumer threw RecordTooLargeException. A schema has been "
        + "written that exceeds the default maximum fetch size.", rtle);
  } catch (RuntimeException e) {
    log.error("KafkaStoreReader thread has died for an unknown reason.", e);
    throw new RuntimeException(e);
  }
}
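The offsetUpdateLock / offsetReachedThreshold pair updated in doWork is one half of a handshake: the writer side (KafkaStore.put, shown further below) blocks until the reader thread has applied the offset it just produced. The following is a minimal sketch of that lock-and-condition idiom, assuming field and method names of my own choosing rather than the project's actual ones.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

class OffsetWaiter {
  private final ReentrantLock offsetUpdateLock = new ReentrantLock();
  private final Condition offsetReachedThreshold = offsetUpdateLock.newCondition();
  private long offsetInSchemasTopic = -1;

  // Called by the reader thread after applying a record (mirrors doWork above).
  void recordApplied(long offset) {
    offsetUpdateLock.lock();
    try {
      offsetInSchemasTopic = offset;
      offsetReachedThreshold.signalAll();
    } finally {
      offsetUpdateLock.unlock();
    }
  }

  // Called by the writer; returns false if the reader did not catch up in time.
  boolean waitUntilOffset(long targetOffset, long timeoutMs) throws InterruptedException {
    offsetUpdateLock.lock();
    try {
      long remainingNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs);
      while (offsetInSchemasTopic < targetOffset && remainingNs > 0) {
        remainingNs = offsetReachedThreshold.awaitNanos(remainingNs);
      }
      return offsetInSchemasTopic >= targetOffset;
    } finally {
      offsetUpdateLock.unlock();
    }
  }
}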
Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.
Class SchemaRegistrySerializer, method deserializeKey.
@Override
public SchemaRegistryKey deserializeKey(byte[] key) throws SerializationException {
  SchemaRegistryKey schemaKey = null;
  SchemaRegistryKeyType keyType = null;
  try {
    try {
      Map<Object, Object> keyObj = null;
      keyObj = JacksonMapper.INSTANCE.readValue(key,
          new TypeReference<Map<Object, Object>>() {
          });
      keyType = SchemaRegistryKeyType.forName((String) keyObj.get("keytype"));
      if (keyType == SchemaRegistryKeyType.CONFIG) {
        schemaKey = JacksonMapper.INSTANCE.readValue(key, ConfigKey.class);
      } else if (keyType == SchemaRegistryKeyType.MODE) {
        schemaKey = JacksonMapper.INSTANCE.readValue(key, ModeKey.class);
      } else if (keyType == SchemaRegistryKeyType.NOOP) {
        schemaKey = JacksonMapper.INSTANCE.readValue(key, NoopKey.class);
      } else if (keyType == SchemaRegistryKeyType.CONTEXT) {
        schemaKey = JacksonMapper.INSTANCE.readValue(key, ContextKey.class);
      } else if (keyType == SchemaRegistryKeyType.DELETE_SUBJECT) {
        schemaKey = JacksonMapper.INSTANCE.readValue(key, DeleteSubjectKey.class);
      } else if (keyType == SchemaRegistryKeyType.CLEAR_SUBJECT) {
        schemaKey = JacksonMapper.INSTANCE.readValue(key, ClearSubjectKey.class);
      } else if (keyType == SchemaRegistryKeyType.SCHEMA) {
        schemaKey = JacksonMapper.INSTANCE.readValue(key, SchemaKey.class);
        validateMagicByte((SchemaKey) schemaKey);
      }
    } catch (JsonProcessingException e) {
      String type = "unknown";
      if (keyType != null) {
        type = keyType.name();
      }
      throw new SerializationException("Failed to deserialize " + type + " key", e);
    }
  } catch (IOException e) {
    throw new SerializationException("Error while deserializing schema key", e);
  }
  return schemaKey;
}
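deserializeKey reads the bytes twice: once into a generic map just to inspect the "keytype" discriminator, and a second time into the concrete key class. The toy reproduction below shows the same two-pass pattern with plain Jackson; the *Demo classes and the JSON payload are illustrative stand-ins, not the registry's own types.

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.nio.charset.StandardCharsets;
import java.util.Map;

public class KeyDispatchDemo {
  static final ObjectMapper MAPPER = new ObjectMapper();

  // Illustrative stand-ins for the registry's key classes.
  public static class ConfigKeyDemo { public String keytype; public String subject; }
  public static class SchemaKeyDemo { public String keytype; public String subject; public int version; }

  static Object deserializeKey(byte[] key) throws Exception {
    // First pass: read into a generic map just to look at the discriminator.
    Map<Object, Object> keyObj =
        MAPPER.readValue(key, new TypeReference<Map<Object, Object>>() { });
    String keyType = (String) keyObj.get("keytype");
    // Second pass: re-read the same bytes into the concrete class.
    if ("CONFIG".equals(keyType)) {
      return MAPPER.readValue(key, ConfigKeyDemo.class);
    } else if ("SCHEMA".equals(keyType)) {
      return MAPPER.readValue(key, SchemaKeyDemo.class);
    }
    return null;
  }

  public static void main(String[] args) throws Exception {
    byte[] json = "{\"keytype\":\"SCHEMA\",\"subject\":\"orders-value\",\"version\":1}"
        .getBytes(StandardCharsets.UTF_8);
    SchemaKeyDemo k = (SchemaKeyDemo) deserializeKey(json);
    System.out.println(k.subject + " v" + k.version);
  }
}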
Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.
Class SchemaRegistrySerializer, method deserializeValue.
/**
 * @param key   Typed key corresponding to this value
 * @param value Bytes of the serialized value
 * @return Typed deserialized value. Must be one of
 *     {@link io.confluent.kafka.schemaregistry.storage.ConfigValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.ModeValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.SchemaValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.DeleteSubjectValue}
 *     or {@link io.confluent.kafka.schemaregistry.storage.ClearSubjectValue}
 */
@Override
public SchemaRegistryValue deserializeValue(SchemaRegistryKey key, byte[] value)
    throws SerializationException {
  SchemaRegistryValue schemaRegistryValue = null;
  if (key.getKeyType().equals(SchemaRegistryKeyType.CONFIG)) {
    try {
      schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ConfigValue.class);
    } catch (IOException e) {
      throw new SerializationException("Error while deserializing config", e);
    }
  } else if (key.getKeyType().equals(SchemaRegistryKeyType.MODE)) {
    try {
      schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ModeValue.class);
    } catch (IOException e) {
      throw new SerializationException("Error while deserializing mode", e);
    }
  } else if (key.getKeyType().equals(SchemaRegistryKeyType.SCHEMA)) {
    try {
      validateMagicByte((SchemaKey) key);
      schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, SchemaValue.class);
    } catch (IOException e) {
      throw new SerializationException("Error while deserializing schema", e);
    }
  } else if (key.getKeyType().equals(SchemaRegistryKeyType.CONTEXT)) {
    try {
      schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ContextValue.class);
    } catch (IOException e) {
      throw new SerializationException("Error while deserializing context", e);
    }
  } else if (key.getKeyType().equals(SchemaRegistryKeyType.DELETE_SUBJECT)) {
    try {
      schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, DeleteSubjectValue.class);
    } catch (IOException e) {
      throw new SerializationException("Error while deserializing Delete Subject message", e);
    }
  } else if (key.getKeyType().equals(SchemaRegistryKeyType.CLEAR_SUBJECT)) {
    try {
      schemaRegistryValue = JacksonMapper.INSTANCE.readValue(value, ClearSubjectValue.class);
    } catch (IOException e) {
      throw new SerializationException("Error while deserializing Clear Subject message", e);
    }
  } else {
    throw new SerializationException("Unrecognized key type. Must be one of schema or config");
  }
  return schemaRegistryValue;
}
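The if/else chain above pairs each key type with exactly one value class. A table-driven variant keeps that pairing in one place; the sketch below is only an illustration of the shape, with Object.class standing in for the registry's ConfigValue, ModeValue, and SchemaValue classes.

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class ValueDispatchSketch {
  static final ObjectMapper MAPPER = new ObjectMapper();

  // Hypothetical mapping from key-type name to value class; the Object.class
  // entries stand in for the registry's concrete value classes.
  static final Map<String, Class<?>> VALUE_CLASSES = new HashMap<>();
  static {
    VALUE_CLASSES.put("CONFIG", Object.class);
    VALUE_CLASSES.put("MODE", Object.class);
    VALUE_CLASSES.put("SCHEMA", Object.class);
  }

  static Object deserializeValue(String keyType, byte[] value) throws IOException {
    Class<?> target = VALUE_CLASSES.get(keyType);
    if (target == null) {
      throw new IOException("Unrecognized key type: " + keyType);
    }
    return MAPPER.readValue(value, target);
  }
}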
Use of io.confluent.kafka.schemaregistry.storage.exceptions.SerializationException in project schema-registry by confluentinc.
Class KafkaStore, method put.
@Override
public V put(K key, V value) throws StoreTimeoutException, StoreException {
  assertInitialized();
  if (key == null) {
    throw new StoreException("Key should not be null");
  }
  V oldValue = get(key);

  // Write to the Kafka topic.
  ProducerRecord<byte[], byte[]> producerRecord = null;
  try {
    producerRecord = new ProducerRecord<byte[], byte[]>(
        topic,
        0,
        this.serializer.serializeKey(key),
        value == null ? null : this.serializer.serializeValue(value));
  } catch (SerializationException e) {
    throw new StoreException("Error serializing schema while creating the Kafka produce record", e);
  }

  boolean knownSuccessfulWrite = false;
  try {
    log.trace("Sending record to KafkaStore topic: " + producerRecord);
    Future<RecordMetadata> ack = producer.send(producerRecord);
    RecordMetadata recordMetadata = ack.get(timeout, TimeUnit.MILLISECONDS);

    log.trace("Waiting for the local store to catch up to offset " + recordMetadata.offset());
    this.lastWrittenOffset = recordMetadata.offset();
    if (key instanceof SubjectKey) {
      setLastOffset(((SubjectKey) key).getSubject(), recordMetadata.offset());
    }
    waitUntilKafkaReaderReachesOffset(recordMetadata.offset(), timeout);
    knownSuccessfulWrite = true;
  } catch (InterruptedException e) {
    throw new StoreException("Put operation interrupted while waiting for an ack from Kafka", e);
  } catch (ExecutionException e) {
    throw new StoreException("Put operation failed while waiting for an ack from Kafka", e);
  } catch (TimeoutException e) {
    throw new StoreTimeoutException("Put operation timed out while waiting for an ack from Kafka", e);
  } catch (KafkaException ke) {
    throw new StoreException("Put operation to Kafka failed", ke);
  } finally {
    if (!knownSuccessfulWrite) {
      markLastWrittenOffsetInvalid();
    }
  }
  return oldValue;
}
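Because put returns only after waitUntilKafkaReaderReachesOffset has observed the record coming back through the reader thread, callers get read-your-own-writes behaviour against the local store, and a StoreTimeoutException means the write may or may not have landed. The helper below is a hedged sketch of a caller-side retry for that case; PutRetryHelper is my own illustrative name, and the retry is only safe where re-putting the same key/value pair is idempotent.

import io.confluent.kafka.schemaregistry.storage.KafkaStore;
import io.confluent.kafka.schemaregistry.storage.exceptions.StoreException;
import io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException;

public final class PutRetryHelper {
  private PutRetryHelper() {
  }

  // Retry the put once if the ack from Kafka timed out. A timed-out write may
  // still have landed on the topic, so only use this for idempotent updates.
  public static <K, V> V putWithOneRetry(KafkaStore<K, V> store, K key, V value)
      throws StoreException, StoreTimeoutException {
    try {
      return store.put(key, value);
    } catch (StoreTimeoutException first) {
      return store.put(key, value);
    }
  }
}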