
Example 1 with SerializationException

Use of org.apache.kafka.common.errors.SerializationException in the Apache Kafka project.

The class KafkaProducer, method doSend.

/**
 * Asynchronously send a record to a topic.
 */
private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
    TopicPartition tp = null;
    try {
        // first make sure the metadata for the topic is available
        ClusterAndWaitTime clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs);
        long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
        Cluster cluster = clusterAndWaitTime.cluster;
        byte[] serializedKey;
        try {
            serializedKey = keySerializer.serialize(record.topic(), record.key());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + " specified in key.serializer");
        }
        byte[] serializedValue;
        try {
            serializedValue = valueSerializer.serialize(record.topic(), record.value());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + " specified in value.serializer");
        }
        int partition = partition(record, serializedKey, serializedValue, cluster);
        int serializedSize = Records.LOG_OVERHEAD + Record.recordSize(serializedKey, serializedValue);
        ensureValidRecordSize(serializedSize);
        tp = new TopicPartition(record.topic(), partition);
        long timestamp = record.timestamp() == null ? time.milliseconds() : record.timestamp();
        log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
        // producer callback will make sure to call both 'callback' and interceptor callback
        Callback interceptCallback = this.interceptors == null ? callback : new InterceptorCallback<>(callback, this.interceptors, tp);
        RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey, serializedValue, interceptCallback, remainingWaitMs);
        if (result.batchIsFull || result.newBatchCreated) {
            log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
            this.sender.wakeup();
        }
        return result.future;
    // Handle exceptions and record the errors;
    // API exceptions are returned in the future,
    // other exceptions are thrown directly
    } catch (ApiException e) {
        log.debug("Exception occurred during message send:", e);
        if (callback != null)
            callback.onCompletion(null, e);
        this.errors.record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        return new FutureFailure(e);
    } catch (InterruptedException e) {
        this.errors.record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw new InterruptException(e);
    } catch (BufferExhaustedException e) {
        this.errors.record();
        this.metrics.sensor("buffer-exhausted-records").record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (KafkaException e) {
        this.errors.record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (Exception e) {
        // we notify interceptor about all exceptions, since onSend is called before anything else in this method
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw e;
    }
}
Also used : SerializationException(org.apache.kafka.common.errors.SerializationException) InterruptException(org.apache.kafka.common.errors.InterruptException) Cluster(org.apache.kafka.common.Cluster) RecordAccumulator(org.apache.kafka.clients.producer.internals.RecordAccumulator) KafkaException(org.apache.kafka.common.KafkaException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) ConfigException(org.apache.kafka.common.config.ConfigException) ExecutionException(java.util.concurrent.ExecutionException) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) ApiException(org.apache.kafka.common.errors.ApiException) TopicPartition(org.apache.kafka.common.TopicPartition)
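
For context, here is a minimal, hypothetical sketch (not taken from the Kafka sources) of how the ClassCastException handling above surfaces at the call site: the producer is configured with a StringSerializer for keys but is handed an Integer key, so doSend() throws a SerializationException synchronously. The topic name, bootstrap address, and types are assumptions.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.StringSerializer;

public class KeyMismatchExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (Producer<Object, String> producer = new KafkaProducer<>(props)) {
            // The key is an Integer, but key.serializer expects a String:
            // serialize() fails with ClassCastException, which doSend() wraps.
            ProducerRecord<Object, String> record = new ProducerRecord<>("my-topic", 42, "value");
            try {
                producer.send(record);
            } catch (SerializationException e) {
                // Raised synchronously from send(), not through the returned Future.
                System.err.println("Key could not be serialized: " + e.getMessage());
            }
        }
    }
}

Note that, as the catch blocks above show, SerializationException is not an ApiException, so it is thrown from send() directly rather than being reported through the returned Future.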

Example 2 with SerializationException

Use of org.apache.kafka.common.errors.SerializationException in the Apache Kafka project.

The class FetcherTest, method testFetchedRecordsRaisesOnSerializationErrors.

@Test
public void testFetchedRecordsRaisesOnSerializationErrors() {
    // raise an exception from somewhere in the middle of the fetch response
    // so that we can verify that our position does not advance after raising
    ByteArrayDeserializer deserializer = new ByteArrayDeserializer() {

        int i = 0;

        @Override
        public byte[] deserialize(String topic, byte[] data) {
            if (i++ == 1)
                throw new SerializationException();
            return data;
        }
    };
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time), deserializer, deserializer);
    subscriptions.assignFromUser(singleton(tp));
    subscriptions.seek(tp, 1);
    client.prepareResponse(matchesOffset(tp, 1), fetchResponse(this.records, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    try {
        fetcher.fetchedRecords();
        fail("fetchedRecords should have raised");
    } catch (SerializationException e) {
        // the position should not advance since no data has been returned
        assertEquals(1, subscriptions.position(tp).longValue());
    }
}
Also used : Metrics(org.apache.kafka.common.metrics.Metrics) SerializationException(org.apache.kafka.common.errors.SerializationException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Test(org.junit.Test)
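
As the test demonstrates, the consumer's position does not advance when deserialization fails, so the next poll() hits the same record again. Below is a hedged sketch of one way an application might step over such a poison record; the process() method is a placeholder, and seeking forward on every assigned partition is a deliberate simplification, since the exception in these client versions does not report which partition or offset failed.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.SerializationException;

// Sketch: a poll loop that steps over records that cannot be deserialized.
public class SkippingPollLoop {

    public void pollLoop(Consumer<byte[], byte[]> consumer) {
        while (true) {
            try {
                ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
                process(records);
            } catch (SerializationException e) {
                // The fetch position has not advanced, so the same record would
                // fail again on the next poll(). Move every assigned partition
                // forward by one record as a blunt recovery step.
                for (TopicPartition tp : consumer.assignment()) {
                    consumer.seek(tp, consumer.position(tp) + 1);
                }
            }
        }
    }

    private void process(ConsumerRecords<byte[], byte[]> records) {
        // placeholder: application-specific handling of fetched records
    }
}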

Example 3 with SerializationException

Use of org.apache.kafka.common.errors.SerializationException in the Apache Kafka project.

The class Fetcher, method parseRecord.

/**
 * Parse the record entry, deserializing the key / value fields if necessary
 */
private ConsumerRecord<K, V> parseRecord(TopicPartition partition, LogEntry logEntry) {
    Record record = logEntry.record();
    if (this.checkCrcs) {
        try {
            record.ensureValid();
        } catch (InvalidRecordException e) {
            throw new KafkaException("Record for partition " + partition + " at offset " + logEntry.offset() + " is invalid, cause: " + e.getMessage());
        }
    }
    try {
        long offset = logEntry.offset();
        long timestamp = record.timestamp();
        TimestampType timestampType = record.timestampType();
        ByteBuffer keyBytes = record.key();
        byte[] keyByteArray = keyBytes == null ? null : Utils.toArray(keyBytes);
        K key = keyBytes == null ? null : this.keyDeserializer.deserialize(partition.topic(), keyByteArray);
        ByteBuffer valueBytes = record.value();
        byte[] valueByteArray = valueBytes == null ? null : Utils.toArray(valueBytes);
        V value = valueBytes == null ? null : this.valueDeserializer.deserialize(partition.topic(), valueByteArray);
        return new ConsumerRecord<>(partition.topic(), partition.partition(), offset, timestamp, timestampType, record.checksum(), keyByteArray == null ? ConsumerRecord.NULL_SIZE : keyByteArray.length, valueByteArray == null ? ConsumerRecord.NULL_SIZE : valueByteArray.length, key, value);
    } catch (RuntimeException e) {
        throw new SerializationException("Error deserializing key/value for partition " + partition + " at offset " + logEntry.offset(), e);
    }
}
Also used : SerializationException(org.apache.kafka.common.errors.SerializationException) TimestampType(org.apache.kafka.common.record.TimestampType) Record(org.apache.kafka.common.record.Record) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) KafkaException(org.apache.kafka.common.KafkaException) InvalidRecordException(org.apache.kafka.common.record.InvalidRecordException) ByteBuffer(java.nio.ByteBuffer)
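
Any RuntimeException escaping a Deserializer is rewrapped here with the partition and offset, so a custom deserializer can simply throw. Below is a minimal, hypothetical sketch of such a deserializer; the fixed header-length check is only illustrative.

import java.util.Map;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;

// Illustrative deserializer that rejects payloads shorter than a fixed header.
// When it throws, parseRecord() above rewraps the exception in a
// SerializationException naming the partition and offset.
public class FixedHeaderDeserializer implements Deserializer<byte[]> {

    private static final int HEADER_LENGTH = 4; // assumption for this sketch

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        // no configuration needed
    }

    @Override
    public byte[] deserialize(String topic, byte[] data) {
        if (data != null && data.length < HEADER_LENGTH)
            throw new SerializationException("Payload shorter than the expected " + HEADER_LENGTH + "-byte header");
        return data;
    }

    @Override
    public void close() {
        // nothing to release
    }
}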

Example 4 with SerializationException

Use of org.apache.kafka.common.errors.SerializationException in the Apache Kafka project.

The class JsonConverter, method toConnectData.

@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
    JsonNode jsonValue;
    try {
        jsonValue = deserializer.deserialize(topic, value);
    } catch (SerializationException e) {
        throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
    }
    if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload")))
        throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
    // If schemas are disabled, the original schema was stripped during serialization and we need to fill in an all-encompassing schema.
    if (!enableSchemas) {
        ObjectNode envelope = JsonNodeFactory.instance.objectNode();
        envelope.set("schema", null);
        envelope.set("payload", jsonValue);
        jsonValue = envelope;
    }
    return jsonToConnect(jsonValue);
}
Also used : DataException(org.apache.kafka.connect.errors.DataException) SerializationException(org.apache.kafka.common.errors.SerializationException) ObjectNode(com.fasterxml.jackson.databind.node.ObjectNode) JsonNode(com.fasterxml.jackson.databind.JsonNode)
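
A hedged sketch of using the converter above outside a running Connect worker, with schemas.enable set to false so the raw JSON itself becomes the payload; the topic name and input bytes are assumptions.

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.json.JsonConverter;

// Sketch: standalone use of JsonConverter with schemas disabled.
public class JsonConverterExample {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        Map<String, Object> config = new HashMap<>();
        config.put("schemas.enable", "false");
        converter.configure(config, false); // false = configure as a value converter

        byte[] json = "{\"name\":\"example\"}".getBytes(StandardCharsets.UTF_8);
        try {
            SchemaAndValue connectData = converter.toConnectData("my-topic", json);
            System.out.println(connectData.value()); // schemaless payload parsed from the JSON
        } catch (DataException e) {
            // Raised when the bytes are not valid JSON: the underlying
            // SerializationException is rewrapped, as shown in toConnectData().
            e.printStackTrace();
        }
    }
}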

Aggregations

SerializationException (org.apache.kafka.common.errors.SerializationException): 4
KafkaException (org.apache.kafka.common.KafkaException): 2
JsonNode (com.fasterxml.jackson.databind.JsonNode): 1
ObjectNode (com.fasterxml.jackson.databind.node.ObjectNode): 1
ByteBuffer (java.nio.ByteBuffer): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 1
RecordAccumulator (org.apache.kafka.clients.producer.internals.RecordAccumulator): 1
Cluster (org.apache.kafka.common.Cluster): 1
TopicPartition (org.apache.kafka.common.TopicPartition): 1
ConfigException (org.apache.kafka.common.config.ConfigException): 1
ApiException (org.apache.kafka.common.errors.ApiException): 1
InterruptException (org.apache.kafka.common.errors.InterruptException): 1
RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException): 1
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 1
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 1
Metrics (org.apache.kafka.common.metrics.Metrics): 1
InvalidRecordException (org.apache.kafka.common.record.InvalidRecordException): 1
Record (org.apache.kafka.common.record.Record): 1
TimestampType (org.apache.kafka.common.record.TimestampType): 1