
Example 1 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.

In class WorkerSourceTask, the method convertHeaderFor:

private RecordHeaders convertHeaderFor(SourceRecord record) {
    Headers headers = record.headers();
    RecordHeaders result = new RecordHeaders();
    if (headers != null) {
        String topic = record.topic();
        for (Header header : headers) {
            String key = header.key();
            byte[] rawHeader = headerConverter.fromConnectHeader(topic, key, header.schema(), header.value());
            result.add(key, rawHeader);
        }
    }
    return result;
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Header(org.apache.kafka.connect.header.Header) Headers(org.apache.kafka.connect.header.Headers)
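
For context, a minimal, self-contained sketch (with hypothetical header keys and values) of the kind of RecordHeaders object convertHeaderFor builds once the HeaderConverter has serialized each header value:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        // Build the same structure convertHeaderFor returns: an ordered collection of key/byte[] headers.
        RecordHeaders headers = new RecordHeaders();
        headers.add("trace-id", "abc-123".getBytes(StandardCharsets.UTF_8));
        headers.add("source-connector", "orders".getBytes(StandardCharsets.UTF_8));
        // RecordHeaders is Iterable<Header>, so the serialized headers can be inspected directly.
        headers.forEach(h -> System.out.println(h.key() + " -> " + new String(h.value(), StandardCharsets.UTF_8)));
    }
}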

Example 2 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.

In class WorkerSourceTask, the method sendRecords:

/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }
        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
        // Queue the outstanding message (or backlog it while a flush is in progress) and record its
        // offset before sending, since the producer callback may fire immediately and will mark the
        // record as sent.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {

                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}", this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
Also used : SourceRecord(org.apache.kafka.connect.source.SourceRecord) KafkaException(org.apache.kafka.common.KafkaException) TimeoutException(java.util.concurrent.TimeoutException) RetriableException(org.apache.kafka.common.errors.RetriableException) ExecutionException(java.util.concurrent.ExecutionException) ConnectException(org.apache.kafka.connect.errors.ConnectException) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Callback(org.apache.kafka.clients.producer.Callback) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord)
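
Outside of the Connect framework, the same send-with-headers-and-callback pattern looks roughly like the following standalone sketch (broker address, topic name, and header contents are placeholders):

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class HeaderedSendSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        try (Producer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            RecordHeaders headers = new RecordHeaders();
            headers.add("origin", "sketch".getBytes(StandardCharsets.UTF_8));
            ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                    "example-topic", null, null,
                    "key".getBytes(StandardCharsets.UTF_8),
                    "value".getBytes(StandardCharsets.UTF_8),
                    headers);
            // The callback mirrors the anonymous Callback above: log failures, otherwise report success.
            producer.send(record, (metadata, exception) -> {
                if (exception != null) {
                    System.err.println("Send failed: " + exception);
                } else {
                    System.out.printf("Wrote record: topic %s partition %d offset %d%n",
                            metadata.topic(), metadata.partition(), metadata.offset());
                }
            });
        }
    }
}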

Example 3 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.

In class ConsumerRecordTest, the method testNullChecksumInConstructor:

@Test
@SuppressWarnings("deprecation")
public void testNullChecksumInConstructor() {
    String key = "key";
    String value = "value";
    long timestamp = 242341324L;
    ConsumerRecord<String, String> record = new ConsumerRecord<>("topic", 0, 23L, timestamp, TimestampType.CREATE_TIME, null, key.length(), value.length(), key, value, new RecordHeaders());
    assertEquals(DefaultRecord.computePartialChecksum(timestamp, key.length(), value.length()), record.checksum());
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Test(org.junit.Test)
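
A related sketch (hypothetical topic, offset, and header key) showing that headers passed into this same deprecated constructor are retrievable from the resulting ConsumerRecord:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class ConsumerRecordHeadersSketch {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("schema-version", "1".getBytes(StandardCharsets.UTF_8));
        // Same constructor as the test above: null checksum, explicit serialized key/value sizes.
        ConsumerRecord<String, String> record = new ConsumerRecord<>(
                "example-topic", 0, 42L, System.currentTimeMillis(), TimestampType.CREATE_TIME,
                null, 3, 5, "key", "value", headers);
        // The headers handed to the constructor are exposed unchanged.
        System.out.println(new String(record.headers().lastHeader("schema-version").value(), StandardCharsets.UTF_8));
    }
}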

Example 4 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.

In class SourceNodeTest, the method shouldProvideTopicHeadersAndDataToValueDeserializer:

@Test
public void shouldProvideTopicHeadersAndDataToValueDeserializer() {
    final SourceNode<String, String> sourceNode = new MockSourceNode<>(new String[] { "" }, new TheExtendedDeserializer(), new TheExtendedDeserializer());
    final RecordHeaders headers = new RecordHeaders();
    final String deserializedValue = sourceNode.deserializeValue("topic", headers, "data".getBytes(StandardCharsets.UTF_8));
    assertThat(deserializedValue, is("topic" + headers + "data"));
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) MockSourceNode(org.apache.kafka.test.MockSourceNode) Test(org.junit.Test)
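
As a rough illustration of a header-aware value deserializer like the TheExtendedDeserializer used above (the class name and header key here are made up, and this assumes a kafka-clients version where Deserializer exposes the header-aware overload as a default method):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;

public class HeaderAwareStringDeserializer implements Deserializer<String> {
    @Override
    public String deserialize(String topic, byte[] data) {
        return new String(data, StandardCharsets.UTF_8);
    }

    @Override
    public String deserialize(String topic, Headers headers, byte[] data) {
        // Prefix the payload with a header value when present; "origin" is a hypothetical header key.
        Header origin = headers.lastHeader("origin");
        String prefix = origin == null ? "" : new String(origin.value(), StandardCharsets.UTF_8) + ":";
        return prefix + deserialize(topic, data);
    }
}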

Example 5 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

In class WorkerSourceTask, the method convertTransformedRecord:

/**
 * Convert the source record into a producer record.
 *
 * @param record the transformed record
 * @return the producer record which can be sent to Kafka; null is returned if the input is null or
 * if an error was encountered during any of the converter stages.
 */
private ProducerRecord<byte[], byte[]> convertTransformedRecord(SourceRecord record) {
    if (record == null) {
        return null;
    }
    RecordHeaders headers = retryWithToleranceOperator.execute(() -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverter.getClass());
    byte[] key = retryWithToleranceOperator.execute(() -> keyConverter.fromConnectData(record.topic(), headers, record.keySchema(), record.key()), Stage.KEY_CONVERTER, keyConverter.getClass());
    byte[] value = retryWithToleranceOperator.execute(() -> valueConverter.fromConnectData(record.topic(), headers, record.valueSchema(), record.value()), Stage.VALUE_CONVERTER, valueConverter.getClass());
    if (retryWithToleranceOperator.failed()) {
        return null;
    }
    return new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord)
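
The retryWithToleranceOperator used above is Connect-internal; a simplified stand-in illustrating the same per-stage pattern, where each converter stage runs in isolation and a failure short-circuits to null instead of throwing, might look like this:

import java.util.function.Supplier;

final class StageRunner {
    private boolean failed = false;

    // Run one conversion stage; swallow its failure and remember that something went wrong.
    <T> T execute(Supplier<T> stage, String stageName) {
        try {
            return stage.get();
        } catch (Exception e) {
            failed = true;
            System.err.println("Stage " + stageName + " failed: " + e);
            return null;
        }
    }

    boolean failed() {
        return failed;
    }
}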

Aggregations

RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 156 uses
Test (org.junit.Test): 111 uses
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 52 uses
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 41 uses
Headers (org.apache.kafka.common.header.Headers): 34 uses
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 27 uses
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 24 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 22 uses
Position (org.apache.kafka.streams.query.Position): 17 uses
ArrayList (java.util.ArrayList): 13 uses
Header (org.apache.kafka.common.header.Header): 13 uses
HashMap (java.util.HashMap): 12 uses
ByteBuffer (java.nio.ByteBuffer): 11 uses
Struct (org.apache.kafka.connect.data.Struct): 11 uses
Test (org.junit.jupiter.api.Test): 11 uses
LinkedHashMap (java.util.LinkedHashMap): 9 uses
Bytes (org.apache.kafka.common.utils.Bytes): 9 uses
StreamsException (org.apache.kafka.streams.errors.StreamsException): 9 uses
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 8 uses
Metrics (org.apache.kafka.common.metrics.Metrics): 8 uses