Search in sources :

Example 11 with Headers

use of org.apache.kafka.connect.header.Headers in project apache-kafka-on-k8s by banzaicloud.

the class SinkRecordTest method shouldCreateSinkRecordWithHeaders.

@Test
public void shouldCreateSinkRecordWithHeaders() {
    // Build a header set containing one string header and one boolean header.
    Headers recordHeaders = new ConnectHeaders()
            .addString("h1", "hv1")
            .addBoolean("h2", true);
    record = new SinkRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
            Schema.BOOLEAN_SCHEMA, false, KAFKA_OFFSET, KAFKA_TIMESTAMP, TS_TYPE, recordHeaders);
    // The record must expose the very same Headers instance it was constructed with.
    Headers exposed = record.headers();
    assertNotNull(exposed);
    assertSame(recordHeaders, exposed);
    assertFalse(exposed.isEmpty());
}
Also used : ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) Headers(org.apache.kafka.connect.header.Headers) Test(org.junit.Test)

Example 12 with Headers

use of org.apache.kafka.connect.header.Headers in project apache-kafka-on-k8s by banzaicloud.

the class SinkRecordTest method shouldDuplicateRecordUsingNewHeaders.

@Test
public void shouldDuplicateRecordUsingNewHeaders() {
    // newRecord(...) with an explicit Headers argument must use that argument
    // verbatim rather than copying the original record's headers.
    Headers replacementHeaders = new ConnectHeaders().addString("h3", "hv3");
    SinkRecord copy = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
            Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, replacementHeaders);
    // Every non-header field is carried over from the source record unchanged.
    assertEquals(TOPIC_NAME, copy.topic());
    assertEquals(PARTITION_NUMBER, copy.kafkaPartition());
    assertEquals(Schema.STRING_SCHEMA, copy.keySchema());
    assertEquals("key", copy.key());
    assertEquals(Schema.BOOLEAN_SCHEMA, copy.valueSchema());
    assertEquals(false, copy.value());
    assertEquals(KAFKA_OFFSET, copy.kafkaOffset());
    assertEquals(KAFKA_TIMESTAMP, copy.timestamp());
    assertEquals(TS_TYPE, copy.timestampType());
    // The duplicate holds exactly the replacement headers ...
    Headers copyHeaders = copy.headers();
    assertNotNull(copyHeaders);
    assertEquals(replacementHeaders, copyHeaders);
    assertSame(replacementHeaders, copyHeaders);
    // ... and shares nothing with the original record's headers.
    assertNotSame(record.headers(), copyHeaders);
    assertNotEquals(record.headers(), copyHeaders);
}
Also used : ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) Headers(org.apache.kafka.connect.header.Headers) Test(org.junit.Test)

Example 13 with Headers

use of org.apache.kafka.connect.header.Headers in project apache-kafka-on-k8s by banzaicloud.

the class SourceRecordTest method shouldCreateSinkRecordWithHeaders.

@Test
// NOTE(review): name says "SinkRecord" but the test constructs a SourceRecord —
// likely copy-pasted from SinkRecordTest; confirm before renaming.
public void shouldCreateSinkRecordWithHeaders() {
    // Build a header set containing one string header and one boolean header.
    Headers recordHeaders = new ConnectHeaders()
            .addString("h1", "hv1")
            .addBoolean("h2", true);
    record = new SourceRecord(SOURCE_PARTITION, SOURCE_OFFSET, TOPIC_NAME, PARTITION_NUMBER,
            Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, recordHeaders);
    // The record must expose the very same Headers instance it was constructed with.
    Headers exposed = record.headers();
    assertNotNull(exposed);
    assertSame(recordHeaders, exposed);
    assertFalse(exposed.isEmpty());
}
Also used : ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) Headers(org.apache.kafka.connect.header.Headers) Test(org.junit.Test)

Example 14 with Headers

use of org.apache.kafka.connect.header.Headers in project kafka by apache.

the class WorkerSinkTask method convertAndTransformRecord.

/**
 * Converts a raw consumer record into a {@code SinkRecord} and runs it through the
 * configured transformation chain.
 *
 * @param msg the consumed Kafka record with raw byte-array key and value
 * @return the transformed record wrapped as an {@code InternalSinkRecord}, or
 *         {@code null} if a conversion stage failed (and was tolerated) or a
 *         transformation dropped the record
 */
private SinkRecord convertAndTransformRecord(final ConsumerRecord<byte[], byte[]> msg) {
    // Deserialize key, value and headers; each stage is routed through the
    // error-tolerance machinery so a failure can be tolerated instead of thrown.
    final SchemaAndValue key = retryWithToleranceOperator.execute(
            () -> keyConverter.toConnectData(msg.topic(), msg.headers(), msg.key()),
            Stage.KEY_CONVERTER, keyConverter.getClass());
    final SchemaAndValue value = retryWithToleranceOperator.execute(
            () -> valueConverter.toConnectData(msg.topic(), msg.headers(), msg.value()),
            Stage.VALUE_CONVERTER, valueConverter.getClass());
    final Headers convertedHeaders = retryWithToleranceOperator.execute(
            () -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverter.getClass());
    if (retryWithToleranceOperator.failed()) {
        // A conversion stage failed and was tolerated; skip this record entirely.
        return null;
    }
    final Long timestamp = ConnectUtils.checkAndConvertTimestamp(msg.timestamp());
    final SinkRecord origRecord = new SinkRecord(msg.topic(), msg.partition(),
            key.schema(), key.value(), value.schema(), value.value(),
            msg.offset(), timestamp, msg.timestampType(), convertedHeaders);
    log.trace("{} Applying transformations to record in topic '{}' partition {} at offset {} and timestamp {} with key {} and value {}", this, msg.topic(), msg.partition(), msg.offset(), timestamp, key.value(), value.value());
    if (isTopicTrackingEnabled) {
        recordActiveTopic(origRecord.topic());
    }
    // Apply the transformations
    final SinkRecord transformedRecord = transformationChain.apply(origRecord);
    if (transformedRecord == null) {
        return null;
    }
    // Error reporting will need to correlate each sink record with the original consumer record
    return new InternalSinkRecord(msg, transformedRecord);
}
Also used : ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) Headers(org.apache.kafka.connect.header.Headers) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue)

Example 15 with Headers

use of org.apache.kafka.connect.header.Headers in project kafka by apache.

the class WorkerSinkTask method convertHeadersFor.

/**
 * Translates the wire-format headers of a consumed Kafka record into Connect
 * {@code Headers} using the task's configured {@code HeaderConverter}.
 *
 * @param record the consumed record whose headers should be converted
 * @return a new {@code ConnectHeaders}; empty when the record carries no headers
 */
private Headers convertHeadersFor(ConsumerRecord<byte[], byte[]> record) {
    final Headers converted = new ConnectHeaders();
    final org.apache.kafka.common.header.Headers rawHeaders = record.headers();
    if (rawHeaders == null) {
        return converted;
    }
    final String topic = record.topic();
    for (org.apache.kafka.common.header.Header raw : rawHeaders) {
        // Each raw header's bytes are decoded into a schema'd value, keyed by
        // the same header name.
        converted.add(raw.key(), headerConverter.toConnectHeader(topic, raw.key(), raw.value()));
    }
    return converted;
}
Also used : ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) ConnectHeaders(org.apache.kafka.connect.header.ConnectHeaders) Headers(org.apache.kafka.connect.header.Headers) SchemaAndValue(org.apache.kafka.connect.data.SchemaAndValue)

Aggregations

Headers (org.apache.kafka.connect.header.Headers)22 ConnectHeaders (org.apache.kafka.connect.header.ConnectHeaders)17 Test (org.junit.jupiter.api.Test)7 SourceRecord (org.apache.kafka.connect.source.SourceRecord)5 SchemaAndValue (org.apache.kafka.connect.data.SchemaAndValue)4 Test (org.junit.Test)4 TopicPartition (org.apache.kafka.common.TopicPartition)2 RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders)2 Header (org.apache.kafka.connect.header.Header)2 SinkRecord (org.apache.kafka.connect.sink.SinkRecord)2 HashMap (java.util.HashMap)1 OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata)1 Field (org.apache.kafka.connect.data.Field)1 Schema (org.apache.kafka.connect.data.Schema)1 Struct (org.apache.kafka.connect.data.Struct)1 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)1 MethodSource (org.junit.jupiter.params.provider.MethodSource)1