Use of org.apache.kafka.connect.header.ConnectHeaders in project apache-kafka-on-k8s by banzaicloud.
In the class SourceRecordTest, the method shouldCreateSinkRecordWithHeaders:
@Test
public void shouldCreateSinkRecordWithHeaders() {
    Headers headers = new ConnectHeaders().addString("h1", "hv1").addBoolean("h2", true);
    record = new SourceRecord(SOURCE_PARTITION, SOURCE_OFFSET, TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA,
            "key", Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, headers);
    assertNotNull(record.headers());
    assertSame(headers, record.headers());
    assertFalse(record.headers().isEmpty());
}
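Beyond addString and addBoolean used in the test above, ConnectHeaders offers further typed add methods and an overload that takes a SchemaAndValue directly. A minimal sketch; the header keys and values here are made up for illustration:

    // Sketch only: other ways to populate a ConnectHeaders instance before attaching it
    // to a SourceRecord or SinkRecord; the keys "h3" and "h4" are illustrative.
    Headers headers = new ConnectHeaders()
            .addString("h1", "hv1")
            .addBoolean("h2", true)
            .addInt("h3", 42)
            .add("h4", new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, null));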
Use of org.apache.kafka.connect.header.ConnectHeaders in project kafka by apache.
In the class WorkerSinkTask, the method convertHeadersFor:
private Headers convertHeadersFor(ConsumerRecord<byte[], byte[]> record) {
    Headers result = new ConnectHeaders();
    org.apache.kafka.common.header.Headers recordHeaders = record.headers();
    if (recordHeaders != null) {
        String topic = record.topic();
        for (org.apache.kafka.common.header.Header recordHeader : recordHeaders) {
            // Deserialize each raw Kafka header value into a schema'd Connect value.
            SchemaAndValue schemaAndValue = headerConverter.toConnectHeader(topic, recordHeader.key(), recordHeader.value());
            result.add(recordHeader.key(), schemaAndValue);
        }
    }
    return result;
}
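The snippet above covers the sink side: raw Kafka record headers are turned into Connect Headers via HeaderConverter.toConnectHeader. The opposite direction uses HeaderConverter.fromConnectHeader. A minimal sketch of that serialization step, assuming a headerConverter field is in scope; the method name convertHeadersTo is made up and is not copied from the worker code:

    // Sketch only: serialize Connect Headers back into raw Kafka record headers before
    // producing. Assumes a HeaderConverter field named headerConverter is available.
    private org.apache.kafka.common.header.internals.RecordHeaders convertHeadersTo(String topic, Headers connectHeaders) {
        org.apache.kafka.common.header.internals.RecordHeaders result = new org.apache.kafka.common.header.internals.RecordHeaders();
        if (connectHeaders != null) {
            for (Header header : connectHeaders) {
                byte[] rawValue = headerConverter.fromConnectHeader(topic, header.key(), header.schema(), header.value());
                result.add(header.key(), rawValue);
            }
        }
        return result;
    }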
Use of org.apache.kafka.connect.header.ConnectHeaders in project kafka by apache.
In the class SinkRecordTest, the method shouldCreateSinkRecordWithHeaders:
@Test
public void shouldCreateSinkRecordWithHeaders() {
    Headers headers = new ConnectHeaders().addString("h1", "hv1").addBoolean("h2", true);
    record = new SinkRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key", Schema.BOOLEAN_SCHEMA, false,
            KAFKA_OFFSET, KAFKA_TIMESTAMP, TS_TYPE, headers);
    assertNotNull(record.headers());
    assertSame(headers, record.headers());
    assertFalse(record.headers().isEmpty());
}
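On the consuming end, a sink connector can read these headers off each SinkRecord inside put(). A minimal sketch, assuming an SLF4J logger named log and a hypothetical "trace-id" header key:

    // Sketch only: iterate the Connect Headers attached to each SinkRecord and look up a
    // single header by name; "trace-id" and the logger are assumptions for illustration.
    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            for (Header header : record.headers()) {
                log.debug("header {} = {}", header.key(), header.value());
            }
            Header traceId = record.headers().lastWithName("trace-id");
            if (traceId != null) {
                log.debug("trace-id = {}", traceId.value());
            }
        }
    }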
Use of org.apache.kafka.connect.header.ConnectHeaders in project kafka by apache.
In the class SinkRecordTest, the method shouldDuplicateRecordUsingNewHeaders:
@Test
public void shouldDuplicateRecordUsingNewHeaders() {
    Headers newHeaders = new ConnectHeaders().addString("h3", "hv3");
    SinkRecord duplicate = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
            Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, newHeaders);
    assertEquals(TOPIC_NAME, duplicate.topic());
    assertEquals(PARTITION_NUMBER, duplicate.kafkaPartition());
    assertEquals(Schema.STRING_SCHEMA, duplicate.keySchema());
    assertEquals("key", duplicate.key());
    assertEquals(Schema.BOOLEAN_SCHEMA, duplicate.valueSchema());
    assertEquals(false, duplicate.value());
    assertEquals(KAFKA_OFFSET, duplicate.kafkaOffset());
    assertEquals(KAFKA_TIMESTAMP, duplicate.timestamp());
    assertEquals(TS_TYPE, duplicate.timestampType());
    assertNotNull(duplicate.headers());
    assertEquals(newHeaders, duplicate.headers());
    assertSame(newHeaders, duplicate.headers());
    assertNotSame(record.headers(), duplicate.headers());
    assertNotEquals(record.headers(), duplicate.headers());
}
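The test above passes a brand-new Headers object, so the duplicate deliberately does not share headers with the original record. When a transformation should carry the original headers forward instead, Headers.duplicate() produces an independent copy. A minimal sketch reusing the same test constants:

    // Sketch only: preserve the original record's headers in the new record while keeping
    // the two Headers objects independent of each other.
    SinkRecord copy = record.newRecord(TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA, "key",
            Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, record.headers().duplicate());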
Use of org.apache.kafka.connect.header.ConnectHeaders in project kafka by apache.
In the class SourceRecordTest, the method shouldCreateSinkRecordWithHeaders:
@Test
public void shouldCreateSinkRecordWithHeaders() {
    Headers headers = new ConnectHeaders().addString("h1", "hv1").addBoolean("h2", true);
    record = new SourceRecord(SOURCE_PARTITION, SOURCE_OFFSET, TOPIC_NAME, PARTITION_NUMBER, Schema.STRING_SCHEMA,
            "key", Schema.BOOLEAN_SCHEMA, false, KAFKA_TIMESTAMP, headers);
    assertNotNull(record.headers());
    assertSame(headers, record.headers());
    assertFalse(record.headers().isEmpty());
}