Use of org.apache.kafka.connect.header.Headers in project kafka by apache.
From the class HeaderFromTest, the method withSchema:
@ParameterizedTest
@MethodSource("data")
public void withSchema(String description,
                       boolean keyTransform,
                       RecordBuilder originalBuilder,
                       List<String> transformFields,
                       List<String> headers1,
                       HeaderFrom.Operation operation,
                       RecordBuilder expectedBuilder) {
    HeaderFrom<SourceRecord> xform = keyTransform ? new HeaderFrom.Key<>() : new HeaderFrom.Value<>();
    xform.configure(config(headers1, transformFields, operation));
    ConnectHeaders headers = new ConnectHeaders();
    headers.addString("existing", "existing-value");
    Headers expect = headers.duplicate();
    for (int i = 0; i < headers1.size(); i++) {
        expect.add(headers1.get(i), originalBuilder.fieldValues.get(i), originalBuilder.fieldSchemas.get(i));
    }
    SourceRecord originalRecord = originalBuilder.withSchema(keyTransform);
    SourceRecord expectedRecord = expectedBuilder.withSchema(keyTransform);
    SourceRecord xformed = xform.apply(originalRecord);
    assertSameRecord(expectedRecord, xformed);
}
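The config(...) call above comes from a test helper; outside the test, the same transform would be configured directly through its properties. A minimal sketch follows, assuming the transform's standard "fields", "headers", and "operation" config keys; the field and header names are placeholders, not values taken from the test.

// Hypothetical standalone configuration of HeaderFrom.Value (names are placeholders).
Map<String, Object> props = new HashMap<>();
props.put("fields", "field1");      // record field(s) to read
props.put("headers", "inserted1");  // header name(s) to populate, matched by position
props.put("operation", "copy");     // "copy" keeps the field in the value, "move" removes it
HeaderFrom<SourceRecord> headerFrom = new HeaderFrom.Value<>();
headerFrom.configure(props);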
Use of org.apache.kafka.connect.header.Headers in project kafka by apache.
From the class HeaderFrom, the method applySchemaless:
private R applySchemaless(R record, Object operatingValue) {
    // Work on copies so the original record's headers and value map are left untouched.
    Headers updatedHeaders = record.headers().duplicate();
    Map<String, Object> value = Requirements.requireMap(operatingValue, "header " + operation);
    Map<String, Object> updatedValue = new HashMap<>(value);
    for (int i = 0; i < fields.size(); i++) {
        String fieldName = fields.get(i);
        Object fieldValue = value.get(fieldName);
        String headerName = headers.get(i);
        // MOVE removes the field from the value; COPY leaves it in place.
        if (operation == Operation.MOVE) {
            updatedValue.remove(fieldName);
        }
        // Schemaless data carries no schema, so the header is added with a null schema.
        updatedHeaders.add(headerName, fieldValue, null);
    }
    return newRecord(record, null, updatedValue, updatedHeaders);
}
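To see the MOVE branch end to end, a minimal caller sketch is given below; the topic, field, and header names are assumptions for illustration only, and imports are omitted to match the snippets above.

// Sketch: applying a MOVE-configured HeaderFrom.Value to a schemaless record.
Map<String, Object> value = new HashMap<>();
value.put("field1", "field1-value");
value.put("other", 42);
SourceRecord record = new SourceRecord(null, null, "topic", 0,
        null, null,   // no key
        null, value,  // null schema marks the value as schemaless
        null, new ConnectHeaders());
HeaderFrom<SourceRecord> headerFrom = new HeaderFrom.Value<>();
headerFrom.configure(Map.of("fields", "field1", "headers", "h1", "operation", "move"));
SourceRecord moved = headerFrom.apply(record);
// moved.headers() now carries "h1" with a null schema; "field1" is gone from the value map.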
Use of org.apache.kafka.connect.header.Headers in project kafka by apache.
From the class InsertHeaderTest, the method insertionWithExistingSameHeader:
@Test
public void insertionWithExistingSameHeader() {
    xform.configure(config("existing", "inserted-value"));
    ConnectHeaders headers = new ConnectHeaders();
    headers.addString("existing", "preexisting-value");
    Headers expect = headers.duplicate().addString("existing", "inserted-value");
    SourceRecord original = sourceRecord(headers);
    SourceRecord xformed = xform.apply(original);
    assertNonHeaders(original, xformed);
    assertEquals(expect, xformed.headers());
}
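The assertion above relies on Connect headers being multi-valued: adding "existing" a second time appends a new Header instead of overwriting the first. A short sketch of that behaviour using only the Headers API (header names are placeholders):

ConnectHeaders headers = new ConnectHeaders();
headers.addString("existing", "preexisting-value");
headers.addString("existing", "inserted-value");
Header latest = headers.lastWithName("existing");        // "inserted-value"
Iterator<Header> all = headers.allWithName("existing");  // both values, in insertion order
headers.retainLatest("existing");                        // would collapse to the latest value only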
Use of org.apache.kafka.connect.header.Headers in project kafka by apache.
From the class InsertHeaderTest, the method insertionWithExistingOtherHeader:
@Test
public void insertionWithExistingOtherHeader() {
    xform.configure(config("inserted", "inserted-value"));
    ConnectHeaders headers = new ConnectHeaders();
    headers.addString("existing", "existing-value");
    Headers expect = headers.duplicate().addString("inserted", "inserted-value");
    SourceRecord original = sourceRecord(headers);
    SourceRecord xformed = xform.apply(original);
    assertNonHeaders(original, xformed);
    assertEquals(expect, xformed.headers());
}
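Outside the test harness, the equivalent configuration would be supplied through the transform's own properties. A sketch follows, assuming InsertHeader exposes the "header" and "value.literal" config keys; the values are placeholders.

InsertHeader<SourceRecord> insertHeader = new InsertHeader<>();
Map<String, Object> props = new HashMap<>();
props.put("header", "inserted");              // name of the header to add
props.put("value.literal", "inserted-value"); // literal string value for the header
insertHeader.configure(props);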
Use of org.apache.kafka.connect.header.Headers in project apache-kafka-on-k8s by banzaicloud.
From the class WorkerSinkTask, the method convertMessages:
private void convertMessages(ConsumerRecords<byte[], byte[]> msgs) {
    origOffsets.clear();
    for (ConsumerRecord<byte[], byte[]> msg : msgs) {
        log.trace("{} Consuming and converting message in topic '{}' partition {} at offset {} and timestamp {}",
                this, msg.topic(), msg.partition(), msg.offset(), msg.timestamp());
        // Deserialize key, value, and headers into Connect's data model.
        SchemaAndValue keyAndSchema = keyConverter.toConnectData(msg.topic(), msg.key());
        SchemaAndValue valueAndSchema = valueConverter.toConnectData(msg.topic(), msg.value());
        Headers headers = convertHeadersFor(msg);
        Long timestamp = ConnectUtils.checkAndConvertTimestamp(msg.timestamp());
        SinkRecord origRecord = new SinkRecord(msg.topic(), msg.partition(),
                keyAndSchema.schema(), keyAndSchema.value(),
                valueAndSchema.schema(), valueAndSchema.value(),
                msg.offset(), timestamp, msg.timestampType(), headers);
        log.trace("{} Applying transformations to record in topic '{}' partition {} at offset {} and timestamp {} with key {} and value {}",
                this, msg.topic(), msg.partition(), msg.offset(), timestamp, keyAndSchema.value(), valueAndSchema.value());
        SinkRecord transRecord = transformationChain.apply(origRecord);
        // Track the next offset to commit for this partition, whether or not the record is kept.
        origOffsets.put(new TopicPartition(origRecord.topic(), origRecord.kafkaPartition()),
                new OffsetAndMetadata(origRecord.kafkaOffset() + 1));
        if (transRecord != null) {
            messageBatch.add(transRecord);
        } else {
            // A null result from the transformation chain means the record was filtered out.
            log.trace("{} Transformations returned null, so dropping record in topic '{}' partition {} at offset {} and timestamp {} with key {} and value {}",
                    this, msg.topic(), msg.partition(), msg.offset(), timestamp, keyAndSchema.value(), valueAndSchema.value());
        }
    }
    sinkTaskMetricsGroup.recordConsumedOffsets(origOffsets);
}
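convertHeadersFor is referenced but not shown in this snippet. The sketch below is an assumption of how such a helper can be written, not the fork's actual code: it runs each raw Kafka record header through the task's HeaderConverter (held here in an assumed headerConverter field) and collects the results into a ConnectHeaders instance.

private Headers convertHeadersFor(ConsumerRecord<byte[], byte[]> record) {
    // Sketch only: build Connect headers from the raw consumer record headers.
    Headers result = new ConnectHeaders();
    String topic = record.topic();
    for (org.apache.kafka.common.header.Header recordHeader : record.headers()) {
        SchemaAndValue schemaAndValue =
                headerConverter.toConnectHeader(topic, recordHeader.key(), recordHeader.value());
        result.add(recordHeader.key(), schemaAndValue);
    }
    return result;
}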