Use of org.apache.kafka.connect.runtime.InternalSinkRecord in project kafka by apache.
The example below is the report method of the class WorkerErrantRecordReporter. When a failed record is an InternalSinkRecord, the reporter recovers the original pre-transform consumer record for the dead letter queue; otherwise it reconstructs one from the (possibly transformed) sink record.
@Override
public Future<Void> report(SinkRecord record, Throwable error) {
    ConsumerRecord<byte[], byte[]> consumerRecord;
    // Most records will be InternalSinkRecords, but the task could potentially
    // report modified or new records, so handle both cases
    if (record instanceof InternalSinkRecord) {
        consumerRecord = ((InternalSinkRecord) record).originalRecord();
    } else {
        // Generate a new consumer record from the modified sink record. We prefer
        // to send the original consumer record (pre-transformed) to the DLQ,
        // but in this case we don't have one and send the potentially transformed
        // record instead
        String topic = record.topic();
        byte[] key = keyConverter.fromConnectData(topic, record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(topic, record.valueSchema(), record.value());
        RecordHeaders headers = new RecordHeaders();
        if (record.headers() != null) {
            for (Header header : record.headers()) {
                String headerKey = header.key();
                byte[] rawHeader = headerConverter.fromConnectHeader(topic, headerKey, header.schema(), header.value());
                headers.add(headerKey, rawHeader);
            }
        }
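        // -1 matches ConsumerRecord.NULL_SIZE, the sentinel for the serialized size of a null key/value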
        int keyLength = key != null ? key.length : -1;
        int valLength = value != null ? value.length : -1;
        consumerRecord = new ConsumerRecord<>(record.topic(), record.kafkaPartition(),
                record.kafkaOffset(), record.timestamp(), record.timestampType(),
                keyLength, valLength, key, value, headers, Optional.empty());
    }
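    // Route the failed record through the error-tolerance machinery, which reports it
    // asynchronously to the configured error reporters (e.g. the dead letter queue topic)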
    Future<Void> future = retryWithToleranceOperator.executeFailed(Stage.TASK_PUT, SinkTask.class, consumerRecord, error);
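    // Remember still-pending reports per partition so the worker can await them
    // before committing offsets for that partition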
    if (!future.isDone()) {
        TopicPartition partition = new TopicPartition(consumerRecord.topic(), consumerRecord.partition());
        futures.computeIfAbsent(partition, p -> new ArrayList<>()).add(future);
    }
    return future;
}
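For context, a connector's SinkTask reaches this reporter through the public ErrantRecordReporter interface on SinkTaskContext (KIP-610). Below is a minimal sketch of the documented usage pattern; MySinkTask and its process method are hypothetical placeholders, not part of the Kafka codebase.

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.ErrantRecordReporter;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class MySinkTask extends SinkTask {
    private ErrantRecordReporter reporter;

    @Override
    public void start(Map<String, String> props) {
        try {
            // May be null if the worker has no error handling (DLQ/log) configured
            reporter = context.errantRecordReporter();
        } catch (NoSuchMethodError | NoClassDefFoundError e) {
            // Connect runtimes older than 2.6 do not expose the reporter
            reporter = null;
        }
    }

    @Override
    public void put(Collection<SinkRecord> records) {
        for (SinkRecord record : records) {
            try {
                process(record); // hypothetical per-record delivery to the sink system
            } catch (Exception e) {
                if (reporter != null) {
                    // Delegates to WorkerErrantRecordReporter.report(...) above; returns
                    // a Future that completes once the record has been reported
                    reporter.report(record, e);
                } else {
                    throw new ConnectException("Failed record and no errant record reporter available", e);
                }
            }
        }
    }

    private void process(SinkRecord record) {
        // hypothetical sink-specific write logic
    }

    @Override
    public void stop() {
    }

    @Override
    public String version() {
        return "1.0";
    }
}

Because report is asynchronous, a task that needs the failed record durably reported before offsets are committed can hold the returned Future and wait on it, for example in flush or preCommit.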