use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.
the class WorkerSourceTask method convertHeaderFor.
private RecordHeaders convertHeaderFor(SourceRecord record) {
    Headers headers = record.headers();
    RecordHeaders result = new RecordHeaders();
    if (headers != null) {
        String topic = record.topic();
        for (Header header : headers) {
            String key = header.key();
            byte[] rawHeader = headerConverter.fromConnectHeader(topic, key, header.schema(), header.value());
            result.add(key, rawHeader);
        }
    }
    return result;
}
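For context, the RecordHeaders API that convertHeaderFor relies on (add(String, byte[]), iteration, lastHeader) can also be exercised on its own. The following is a minimal standalone sketch, not taken from the project; the header keys and values are made up for illustration.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        // add(String, byte[]) is the same call convertHeaderFor makes for each converted header
        headers.add("trace-id", "abc-123".getBytes(StandardCharsets.UTF_8));
        headers.add(new RecordHeader("source", "orders-db".getBytes(StandardCharsets.UTF_8)));

        // Headers preserve insertion order and allow duplicate keys
        for (Header header : headers) {
            System.out.println(header.key() + " = " + new String(header.value(), StandardCharsets.UTF_8));
        }

        // lastHeader returns the most recently added header for a key, or null if the key is absent
        Header traceId = headers.lastHeader("trace-id");
        System.out.println("last trace-id: " + new String(traceId.value(), StandardCharsets.UTF_8));
    }
}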
use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.
the class WorkerSourceTask method sendRecords.
/**
* Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
* be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
* @return true if all messages were sent, false if some need to be retried
*/
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }
        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(
                record.topic(), record.kafkaPartition(),
                ConnectUtils.checkAndConvertTimestamp(record.timestamp()),
                key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
        // Track the record as outstanding (or backlog it while a flush is in progress) before sending,
        // since the producer callback may fire immediately; then record the source offset.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
                                this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
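sendRecords is Connect-internal, but the header-carrying ProducerRecord constructor and the send callback it uses are plain producer API. Below is a minimal standalone sketch of the same send-with-headers pattern; the broker address, topic name, and header contents are assumptions for illustration only.

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class SendWithHeadersSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

        RecordHeaders headers = new RecordHeaders();
        headers.add("origin", "sketch".getBytes(StandardCharsets.UTF_8));

        // Same constructor shape as in sendRecords: topic, partition, timestamp, key, value, headers
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                "demo-topic", null, null,
                "key".getBytes(StandardCharsets.UTF_8),
                "value".getBytes(StandardCharsets.UTF_8),
                headers);

        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            producer.send(record, (metadata, exception) -> {
                if (exception != null) {
                    System.err.println("Send failed: " + exception);
                } else {
                    System.out.printf("Wrote to %s-%d at offset %d%n",
                            metadata.topic(), metadata.partition(), metadata.offset());
                }
            });
            producer.flush();
        }
    }
}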
use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.
the class ConsumerRecordTest method testNullChecksumInConstructor.
@Test
@SuppressWarnings("deprecation")
public void testNullChecksumInConstructor() {
    String key = "key";
    String value = "value";
    long timestamp = 242341324L;
    ConsumerRecord<String, String> record = new ConsumerRecord<>("topic", 0, 23L, timestamp, TimestampType.CREATE_TIME,
            null, key.length(), value.length(), key, value, new RecordHeaders());
    assertEquals(DefaultRecord.computePartialChecksum(timestamp, key.length(), value.length()), record.checksum());
}
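The test above constructs a ConsumerRecord with an empty RecordHeaders directly; in application code, headers normally arrive on consumed records and are read back through record.headers(). A minimal sketch of that read path, with an assumed broker address, topic, and header key:

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ReadHeadersSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "header-sketch");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic"));
            for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(1))) {
                // headers() exposes the same Headers view the test builds with new RecordHeaders()
                Header traceId = record.headers().lastHeader("trace-id");
                if (traceId != null) {
                    System.out.println("trace-id = " + new String(traceId.value(), StandardCharsets.UTF_8));
                }
            }
        }
    }
}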
use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.
the class SourceNodeTest method shouldProvideTopicHeadersAndDataToValueDeserializer.
@Test
public void shouldProvideTopicHeadersAndDataToValueDeserializer() {
    final SourceNode<String, String> sourceNode = new MockSourceNode<>(
            new String[] { "" }, new TheExtendedDeserializer(), new TheExtendedDeserializer());
    final RecordHeaders headers = new RecordHeaders();
    final String deserializedValue = sourceNode.deserializeValue("topic", headers, "data".getBytes(StandardCharsets.UTF_8));
    assertThat(deserializedValue, is("topic" + headers + "data"));
}
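TheExtendedDeserializer in this test evidently concatenates the topic, the headers' toString(), and the payload, which is what the assertion checks. A hypothetical deserializer in the same spirit, written against the headers-aware deserialize overload that newer Deserializer versions provide (the deprecated ExtendedDeserializer interface served the same purpose in older releases):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;

public class TopicHeadersAwareDeserializer implements Deserializer<String> {

    @Override
    public String deserialize(String topic, byte[] data) {
        // Fallback used when no headers are supplied
        return topic + new String(data, StandardCharsets.UTF_8);
    }

    @Override
    public String deserialize(String topic, Headers headers, byte[] data) {
        // Prepend the topic and the headers' string form to the payload,
        // mirroring the "topic" + headers + "data" value the test asserts
        return topic + headers + new String(data, StandardCharsets.UTF_8);
    }
}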
use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
the class WorkerSourceTask method convertTransformedRecord.
/**
* Convert the source record into a producer record.
*
* @param record the transformed record
* @return the producer record which can be sent over to Kafka. Null is returned if the input is null or
* if an error was encountered during any of the converter stages.
*/
private ProducerRecord<byte[], byte[]> convertTransformedRecord(SourceRecord record) {
    if (record == null) {
        return null;
    }
    RecordHeaders headers = retryWithToleranceOperator.execute(
            () -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverter.getClass());
    byte[] key = retryWithToleranceOperator.execute(
            () -> keyConverter.fromConnectData(record.topic(), headers, record.keySchema(), record.key()),
            Stage.KEY_CONVERTER, keyConverter.getClass());
    byte[] value = retryWithToleranceOperator.execute(
            () -> valueConverter.fromConnectData(record.topic(), headers, record.valueSchema(), record.value()),
            Stage.VALUE_CONVERTER, valueConverter.getClass());
    if (retryWithToleranceOperator.failed()) {
        return null;
    }
    return new ProducerRecord<>(record.topic(), record.kafkaPartition(),
            ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
}
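The retry/tolerance wrapper and the header-aware converter overloads above are Connect-internal plumbing, but the basic convert-then-build-record step can be sketched standalone with the stock StringConverter. This is a simplified illustration, not the framework's code path; the topic, header key, and values are made up, and it skips the per-stage error handling shown above.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.storage.StringConverter;

public class StandaloneConversionSketch {
    public static void main(String[] args) {
        StringConverter keyConverter = new StringConverter();
        StringConverter valueConverter = new StringConverter();
        keyConverter.configure(Collections.emptyMap(), true);    // isKey = true
        valueConverter.configure(Collections.emptyMap(), false); // isKey = false

        RecordHeaders headers = new RecordHeaders();
        headers.add("source", "orders-db".getBytes(StandardCharsets.UTF_8));

        byte[] key = keyConverter.fromConnectData("orders", Schema.STRING_SCHEMA, "order-42");
        byte[] value = valueConverter.fromConnectData("orders", Schema.STRING_SCHEMA, "{\"total\": 10}");

        // Same constructor convertTransformedRecord returns; partition and timestamp are left null here
        ProducerRecord<byte[], byte[]> producerRecord =
                new ProducerRecord<>("orders", null, null, key, value, headers);
        System.out.println(producerRecord);
    }
}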