Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
Class WorkerErrantRecordReporter, method report().
@Override
public Future<Void> report(SinkRecord record, Throwable error) {
    ConsumerRecord<byte[], byte[]> consumerRecord;
    // Report modified or new records, so handle both cases
    if (record instanceof InternalSinkRecord) {
        consumerRecord = ((InternalSinkRecord) record).originalRecord();
    } else {
        // Generate a new consumer record from the modified sink record. We prefer
        // to send the original consumer record (pre-transformed) to the DLQ,
        // but in this case we don't have one and send the potentially transformed
        // record instead
        String topic = record.topic();
        byte[] key = keyConverter.fromConnectData(topic, record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(topic, record.valueSchema(), record.value());
        RecordHeaders headers = new RecordHeaders();
        if (record.headers() != null) {
            for (Header header : record.headers()) {
                String headerKey = header.key();
                byte[] rawHeader = headerConverter.fromConnectHeader(topic, headerKey, header.schema(), header.value());
                headers.add(headerKey, rawHeader);
            }
        }
        int keyLength = key != null ? key.length : -1;
        int valLength = value != null ? value.length : -1;
        consumerRecord = new ConsumerRecord<>(record.topic(), record.kafkaPartition(), record.kafkaOffset(),
                record.timestamp(), record.timestampType(), keyLength, valLength, key, value, headers, Optional.empty());
    }
    Future<Void> future = retryWithToleranceOperator.executeFailed(Stage.TASK_PUT, SinkTask.class, consumerRecord, error);
    if (!future.isDone()) {
        TopicPartition partition = new TopicPartition(consumerRecord.topic(), consumerRecord.partition());
        futures.computeIfAbsent(partition, p -> new ArrayList<>()).add(future);
    }
    return future;
}
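The reporter above builds a RecordHeaders instance by converting each Connect Header into raw bytes before attaching it to the synthesized ConsumerRecord. A minimal standalone sketch of that construct-and-iterate pattern follows; the header keys and values here are made up for illustration, not taken from the Kafka code above.

    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.common.header.Header;
    import org.apache.kafka.common.header.internals.RecordHeaders;

    public class RecordHeadersSketch {
        public static void main(String[] args) {
            RecordHeaders headers = new RecordHeaders();
            // add(String key, byte[] value) appends a header; duplicate keys are allowed
            headers.add("trace-id", "abc-123".getBytes(StandardCharsets.UTF_8));
            headers.add("origin", "orders-connector".getBytes(StandardCharsets.UTF_8));
            // RecordHeaders is Iterable<Header>, so the entries can be walked in insertion order
            for (Header header : headers) {
                System.out.println(header.key() + " -> " + new String(header.value(), StandardCharsets.UTF_8));
            }
        }
    }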
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
Class MockConsumerInterceptor, method onConsume().
@Override
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
    // This will ensure that we get the cluster metadata when onConsume is called for the first time
    // as subsequent compareAndSet operations will fail.
    CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> lst = new ArrayList<>();
        for (ConsumerRecord<String, String> record : records.records(tp)) {
            lst.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(), record.timestamp(),
                    record.timestampType(), record.serializedKeySize(), record.serializedValueSize(),
                    record.key(), record.value().toUpperCase(Locale.ROOT), new RecordHeaders(), Optional.empty()));
        }
        recordMap.put(tp, lst);
    }
    return new ConsumerRecords<>(recordMap);
}
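An interceptor like this only runs when it is listed in the consumer's interceptor.classes setting. The sketch below shows that wiring; the broker address, group id, and topic are placeholders, and the fully qualified interceptor name assumes MockConsumerInterceptor's location in Kafka's test sources, so outside that module you would point the property at your own ConsumerInterceptor implementation instead.

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class InterceptorWiringSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            // Assumed test-sources location; substitute your own ConsumerInterceptor if it is not on the classpath
            props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor");

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("example-topic"));   // placeholder topic
                // Values come back upper-cased because onConsume() rewrites each record before poll() returns
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                records.forEach(record -> System.out.println(record.value()));
            }
        }
    }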
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
Class WorkerSinkTaskThreadedTest, method expectOnePoll().
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectOnePoll() {
    // Currently the SinkTask's put() method will not be invoked unless we provide some data, so instead of
    // returning empty data, we return one record. The expectation is that the data will be ignored by the
    // response behavior specified using the return value of this method.
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        // "Sleep" so time will progress
        time.sleep(1L);
        ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP,
                        TIMESTAMP_TYPE, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()))));
        recordsReturned++;
        return records;
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, emptyHeaders(), RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, emptyHeaders(), RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
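The converter expectations above match on emptyHeaders(), a helper whose definition is not part of this excerpt. A plausible reconstruction, offered as an assumption rather than a verified excerpt, is simply:

    // Hypothetical reconstruction of the emptyHeaders() helper referenced above;
    // its definition is not shown in this excerpt.
    private RecordHeaders emptyHeaders() {
        return new RecordHeaders();
    }

EasyMock compares expectation arguments with equals(), and two empty RecordHeaders instances compare equal, so these expectations match the empty headers the worker passes to the converters for each polled record.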
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
Class WorkerSinkTaskThreadedTest, method expectRebalanceDuringPoll().
@SuppressWarnings("unchecked")
private IExpectationSetters<Object> expectRebalanceDuringPoll() {
    final List<TopicPartition> partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    final long startOffset = 40L;
    final Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, startOffset);
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        // "Sleep" so time will progress
        time.sleep(1L);
        sinkTaskContext.getValue().offset(offsets);
        rebalanceListener.getValue().onPartitionsAssigned(partitions);
        ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned, TIMESTAMP,
                        TIMESTAMP_TYPE, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()))));
        recordsReturned++;
        return records;
    });
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    consumer.seek(TOPIC_PARTITION, startOffset);
    EasyMock.expectLastCall();
    EasyMock.expect(keyConverter.toConnectData(TOPIC, emptyHeaders(), RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY));
    EasyMock.expect(valueConverter.toConnectData(TOPIC, emptyHeaders(), RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE));
    sinkTask.put(EasyMock.anyObject(Collection.class));
    return EasyMock.expectLastCall();
}
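This expectation models a rebalance arriving inside poll(): the rebalance listener fires onPartitionsAssigned, the task opens the new partitions, and the consumer is then sought to the offset the task requested through its context. Outside a mocked test, that seek-on-assignment pattern looks roughly like the sketch below; the broker, group, topic, and the 40L offset are placeholders, not taken from the Kafka code.

    import java.util.Collection;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class SeekOnAssignSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Collections.singletonList("example-topic"), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // flush or commit state for the partitions being taken away
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    // rewind each newly assigned partition to a desired starting offset (40L is arbitrary)
                    for (TopicPartition tp : partitions) {
                        consumer.seek(tp, 40L);
                    }
                }
            });
        }
    }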
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
Class Fetcher, method parseRecord().
/**
 * Parse the record entry, deserializing the key / value fields if necessary
 */
private ConsumerRecord<K, V> parseRecord(TopicPartition partition, RecordBatch batch, Record record) {
    try {
        long offset = record.offset();
        long timestamp = record.timestamp();
        Optional<Integer> leaderEpoch = maybeLeaderEpoch(batch.partitionLeaderEpoch());
        TimestampType timestampType = batch.timestampType();
        Headers headers = new RecordHeaders(record.headers());
        ByteBuffer keyBytes = record.key();
        byte[] keyByteArray = keyBytes == null ? null : Utils.toArray(keyBytes);
        K key = keyBytes == null ? null : this.keyDeserializer.deserialize(partition.topic(), headers, keyByteArray);
        ByteBuffer valueBytes = record.value();
        byte[] valueByteArray = valueBytes == null ? null : Utils.toArray(valueBytes);
        V value = valueBytes == null ? null : this.valueDeserializer.deserialize(partition.topic(), headers, valueByteArray);
        return new ConsumerRecord<>(partition.topic(), partition.partition(), offset, timestamp, timestampType,
                keyByteArray == null ? ConsumerRecord.NULL_SIZE : keyByteArray.length,
                valueByteArray == null ? ConsumerRecord.NULL_SIZE : valueByteArray.length,
                key, value, headers, leaderEpoch);
    } catch (RuntimeException e) {
        throw new RecordDeserializationException(partition, record.offset(),
                "Error deserializing key/value for partition " + partition + " at offset " + record.offset()
                + ". If needed, please seek past the record to continue consumption.", e);
    }
}
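Because parseRecord passes the reconstructed RecordHeaders into the Deserializer.deserialize(topic, headers, data) overload, a custom deserializer can branch on header values. A minimal sketch follows; the "encoding" header key is made up for this illustration and is not part of any Kafka API.

    import java.nio.charset.Charset;
    import java.nio.charset.StandardCharsets;
    import org.apache.kafka.common.header.Header;
    import org.apache.kafka.common.header.Headers;
    import org.apache.kafka.common.serialization.Deserializer;

    public class HeaderAwareStringDeserializer implements Deserializer<String> {

        @Override
        public String deserialize(String topic, byte[] data) {
            // Fallback used when no headers are available
            return data == null ? null : new String(data, StandardCharsets.UTF_8);
        }

        @Override
        public String deserialize(String topic, Headers headers, byte[] data) {
            if (data == null) {
                return null;
            }
            // "encoding" is a hypothetical header carrying the charset name of the payload
            Header encoding = headers.lastHeader("encoding");
            Charset charset = encoding == null
                    ? StandardCharsets.UTF_8
                    : Charset.forName(new String(encoding.value(), StandardCharsets.UTF_8));
            return new String(data, charset);
        }
    }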