Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.
From class Fetcher, method parseRecord:
/**
 * Parse the record entry, deserializing the key / value fields if necessary
 */
private ConsumerRecord<K, V> parseRecord(TopicPartition partition, RecordBatch batch, Record record) {
    try {
        long offset = record.offset();
        long timestamp = record.timestamp();
        TimestampType timestampType = batch.timestampType();
        Headers headers = new RecordHeaders(record.headers());
        ByteBuffer keyBytes = record.key();
        byte[] keyByteArray = keyBytes == null ? null : Utils.toArray(keyBytes);
        K key = keyBytes == null ? null : this.keyDeserializer.deserialize(partition.topic(), headers, keyByteArray);
        ByteBuffer valueBytes = record.value();
        byte[] valueByteArray = valueBytes == null ? null : Utils.toArray(valueBytes);
        V value = valueBytes == null ? null : this.valueDeserializer.deserialize(partition.topic(), headers, valueByteArray);
        return new ConsumerRecord<>(partition.topic(), partition.partition(), offset,
                timestamp, timestampType, record.checksumOrNull(),
                keyByteArray == null ? ConsumerRecord.NULL_SIZE : keyByteArray.length,
                valueByteArray == null ? ConsumerRecord.NULL_SIZE : valueByteArray.length,
                key, value, headers);
    } catch (RuntimeException e) {
        throw new SerializationException("Error deserializing key/value for partition " + partition +
                " at offset " + record.offset() + ". If needed, please seek past the record to continue consumption.", e);
    }
}
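The three-argument deserialize overload called above is part of the public Deserializer interface, so the same header-aware pattern can be exercised outside the Fetcher. A minimal standalone sketch, assuming a hypothetical topic name and header key:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.StringDeserializer;

public class HeaderAwareDeserializeSketch {
    public static void main(String[] args) {
        // Headers are built once per record and handed to both the key and value deserializers.
        Headers headers = new RecordHeaders();
        headers.add("trace-id", "abc-123".getBytes(StandardCharsets.UTF_8)); // hypothetical header key/value
        try (StringDeserializer valueDeserializer = new StringDeserializer()) {
            // Same overload the Fetcher invokes: (topic, headers, payload).
            String value = valueDeserializer.deserialize("orders", headers, "hello".getBytes(StandardCharsets.UTF_8));
            System.out.println(value);
        }
    }
}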
Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.
From class SourceNodeTest, method shouldProvideTopicHeadersAndDataToKeyDeserializer:
@Test
public void shouldProvideTopicHeadersAndDataToKeyDeserializer() {
    final SourceNode<String, String> sourceNode = new MockSourceNode<>(new String[] { "" }, new TheExtendedDeserializer(), new TheExtendedDeserializer());
    final RecordHeaders headers = new RecordHeaders();
    final String deserializeKey = sourceNode.deserializeKey("topic", headers, "data".getBytes(StandardCharsets.UTF_8));
    assertThat(deserializeKey, is("topic" + headers + "data"));
}
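TheExtendedDeserializer is defined elsewhere in the Kafka test sources; judging from the assertion, it concatenates the topic, the headers' string form, and the payload. A hypothetical reconstruction (class name and behavior assumed here, the real test class may differ):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;

public class TopicHeadersDataDeserializer implements Deserializer<String> {
    @Override
    public String deserialize(String topic, Headers headers, byte[] data) {
        // Produces "topic" + headers + "data", which is what the assertion above expects.
        return topic + headers + new String(data, StandardCharsets.UTF_8);
    }

    @Override
    public String deserialize(String topic, byte[] data) {
        return deserialize(topic, null, data);
    }
}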
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
From class MirrorSourceTaskTest, method testSerde:
@Test
public void testSerde() {
    byte[] key = new byte[] { 'a', 'b', 'c', 'd', 'e' };
    byte[] value = new byte[] { 'f', 'g', 'h', 'i', 'j', 'k' };
    Headers headers = new RecordHeaders();
    headers.add("header1", new byte[] { 'l', 'm', 'n', 'o' });
    headers.add("header2", new byte[] { 'p', 'q', 'r', 's', 't' });
    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>("topic1", 2, 3L, 4L,
            TimestampType.CREATE_TIME, 5, 6, key, value, headers, Optional.empty());
    MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(null, null, "cluster7", new DefaultReplicationPolicy(), 50);
    SourceRecord sourceRecord = mirrorSourceTask.convertRecord(consumerRecord);
    assertEquals("cluster7.topic1", sourceRecord.topic(), "Failure on cluster7.topic1 consumerRecord serde");
    assertEquals(2, sourceRecord.kafkaPartition().intValue(), "sourceRecord kafka partition is incorrect");
    assertEquals(new TopicPartition("topic1", 2), MirrorUtils.unwrapPartition(sourceRecord.sourcePartition()), "topic1 unwrapped from sourcePartition is incorrect");
    assertEquals(3L, MirrorUtils.unwrapOffset(sourceRecord.sourceOffset()).longValue(), "sourceRecord's sourceOffset is incorrect");
    assertEquals(4L, sourceRecord.timestamp().longValue(), "sourceRecord's timestamp is incorrect");
    assertEquals(key, sourceRecord.key(), "sourceRecord's key is incorrect");
    assertEquals(value, sourceRecord.value(), "sourceRecord's value is incorrect");
    assertEquals(headers.lastHeader("header1").value(), sourceRecord.headers().lastWithName("header1").value(), "sourceRecord's header1 is incorrect");
    assertEquals(headers.lastHeader("header2").value(), sourceRecord.headers().lastWithName("header2").value(), "sourceRecord's header2 is incorrect");
}
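The header assertions rely on RecordHeaders keeping insertion order and on lastHeader returning the most recently added value for a key. A small standalone sketch of that lookup behavior (header names here are illustrative):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderLookupSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("header1", "first".getBytes(StandardCharsets.UTF_8));
        headers.add("header1", "second".getBytes(StandardCharsets.UTF_8));
        // lastHeader returns the latest entry when the same key was added more than once.
        Header last = headers.lastHeader("header1");
        System.out.println(new String(last.value(), StandardCharsets.UTF_8)); // prints "second"
        // RecordHeaders is Iterable<Header>, so every entry can be walked in insertion order.
        for (Header header : headers) {
            System.out.println(header.key());
        }
    }
}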
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
From class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace:
@Test
public void shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace() {
    setup(false);
    final Processor<String, String, Windowed<String>, Change<Long>> processor = new KStreamSessionWindowAggregate<>(
            SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1L)),
            STORE_NAME, initializer, aggregator, sessionMerger).get();
    processor.init(context);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        // dummy record to establish stream time = 0
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("dummy", "dummy", 0L));
        // record arrives on time, should not be skipped
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("OnTime1", "1", 0L));
        // dummy record to advance stream time = 11, 10 for gap time plus 1 to place at edge of window
        context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("dummy", "dummy", 11L));
        // delayed record arrives on time, should not be skipped
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("OnTime2", "1", 0L));
        // dummy record to advance stream time = 12, 10 for gap time plus 2 to place outside window
        context.setRecordContext(new ProcessorRecordContext(12, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("dummy", "dummy", 12L));
        // delayed record arrives late
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("Late1", "1", 0L));
        assertThat(appender.getMessages(), hasItem("Skipping record for expired window." +
                " topic=[topic] partition=[-3] offset=[-2] timestamp=[0] window=[0,0] expiration=[1] streamTime=[12]"));
    }
    final MetricName dropTotal = new MetricName("dropped-records-total", "stream-task-metrics",
            "The total number of dropped records", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    final MetricName dropRate = new MetricName("dropped-records-rate", "stream-task-metrics",
            "The average number of dropped records per second", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    assertThat(metrics.metrics().get(dropTotal).metricValue(), is(1.0));
    assertThat((Double) metrics.metrics().get(dropRate).metricValue(), greaterThan(0.0));
}
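Every synthetic input above reuses the same pattern: an empty RecordHeaders plus sentinel offset/partition values in the ProcessorRecordContext, with only the timestamp varying to drive stream time. Extracted as a small helper for readability (a sketch only; ProcessorRecordContext is an internal Streams class):

import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.streams.processor.internals.ProcessorRecordContext;

// Builds the placeholder record context the test installs before each process() call.
static ProcessorRecordContext contextAt(final long timestamp) {
    return new ProcessorRecordContext(timestamp, -2, -3, "topic", new RecordHeaders());
}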
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
From class KTableTransformValuesTest, method shouldTransformOnGetIfNotMaterialized:
@Test
public void shouldTransformOnGetIfNotMaterialized() {
    final KTableTransformValues<String, String, String> transformValues =
            new KTableTransformValues<>(parent, new ExclamationValueTransformerSupplier(), null);
    expect(parent.valueGetterSupplier()).andReturn(parentGetterSupplier);
    expect(parentGetterSupplier.get()).andReturn(parentGetter);
    expect(parentGetter.get("Key")).andReturn(ValueAndTimestamp.make("Value", 73L));
    final ProcessorRecordContext recordContext = new ProcessorRecordContext(42L, 23L, -1, "foo", new RecordHeaders());
    expect(context.recordContext()).andReturn(recordContext);
    context.setRecordContext(new ProcessorRecordContext(73L, -1L, -1, null, new RecordHeaders()));
    expectLastCall();
    context.setRecordContext(recordContext);
    expectLastCall();
    replay(parent, parentGetterSupplier, parentGetter, context);
    final KTableValueGetter<String, String> getter = transformValues.view().get();
    getter.init(context);
    final String result = getter.get("Key").value();
    assertThat(result, is("Key->Value!"));
    verify(context);
}
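The mocked value getter returns ValueAndTimestamp.make("Value", 73L), and that 73L timestamp is what the temporary record context set above carries while the transformer runs. The pairing itself comes from the public Streams API, sketched here in isolation:

import org.apache.kafka.streams.state.ValueAndTimestamp;

// ValueAndTimestamp bundles the stored value with the timestamp the getter observed.
final ValueAndTimestamp<String> stored = ValueAndTimestamp.make("Value", 73L);
System.out.println(stored.value() + " @ " + stored.timestamp()); // prints "Value @ 73"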