
Example 86 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.

From the class Fetcher, method parseRecord:

/**
 * Parse the record entry, deserializing the key / value fields if necessary
 */
private ConsumerRecord<K, V> parseRecord(TopicPartition partition, RecordBatch batch, Record record) {
    try {
        long offset = record.offset();
        long timestamp = record.timestamp();
        TimestampType timestampType = batch.timestampType();
        Headers headers = new RecordHeaders(record.headers());
        ByteBuffer keyBytes = record.key();
        byte[] keyByteArray = keyBytes == null ? null : Utils.toArray(keyBytes);
        K key = keyBytes == null ? null : this.keyDeserializer.deserialize(partition.topic(), headers, keyByteArray);
        ByteBuffer valueBytes = record.value();
        byte[] valueByteArray = valueBytes == null ? null : Utils.toArray(valueBytes);
        V value = valueBytes == null ? null : this.valueDeserializer.deserialize(partition.topic(), headers, valueByteArray);
        return new ConsumerRecord<>(partition.topic(), partition.partition(), offset,
                timestamp, timestampType, record.checksumOrNull(),
                keyByteArray == null ? ConsumerRecord.NULL_SIZE : keyByteArray.length,
                valueByteArray == null ? ConsumerRecord.NULL_SIZE : valueByteArray.length,
                key, value, headers);
    } catch (RuntimeException e) {
        throw new SerializationException("Error deserializing key/value for partition " + partition + " at offset " + record.offset() + ". If needed, please seek past the record to continue consumption.", e);
    }
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) SerializationException(org.apache.kafka.common.errors.SerializationException) Headers(org.apache.kafka.common.header.Headers) TimestampType(org.apache.kafka.common.record.TimestampType) ByteBuffer(java.nio.ByteBuffer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)
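
For context, a minimal standalone sketch of how the Headers built in parseRecord are typically read back by application code. It assumes only the public Headers/RecordHeaders API (add, iteration, key/value); the "trace-id" header name is made up for illustration and is not part of Fetcher.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeadersReadbackSketch {
    public static void main(String[] args) {
        // Build a Headers instance the way parseRecord does, but from an
        // explicit key/value pair instead of a fetched Record.
        // "trace-id" is a hypothetical header name used purely for illustration.
        Headers headers = new RecordHeaders();
        headers.add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));

        // Application code reads the same headers back off the returned
        // ConsumerRecord by iterating them as Header key/value pairs.
        for (Header header : headers) {
            System.out.println(header.key() + " = "
                    + new String(header.value(), StandardCharsets.UTF_8));
        }
    }
}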

Example 87 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project apache-kafka-on-k8s by banzaicloud.

From the class SourceNodeTest, method shouldProvideTopicHeadersAndDataToKeyDeserializer:

@Test
public void shouldProvideTopicHeadersAndDataToKeyDeserializer() {
    final SourceNode<String, String> sourceNode = new MockSourceNode<>(new String[] { "" }, new TheExtendedDeserializer(), new TheExtendedDeserializer());
    final RecordHeaders headers = new RecordHeaders();
    final String deserializeKey = sourceNode.deserializeKey("topic", headers, "data".getBytes(StandardCharsets.UTF_8));
    assertThat(deserializeKey, is("topic" + headers + "data"));
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) MockSourceNode(org.apache.kafka.test.MockSourceNode) Test(org.junit.Test)
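
The assertion expects the deserialized key to be the topic, the headers' toString, and the payload concatenated, so TheExtendedDeserializer presumably behaves along the following lines. This is a hypothetical sketch written against the current Deserializer API, where the header-aware overload is a default method; the original project may implement the older ExtendedDeserializer interface instead, and the class name and concatenation are inferred from the assertion, not taken from the project.

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.serialization.Deserializer;

// Hypothetical header-aware deserializer mirroring what the test's
// assertion implies; not the project's actual TheExtendedDeserializer.
public class TopicHeadersDataDeserializer implements Deserializer<String> {

    @Override
    public String deserialize(final String topic, final byte[] data) {
        return new String(data, StandardCharsets.UTF_8);
    }

    @Override
    public String deserialize(final String topic, final Headers headers, final byte[] data) {
        // Concatenate topic, headers and payload, matching the expected "topic" + headers + "data".
        return topic + headers + new String(data, StandardCharsets.UTF_8);
    }
}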

Example 88 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class MirrorSourceTaskTest, method testSerde:

@Test
public void testSerde() {
    byte[] key = new byte[] { 'a', 'b', 'c', 'd', 'e' };
    byte[] value = new byte[] { 'f', 'g', 'h', 'i', 'j', 'k' };
    Headers headers = new RecordHeaders();
    headers.add("header1", new byte[] { 'l', 'm', 'n', 'o' });
    headers.add("header2", new byte[] { 'p', 'q', 'r', 's', 't' });
    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>("topic1", 2, 3L, 4L, TimestampType.CREATE_TIME, 5, 6, key, value, headers, Optional.empty());
    MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(null, null, "cluster7", new DefaultReplicationPolicy(), 50);
    SourceRecord sourceRecord = mirrorSourceTask.convertRecord(consumerRecord);
    assertEquals("cluster7.topic1", sourceRecord.topic(), "Failure on cluster7.topic1 consumerRecord serde");
    assertEquals(2, sourceRecord.kafkaPartition().intValue(), "sourceRecord kafka partition is incorrect");
    assertEquals(new TopicPartition("topic1", 2), MirrorUtils.unwrapPartition(sourceRecord.sourcePartition()), "topic1 unwrapped from sourcePartition is incorrect");
    assertEquals(3L, MirrorUtils.unwrapOffset(sourceRecord.sourceOffset()).longValue(), "sourceRecord's sourceOffset is incorrect");
    assertEquals(4L, sourceRecord.timestamp().longValue(), "sourceRecord's timestamp is incorrect");
    assertEquals(key, sourceRecord.key(), "sourceRecord's key is incorrect");
    assertEquals(value, sourceRecord.value(), "sourceRecord's value is incorrect");
    assertEquals(headers.lastHeader("header1").value(), sourceRecord.headers().lastWithName("header1").value(), "sourceRecord's header1 is incorrect");
    assertEquals(headers.lastHeader("header2").value(), sourceRecord.headers().lastWithName("header2").value(), "sourceRecord's header2 is incorrect");
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Headers(org.apache.kafka.common.header.Headers) TopicPartition(org.apache.kafka.common.TopicPartition) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.jupiter.api.Test)
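
The header assertions above rely on Headers.lastHeader returning the most recently added header for a given key. A small standalone sketch of that behaviour, using made-up header values:

import java.nio.charset.StandardCharsets;

import org.apache.kafka.common.header.internals.RecordHeaders;

public class LastHeaderSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add("header1", "first".getBytes(StandardCharsets.UTF_8));
        headers.add("header1", "second".getBytes(StandardCharsets.UTF_8));

        // lastHeader returns the most recently added value for the key,
        // so this prints "second".
        System.out.println(new String(headers.lastHeader("header1").value(),
                StandardCharsets.UTF_8));
    }
}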

Example 89 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace:

@Test
public void shouldLogAndMeterWhenSkippingLateRecordWithNonzeroGrace() {
    setup(false);
    final Processor<String, String, Windowed<String>, Change<Long>> processor =
        new KStreamSessionWindowAggregate<>(
            SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1L)),
            STORE_NAME, initializer, aggregator, sessionMerger).get();
    processor.init(context);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        // dummy record to establish stream time = 0
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("dummy", "dummy", 0L));
        // record arrives on time, should not be skipped
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("OnTime1", "1", 0L));
        // dummy record to advance stream time = 11, 10 for gap time plus 1 to place at edge of window
        context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("dummy", "dummy", 11L));
        // delayed record arrives on time, should not be skipped
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("OnTime2", "1", 0L));
        // dummy record to advance stream time = 12, 10 for gap time plus 2 to place outside window
        context.setRecordContext(new ProcessorRecordContext(12, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("dummy", "dummy", 12L));
        // delayed record arrives late
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("Late1", "1", 0L));
        assertThat(appender.getMessages(), hasItem("Skipping record for expired window." + " topic=[topic] partition=[-3] offset=[-2] timestamp=[0] window=[0,0] expiration=[1] streamTime=[12]"));
    }
    final MetricName dropTotal;
    final MetricName dropRate;
    dropTotal = new MetricName("dropped-records-total", "stream-task-metrics", "The total number of dropped records", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    dropRate = new MetricName("dropped-records-rate", "stream-task-metrics", "The average number of dropped records per second", mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    assertThat(metrics.metrics().get(dropTotal).metricValue(), is(1.0));
    assertThat((Double) metrics.metrics().get(dropRate).metricValue(), greaterThan(0.0));
}
Also used: Windowed(org.apache.kafka.streams.kstream.Windowed) MetricName(org.apache.kafka.common.MetricName) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)

Example 90 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

From the class KTableTransformValuesTest, method shouldTransformOnGetIfNotMaterialized:

@Test
public void shouldTransformOnGetIfNotMaterialized() {
    final KTableTransformValues<String, String, String> transformValues = new KTableTransformValues<>(parent, new ExclamationValueTransformerSupplier(), null);
    expect(parent.valueGetterSupplier()).andReturn(parentGetterSupplier);
    expect(parentGetterSupplier.get()).andReturn(parentGetter);
    expect(parentGetter.get("Key")).andReturn(ValueAndTimestamp.make("Value", 73L));
    final ProcessorRecordContext recordContext = new ProcessorRecordContext(42L, 23L, -1, "foo", new RecordHeaders());
    expect(context.recordContext()).andReturn(recordContext);
    context.setRecordContext(new ProcessorRecordContext(73L, -1L, -1, null, new RecordHeaders()));
    expectLastCall();
    context.setRecordContext(recordContext);
    expectLastCall();
    replay(parent, parentGetterSupplier, parentGetter, context);
    final KTableValueGetter<String, String> getter = transformValues.view().get();
    getter.init(context);
    final String result = getter.get("Key").value();
    assertThat(result, is("Key->Value!"));
    verify(context);
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) Test(org.junit.Test)

Aggregations

Types that appear alongside RecordHeaders in this collection of examples, with usage counts:

RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 149
Test (org.junit.Test): 107
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 49
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 41
Headers (org.apache.kafka.common.header.Headers): 33
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 24
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 24
TopicPartition (org.apache.kafka.common.TopicPartition): 22
Position (org.apache.kafka.streams.query.Position): 17
ArrayList (java.util.ArrayList): 12
HashMap (java.util.HashMap): 12
ByteBuffer (java.nio.ByteBuffer): 11
Struct (org.apache.kafka.connect.data.Struct): 11
Test (org.junit.jupiter.api.Test): 11
Header (org.apache.kafka.common.header.Header): 10
LinkedHashMap (java.util.LinkedHashMap): 9
Bytes (org.apache.kafka.common.utils.Bytes): 9
StreamsException (org.apache.kafka.streams.errors.StreamsException): 9
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 8
Metrics (org.apache.kafka.common.metrics.Metrics): 8