Example 91 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

Class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace:

@Test
public void shouldLogAndMeterWhenSkippingLateRecordWithZeroGrace() {
    setup(false);
    final Processor<String, String, Windowed<String>, Change<Long>> processor =
        new KStreamSessionWindowAggregate<>(
            SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(0L)),
            STORE_NAME,
            initializer,
            aggregator,
            sessionMerger
        ).get();
    processor.init(context);
    // dummy record to establish stream time = 0
    context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
    processor.process(new Record<>("dummy", "dummy", 0L));
    // record arrives on time, should not be skipped
    context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
    processor.process(new Record<>("OnTime1", "1", 0L));
    // dummy record to advance stream time to 11: the 10 ms gap plus 1 ms to land outside the window
    context.setRecordContext(new ProcessorRecordContext(11, -2, -3, "topic", new RecordHeaders()));
    processor.process(new Record<>("dummy", "dummy", 11L));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        // record is late
        context.setRecordContext(new ProcessorRecordContext(0, -2, -3, "topic", new RecordHeaders()));
        processor.process(new Record<>("Late1", "1", 0L));
        assertThat(appender.getMessages(), hasItem(
            "Skipping record for expired window. topic=[topic] partition=[-3] offset=[-2]"
                + " timestamp=[0] window=[0,0] expiration=[1] streamTime=[11]"));
    }
    final MetricName dropTotal = new MetricName(
        "dropped-records-total",
        "stream-task-metrics",
        "The total number of dropped records",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    final MetricName dropRate = new MetricName(
        "dropped-records-rate",
        "stream-task-metrics",
        "The average number of dropped records per second",
        mkMap(mkEntry("thread-id", threadId), mkEntry("task-id", "0_0")));
    assertThat(metrics.metrics().get(dropTotal).metricValue(), is(1.0));
    assertThat((Double) metrics.metrics().get(dropRate).metricValue(), greaterThan(0.0));
}
Also used: Windowed(org.apache.kafka.streams.kstream.Windowed) MetricName(org.apache.kafka.common.MetricName) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Test(org.junit.Test)
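
The setRecordContext/process pair above repeats four times with the same placeholder metadata. A hedged helper sketch (not part of the actual test class, and assuming processor and context were promoted to fields) makes the pattern explicit; offset -2 and partition -3 are dummies that the expected log line later asserts on:

private void pipe(final String key, final String value, final long timestamp) {
    // Pin the metadata the processor will observe, then process a record whose
    // timestamp either advances stream time or lags behind it.
    context.setRecordContext(new ProcessorRecordContext(timestamp, -2, -3, "topic", new RecordHeaders()));
    processor.process(new Record<>(key, value, timestamp));
}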

Example 92 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

Class MemoryRecordsTest, method testHasRoomForMethodWithHeaders:

@ParameterizedTest
@ArgumentsSource(MemoryRecordsArgumentsProvider.class)
public void testHasRoomForMethodWithHeaders(Args args) {
    byte magic = args.magic;
    MemoryRecordsBuilder builder = MemoryRecords.builder(
        ByteBuffer.allocate(120), magic, args.compression, TimestampType.CREATE_TIME, 0L);
    builder.append(logAppendTime, "key".getBytes(), "value".getBytes());
    RecordHeaders headers = new RecordHeaders();
    for (int i = 0; i < 10; i++) {
        headers.add("hello", "world.world".getBytes());
    }
    // Make sure that hasRoomFor accounts for header sizes by letting a record without
    // headers pass, but stopping a record with a large number of headers.
    assertTrue(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS));
    if (magic < MAGIC_VALUE_V2) {
        assertTrue(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), headers.toArray()));
    } else {
        assertFalse(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), headers.toArray()));
    }
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) ArgumentsSource(org.junit.jupiter.params.provider.ArgumentsSource)
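
Two details the test relies on: RecordHeaders permits duplicate keys, so "hello" can be added ten times, and headers only count against batch size from magic v2 onward because the older message formats have no header field. A minimal self-contained sketch of the duplicate-key behavior (illustrative, not taken from the Kafka test suite):

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;
import java.nio.charset.StandardCharsets;

public class RecordHeadersDuplicates {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        for (int i = 0; i < 10; i++) {
            // Duplicate keys are allowed; each add() appends another entry.
            headers.add("hello", "world.world".getBytes(StandardCharsets.UTF_8));
        }
        // toArray() returns all ten entries in insertion order.
        for (Header header : headers.toArray()) {
            System.out.println(header.key() + " -> " + new String(header.value(), StandardCharsets.UTF_8));
        }
    }
}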

Example 93 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

Class WorkerSourceTask, method convertHeaderFor:

private RecordHeaders convertHeaderFor(SourceRecord record) {
    Headers headers = record.headers();
    RecordHeaders result = new RecordHeaders();
    if (headers != null) {
        String topic = record.topic();
        // Render each Connect header to raw bytes with the configured HeaderConverter.
        for (Header header : headers) {
            String key = header.key();
            byte[] rawHeader = headerConverter.fromConnectHeader(topic, key, header.schema(), header.value());
            result.add(key, rawHeader);
        }
    }
    return result;
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Header(org.apache.kafka.connect.header.Header) Headers(org.apache.kafka.connect.header.Headers)
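
The reverse mapping (Kafka record headers back into Connect Headers) has the same shape. A hedged sketch, assuming the same headerConverter field plus ConnectHeaders and SchemaAndValue from the Connect API; this illustrates the round trip and is not the actual sink-side code:

private Headers convertHeadersBack(String topic, RecordHeaders recordHeaders) {
    Headers result = new ConnectHeaders();
    for (org.apache.kafka.common.header.Header rawHeader : recordHeaders) {
        // toConnectHeader parses the raw bytes back into a schema'd value.
        SchemaAndValue schemaAndValue = headerConverter.toConnectHeader(topic, rawHeader.key(), rawHeader.value());
        result.add(rawHeader.key(), schemaAndValue);
    }
    return result;
}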

Example 94 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

Class ProcessorContextImpl, method logChange:

@Override
public void logChange(final String storeName,
                      final Bytes key,
                      final byte[] value,
                      final long timestamp,
                      final Position position) {
    throwUnsupportedOperationExceptionIfStandby("logChange");
    final TopicPartition changelogPartition = stateManager().registeredChangelogPartitionFor(storeName);
    final Headers headers;
    if (!consistencyEnabled) {
        headers = null;
    } else {
        // Add the vector clock to the header part of every record
        headers = new RecordHeaders();
        headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
        headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY, PositionSerde.serialize(position).array()));
    }
    collector.send(changelogPartition.topic(), key, value, headers, changelogPartition.partition(),
        timestamp, BYTES_KEY_SERIALIZER, BYTEARRAY_VALUE_SERIALIZER);
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) TopicPartition(org.apache.kafka.common.TopicPartition) Headers(org.apache.kafka.common.header.Headers) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader)
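
On the read side, the vector clock travels back out of the same header. A hedged sketch of how a changelog consumer could recover it, assuming the helper constants and PositionSerde used above and a ConsumerRecord<byte[], byte[]> named record:

Header positionHeader = record.headers().lastHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
if (positionHeader != null) {
    // The header value is the serialized Position written by logChange.
    Position position = PositionSerde.deserialize(ByteBuffer.wrap(positionHeader.value()));
}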

Example 95 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

Class ProcessorRecordContext, method deserialize:

public static ProcessorRecordContext deserialize(final ByteBuffer buffer) {
    final long timestamp = buffer.getLong();
    final long offset = buffer.getLong();
    final String topic;
    {
        // we believe the topic will never be null when we serialize
        final byte[] topicBytes = requireNonNull(getNullableSizePrefixedArray(buffer));
        topic = new String(topicBytes, UTF_8);
    }
    final int partition = buffer.getInt();
    final int headerCount = buffer.getInt();
    final Headers headers;
    if (headerCount == -1) {
        // keep for backward compatibility
        headers = new RecordHeaders();
    } else {
        final Header[] headerArr = new Header[headerCount];
        for (int i = 0; i < headerCount; i++) {
            final byte[] keyBytes = requireNonNull(getNullableSizePrefixedArray(buffer));
            final byte[] valueBytes = getNullableSizePrefixedArray(buffer);
            headerArr[i] = new RecordHeader(new String(keyBytes, UTF_8), valueBytes);
        }
        headers = new RecordHeaders(headerArr);
    }
    return new ProcessorRecordContext(timestamp, offset, partition, topic, headers);
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Header(org.apache.kafka.common.header.Header) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Headers(org.apache.kafka.common.header.Headers)
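
The method only makes sense next to the framing its counterpart writes. A hedged sketch of that layout, inferred from the getLong/getInt/size-prefixed reads above rather than copied from the real serializer:

// Inferred wire layout (one record context):
//   long timestamp | long offset | sizePrefixed(topicBytes) | int partition
//   | int headerCount | headerCount x (sizePrefixed(keyBytes), nullableSizePrefixed(valueBytes))
static void putNullableSizePrefixed(final ByteBuffer buffer, final byte[] bytes) {
    if (bytes == null) {
        buffer.putInt(-1); // a -1 length marks a null array, matching getNullableSizePrefixedArray
    } else {
        buffer.putInt(bytes.length);
        buffer.put(bytes);
    }
}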

Aggregations

RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 149
Test (org.junit.Test): 107
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 49
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 41
Headers (org.apache.kafka.common.header.Headers): 33
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 24
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 24
TopicPartition (org.apache.kafka.common.TopicPartition): 22
Position (org.apache.kafka.streams.query.Position): 17
ArrayList (java.util.ArrayList): 12
HashMap (java.util.HashMap): 12
ByteBuffer (java.nio.ByteBuffer): 11
Struct (org.apache.kafka.connect.data.Struct): 11
Test (org.junit.jupiter.api.Test): 11
Header (org.apache.kafka.common.header.Header): 10
LinkedHashMap (java.util.LinkedHashMap): 9
Bytes (org.apache.kafka.common.utils.Bytes): 9
StreamsException (org.apache.kafka.streams.errors.StreamsException): 9
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 8
Metrics (org.apache.kafka.common.metrics.Metrics): 8