Example 21 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

The class TimeOrderedKeyValueBufferTest, method shouldRestoreV2Format.

@Test
public void shouldRestoreV2Format() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    final RecordBatchingStateRestoreCallback stateRestoreCallback = (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));
    final RecordHeaders v2FlagHeaders = new RecordHeaders(new Header[] { new RecordHeader("v", new byte[] { (byte) 2 }) });
    // These serialized formats were captured by running version 2.3 code.
    // They verify that an upgrade from 2.3 will work.
    // Do not change them.
    final String toDeleteBinary = "0000000000000000000000000000000000000005746F70696300000000FFFFFFFF0000000EFFFFFFFF00000006646F6F6D6564FFFFFFFF0000000000000000";
    final String asdfBinary = "0000000000000001000000000000000000000005746F70696300000000FFFFFFFF0000000CFFFFFFFF0000000471776572FFFFFFFF0000000000000002";
    final String zxcvBinary1 = "0000000000000002000000000000000000000005746F70696300000000FFFFFFFF000000140000000749474E4F52454400000005336F34696D0000000870726576696F75730000000000000001";
    final String zxcvBinary2 = "0000000000000003000000000000000000000005746F70696300000000FFFFFFFF0000001100000005336F34696D000000046E6578740000000870726576696F75730000000000000001";
    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 9999, TimestampType.CREATE_TIME, -1, -1,
            "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 99, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary1), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 100, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary2), v2FlagHeaders, Optional.empty())));
    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(142L));
    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));
    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(95L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));
    // flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);
    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog)
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored
    // * The record topic is set to the original input topic, *not* the changelog topic
    // * The record offset preserves the original input record's offset, *not* the offset of the changelog record
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "3o4im"), getContext(3L)),
        new Eviction<>("asdf", new Change<>("qwer", null), getContext(1L)))));
    cleanup(context, buffer);
}
Also used : Change(org.apache.kafka.streams.kstream.internals.Change), ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord), LinkedList(java.util.LinkedList), RecordBatchingStateRestoreCallback(org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback), RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext), Eviction(org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction), MockInternalProcessorContext(org.apache.kafka.test.MockInternalProcessorContext), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Test(org.junit.Test)
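
The "v" header above is the version flag the time-ordered buffer writes into its changelog so that a restoring instance knows which serialization format each value uses. As a minimal standalone sketch of writing and reading such a flag (the fallback-to-version-1 dispatch is our illustration, not the actual Kafka Streams logic):

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class VersionFlagSketch {
    public static void main(String[] args) {
        // Write side: attach a one-byte format-version flag to the changelog record.
        Headers headers = new RecordHeaders(new Header[] { new RecordHeader("v", new byte[] { (byte) 2 }) });

        // Read side: a missing flag would indicate the oldest format; otherwise
        // the byte value selects which deserializer to use.
        Header flag = headers.lastHeader("v");
        byte version = (flag == null) ? (byte) 1 : flag.value()[0];
        System.out.println("changelog value format version: " + version);
    }
}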

Example 22 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

The class AbstractRocksDBSegmentedBytesStoreTest, method getChangelogRecordsMultipleTopics.

private List<ConsumerRecord<byte[], byte[]>> getChangelogRecordsMultipleTopics() {
    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    final Headers headers = new RecordHeaders();
    Position position1 = Position.emptyPosition();
    position1 = position1.withComponent("A", 0, 1);
    // The version header is added once and shared by every record; the position
    // header is removed and re-added below because Headers has no replace operation.
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[0])).get(), serializeValue(50L), headers, Optional.empty()));
    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("B", 0, 2);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[2])).get(), serializeValue(100L), headers, Optional.empty()));
    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("A", 0, 3);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[3])).get(), serializeValue(200L), headers, Optional.empty()));
    return records;
}
Also used : Windowed(org.apache.kafka.streams.kstream.Windowed), RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), Position(org.apache.kafka.streams.query.Position), Headers(org.apache.kafka.common.header.Headers), ArrayList(java.util.ArrayList), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)
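
Note the remove-then-add dance around CHANGELOG_POSITION_HEADER_KEY: Headers keeps multiple values per key and has no replace operation, so updating a header value means removing the old entry first. A self-contained sketch of that pattern, with an illustrative key and payload standing in for the real position serde:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderUpdateSketch {
    // Illustrative key; the test above uses ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY.
    private static final String POSITION_KEY = "cp";

    public static void main(String[] args) {
        Headers headers = new RecordHeaders();
        headers.add(new RecordHeader(POSITION_KEY, "A:0:1".getBytes(StandardCharsets.UTF_8)));

        // Headers.add() appends rather than replaces, so without the remove()
        // the same key would appear twice on the next record.
        headers.remove(POSITION_KEY);
        headers.add(new RecordHeader(POSITION_KEY, "B:0:2".getBytes(StandardCharsets.UTF_8)));

        for (Header h : headers) {
            System.out.println(h.key() + " = " + new String(h.value(), StandardCharsets.UTF_8));
        }
    }
}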

Example 23 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.

The class TestTopicsTest, method testWithHeaders.

@Test
public void testWithHeaders() {
    long baseTime = 3;
    final Headers headers = new RecordHeaders(new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    });
    final TestInputTopic<Long, String> inputTopic = testDriver.createInputTopic(INPUT_TOPIC, longSerde.serializer(), stringSerde.serializer());
    final TestOutputTopic<Long, String> outputTopic = testDriver.createOutputTopic(OUTPUT_TOPIC, longSerde.deserializer(), stringSerde.deserializer());
    inputTopic.pipeInput(new TestRecord<>(1L, "Hello", headers));
    assertThat(outputTopic.readRecord(), allOf(
        hasProperty("key", equalTo(1L)),
        hasProperty("value", equalTo("Hello")),
        hasProperty("headers", equalTo(headers))));
    inputTopic.pipeInput(new TestRecord<>(2L, "Kafka", headers, ++baseTime));
    assertThat(outputTopic.readRecord(), is(equalTo(new TestRecord<>(2L, "Kafka", headers, baseTime))));
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), Headers(org.apache.kafka.common.header.Headers), CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Test(org.junit.jupiter.api.Test)
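
One detail worth calling out: the "bar" header is created with a null value, which Kafka allows. Readers therefore have to distinguish a missing header from a present-but-null one before dereferencing. A minimal sketch of that defensive lookup, independent of the test driver:

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class NullHeaderSketch {
    public static void main(String[] args) {
        Headers headers = new RecordHeaders();
        headers.add(new RecordHeader("bar", null));  // a null value is legal

        Header bar = headers.lastHeader("bar");
        // Guard both cases: header absent entirely vs. present with a null value.
        if (bar == null) {
            System.out.println("no 'bar' header");
        } else if (bar.value() == null) {
            System.out.println("'bar' present with null value");
        } else {
            System.out.println("'bar' = " + new String(bar.value()));
        }
    }
}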

Example 24 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project beam by apache.

The class NestedPayloadKafkaTable, method transformOutput.

// Suppress nullability warnings: ProducerRecord is supposed to accept null arguments.
@SuppressWarnings("argument.type.incompatible")
@VisibleForTesting
ProducerRecord<byte[], byte[]> transformOutput(Row row) {
    row = castRow(row, row.getSchema(), schema);
    String topic = Iterables.getOnlyElement(getTopics());
    byte[] key = null;
    byte[] payload;
    List<Header> headers = ImmutableList.of();
    Long timestampMillis = null;
    if (schema.hasField(Schemas.MESSAGE_KEY_FIELD)) {
        key = row.getBytes(Schemas.MESSAGE_KEY_FIELD);
    }
    if (schema.hasField(Schemas.EVENT_TIMESTAMP_FIELD)) {
        ReadableDateTime time = row.getDateTime(Schemas.EVENT_TIMESTAMP_FIELD);
        if (time != null) {
            timestampMillis = time.getMillis();
        }
    }
    if (schema.hasField(Schemas.HEADERS_FIELD)) {
        Collection<Row> headerRows = checkArgumentNotNull(row.getArray(Schemas.HEADERS_FIELD));
        ImmutableList.Builder<Header> headersBuilder = ImmutableList.builder();
        headerRows.forEach(entry -> {
            String headerKey = checkArgumentNotNull(entry.getString(Schemas.HEADERS_KEY_FIELD));
            Collection<byte[]> values = checkArgumentNotNull(entry.getArray(Schemas.HEADERS_VALUES_FIELD));
            values.forEach(value -> headersBuilder.add(new RecordHeader(headerKey, value)));
        });
        headers = headersBuilder.build();
    }
    if (payloadSerializer == null) {
        payload = row.getBytes(Schemas.PAYLOAD_FIELD);
    } else {
        payload = payloadSerializer.serialize(checkArgumentNotNull(row.getRow(Schemas.PAYLOAD_FIELD)));
    }
    return new ProducerRecord<>(topic, null, timestampMillis, key, payload, headers);
}
Also used : ReadableDateTime(org.joda.time.ReadableDateTime), ImmutableList(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), Header(org.apache.kafka.common.header.Header), ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), Cast.castRow(org.apache.beam.sdk.schemas.transforms.Cast.castRow), Row(org.apache.beam.sdk.values.Row), VisibleForTesting(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.annotations.VisibleForTesting)
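
The final line relies on the ProducerRecord constructor that takes headers directly; passing null for the partition leaves the choice to the partitioner, and a null timestamp lets the producer assign one. A minimal standalone sketch of the same construction (the topic, key, value, and trace-id header are placeholders of ours):

import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;

public class ProducerRecordWithHeadersSketch {
    public static void main(String[] args) {
        List<Header> headers = List.of(
                new RecordHeader("trace-id", "abc123".getBytes(StandardCharsets.UTF_8)));

        // topic, partition (null lets the partitioner decide), timestamp (null lets
        // the producer assign one), key, value, headers
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                "example-topic", null, null,
                "k".getBytes(StandardCharsets.UTF_8),
                "v".getBytes(StandardCharsets.UTF_8),
                headers);
        System.out.println(record);
    }
}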

Example 25 with RecordHeader

Use of org.apache.kafka.common.header.internals.RecordHeader in project hono by eclipse.

The class KafkaBasedCommandSenderTest, method commandResponseRecord.

private ConsumerRecord<String, Buffer> commandResponseRecord(final String tenantId, final String deviceId,
        final String correlationId, final Integer status, final Buffer payload) {
    final List<Header> headers = new ArrayList<>();
    headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_TENANT_ID, tenantId.getBytes()));
    headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_DEVICE_ID, deviceId.getBytes()));
    headers.add(new RecordHeader(MessageHelper.SYS_PROPERTY_CORRELATION_ID, correlationId.getBytes()));
    if (status != null) {
        headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_STATUS, String.valueOf(status).getBytes()));
    }
    return new ConsumerRecord<>(
        new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString(),
        0, 0, -1L, TimestampType.NO_TIMESTAMP_TYPE, -1L, -1, -1,
        deviceId, payload, new RecordHeaders(headers.toArray(Header[]::new)));
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders), Header(org.apache.kafka.common.header.Header), RecordHeader(org.apache.kafka.common.header.internals.RecordHeader), ArrayList(java.util.ArrayList), HonoTopic(org.eclipse.hono.client.kafka.HonoTopic), ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)
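
On the consuming side, those string-valued headers come back out through Headers.lastHeader. A small sketch of that lookup; the helper name and the "status" key used in main are ours, not Hono's API:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;

public class HeaderLookupSketch {
    // Hypothetical helper: decode a header as UTF-8, or return null if it is absent.
    static String headerAsString(ConsumerRecord<?, ?> record, String key) {
        Header header = record.headers().lastHeader(key);
        return (header == null || header.value() == null)
                ? null
                : new String(header.value(), StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        ConsumerRecord<String, String> record = new ConsumerRecord<>("t", 0, 0L, "k", "v");
        // This bare record carries no headers, so the lookup returns null.
        System.out.println(headerAsString(record, "status"));
    }
}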

Aggregations

RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 45 usages
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 26 usages
Header (org.apache.kafka.common.header.Header): 21 usages
Test (org.junit.Test): 17 usages
Headers (org.apache.kafka.common.header.Headers): 16 usages
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 15 usages
ByteBuffer (java.nio.ByteBuffer): 10 usages
Test (org.junit.jupiter.api.Test): 10 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 8 usages
ArrayList (java.util.ArrayList): 7 usages
DataOutputStream (java.io.DataOutputStream): 6 usages
ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream): 6 usages
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 5 usages
RecordBatchingStateRestoreCallback (org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback): 5 usages
Position (org.apache.kafka.streams.query.Position): 5 usages
MockInternalProcessorContext (org.apache.kafka.test.MockInternalProcessorContext): 5 usages
LinkedList (java.util.LinkedList): 4 usages
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 4 usages
Change (org.apache.kafka.streams.kstream.internals.Change): 4 usages
Eviction (org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction): 4 usages