Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
The class TimeOrderedKeyValueBufferTest, method shouldRestoreV2Format.
@Test
public void shouldRestoreV2Format() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));
    // A "v" header with value 2 marks each changelog record as the v2 serialization format.
    final RecordHeaders v2FlagHeaders =
        new RecordHeaders(new Header[] { new RecordHeader("v", new byte[] { (byte) 2 }) });
    // These serialized formats were captured by running version 2.3 code.
    // They verify that an upgrade from 2.3 will work.
    // Do not change them.
    final String toDeleteBinary = "0000000000000000000000000000000000000005746F70696300000000FFFFFFFF0000000EFFFFFFFF00000006646F6F6D6564FFFFFFFF0000000000000000";
    final String asdfBinary = "0000000000000001000000000000000000000005746F70696300000000FFFFFFFF0000000CFFFFFFFF0000000471776572FFFFFFFF0000000000000002";
    final String zxcvBinary1 = "0000000000000002000000000000000000000005746F70696300000000FFFFFFFF000000140000000749474E4F52454400000005336F34696D0000000870726576696F75730000000000000001";
    final String zxcvBinary2 = "0000000000000003000000000000000000000005746F70696300000000FFFFFFFF0000001100000005336F34696D000000046E6578740000000870726576696F75730000000000000001";
    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 9999, TimestampType.CREATE_TIME, -1, -1,
            "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 99, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary1), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 100, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary2), v2FlagHeaders, Optional.empty())));
    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(142L));
    // A null value is a tombstone: restoring it removes the buffered entry for "todelete".
    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));
    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(95L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));
    // Flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);
    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog).
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored.
    // * The record topic is set to the original input topic, *not* the changelog topic.
    // * The record offset preserves the original input record's offset, *not* the offset of the changelog record.
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "3o4im"), getContext(3L)),
        new Eviction<>("asdf", new Change<>("qwer", null), getContext(1L)))));
    cleanup(context, buffer);
}
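The "v" flag header in the test above is a compact versioning pattern: a one-byte header tells a restoring reader which serialization format the record value uses, so it can dispatch before deserializing. Below is a minimal, self-contained sketch of writing and reading such a flag with only the kafka-clients header classes (the class name and printed text are invented for illustration):

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class VersionFlagSketch {
    public static void main(String[] args) {
        // Tag a (hypothetical) changelog record as serialized in format version 2.
        RecordHeaders headers =
            new RecordHeaders(new Header[] { new RecordHeader("v", new byte[] { (byte) 2 }) });

        // A reader checks the flag before deserializing the value;
        // lastHeader returns null when the key is absent.
        Header versionHeader = headers.lastHeader("v");
        byte version = versionHeader == null ? (byte) 0 : versionHeader.value()[0];
        System.out.println("changelog format version: " + version);
    }
}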
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
The class AbstractRocksDBSegmentedBytesStoreTest, method getChangelogRecordsMultipleTopics.
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecordsMultipleTopics() {
    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    final Headers headers = new RecordHeaders();
    Position position1 = Position.emptyPosition();
    // Record 1: the position covers component ("A", partition 0, offset 1).
    position1 = position1.withComponent("A", 0, 1);
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[0])).get(), serializeValue(50L), headers, Optional.empty()));
    // Record 2: swap in a position that additionally covers ("B", 0, 2).
    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("B", 0, 2);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[2])).get(), serializeValue(100L), headers, Optional.empty()));
    // Record 3: advance component "A" to offset 3.
    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("A", 0, 3);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[3])).get(), serializeValue(200L), headers, Optional.empty()));
    return records;
}
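Note the pattern above: one mutable Headers instance is reused, with the position header removed and re-added before each record, because ConsumerRecord stores a reference to the Headers it is given rather than a copy. A stripped-down sketch of that remove-then-add update using only kafka-clients (the "cp" key and the string payloads are stand-ins for the helper's constant and the serialized Position):

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class ReplaceHeaderSketch {
    public static void main(String[] args) {
        Headers headers = new RecordHeaders();
        headers.add(new RecordHeader("cp", "A:0:1".getBytes(StandardCharsets.UTF_8)));

        // remove(key) drops every header with that key; adding afterwards
        // effectively replaces the value for the next record.
        headers.remove("cp");
        headers.add(new RecordHeader("cp", "B:0:2".getBytes(StandardCharsets.UTF_8)));

        headers.forEach(h ->
            System.out.println(h.key() + " = " + new String(h.value(), StandardCharsets.UTF_8)));
    }
}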
Use of org.apache.kafka.common.header.internals.RecordHeader in project kafka by apache.
The class TestTopicsTest, method testWithHeaders.
@Test
public void testWithHeaders() {
    long baseTime = 3;
    final Headers headers = new RecordHeaders(new Header[] {
        new RecordHeader("foo", "value".getBytes()),
        new RecordHeader("bar", null),
        new RecordHeader("\"A\\u00ea\\u00f1\\u00fcC\"", "value".getBytes())
    });
    final TestInputTopic<Long, String> inputTopic =
        testDriver.createInputTopic(INPUT_TOPIC, longSerde.serializer(), stringSerde.serializer());
    final TestOutputTopic<Long, String> outputTopic =
        testDriver.createOutputTopic(OUTPUT_TOPIC, longSerde.deserializer(), stringSerde.deserializer());
    inputTopic.pipeInput(new TestRecord<>(1L, "Hello", headers));
    assertThat(outputTopic.readRecord(), allOf(
        hasProperty("key", equalTo(1L)),
        hasProperty("value", equalTo("Hello")),
        hasProperty("headers", equalTo(headers))));
    inputTopic.pipeInput(new TestRecord<>(2L, "Kafka", headers, ++baseTime));
    assertThat(outputTopic.readRecord(), is(equalTo(new TestRecord<>(2L, "Kafka", headers, baseTime))));
}
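Driving headers end to end like this needs a topology behind the test driver. A compact, self-contained sketch with a trivial pass-through topology follows; the topic names, application id, and dummy bootstrap address are made up for the example:

import java.util.Properties;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.test.TestRecord;

public class HeaderPassthroughSketch {
    public static void main(String[] args) {
        // A trivial pass-through topology.
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("in").to("out");
        Topology topology = builder.build();

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "header-passthrough-sketch");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Long().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        try (TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
            TestInputTopic<Long, String> in = driver.createInputTopic(
                "in", Serdes.Long().serializer(), Serdes.String().serializer());
            TestOutputTopic<Long, String> out = driver.createOutputTopic(
                "out", Serdes.Long().deserializer(), Serdes.String().deserializer());

            Header[] headers = { new RecordHeader("foo", "value".getBytes()) };
            in.pipeInput(new TestRecord<>(1L, "Hello", new RecordHeaders(headers)));

            // Headers flow through the topology unchanged.
            System.out.println(out.readRecord().headers());
        }
    }
}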
Use of org.apache.kafka.common.header.internals.RecordHeader in project beam by apache.
The class NestedPayloadKafkaTable, method transformOutput.
// Suppress nullability warnings: ProducerRecord is supposed to accept null arguments.
@SuppressWarnings("argument.type.incompatible")
@VisibleForTesting
ProducerRecord<byte[], byte[]> transformOutput(Row row) {
    row = castRow(row, row.getSchema(), schema);
    String topic = Iterables.getOnlyElement(getTopics());
    byte[] key = null;
    byte[] payload;
    List<Header> headers = ImmutableList.of();
    Long timestampMillis = null;
    if (schema.hasField(Schemas.MESSAGE_KEY_FIELD)) {
        key = row.getBytes(Schemas.MESSAGE_KEY_FIELD);
    }
    if (schema.hasField(Schemas.EVENT_TIMESTAMP_FIELD)) {
        ReadableDateTime time = row.getDateTime(Schemas.EVENT_TIMESTAMP_FIELD);
        if (time != null) {
            timestampMillis = time.getMillis();
        }
    }
    if (schema.hasField(Schemas.HEADERS_FIELD)) {
        Collection<Row> headerRows = checkArgumentNotNull(row.getArray(Schemas.HEADERS_FIELD));
        ImmutableList.Builder<Header> headersBuilder = ImmutableList.builder();
        headerRows.forEach(entry -> {
            String headerKey = checkArgumentNotNull(entry.getString(Schemas.HEADERS_KEY_FIELD));
            Collection<byte[]> values = checkArgumentNotNull(entry.getArray(Schemas.HEADERS_VALUES_FIELD));
            // A header key may map to multiple values; emit one RecordHeader per value.
            values.forEach(value -> headersBuilder.add(new RecordHeader(headerKey, value)));
        });
        headers = headersBuilder.build();
    }
    if (payloadSerializer == null) {
        payload = row.getBytes(Schemas.PAYLOAD_FIELD);
    } else {
        payload = payloadSerializer.serialize(checkArgumentNotNull(row.getRow(Schemas.PAYLOAD_FIELD)));
    }
    // Partition is left null so the producer's partitioner chooses one.
    return new ProducerRecord<>(topic, null, timestampMillis, key, payload, headers);
}
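The return statement uses the six-argument ProducerRecord constructor, passing null for the partition (the producer's partitioner then decides) and a possibly-null timestamp (assigned downstream). A standalone sketch of that constructor; the topic, key, value, and header contents here are invented:

import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;

public class ProducerRecordWithHeadersSketch {
    public static void main(String[] args) {
        List<Header> headers = List.of(
            new RecordHeader("trace-id", "abc123".getBytes(StandardCharsets.UTF_8)));
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
            "events",                                   // topic (invented)
            null,                                       // partition: let the partitioner decide
            null,                                       // timestamp: assigned downstream
            "key".getBytes(StandardCharsets.UTF_8),     // key, may also be null
            "payload".getBytes(StandardCharsets.UTF_8), // value
            headers);
        System.out.println(record);
    }
}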
Use of org.apache.kafka.common.header.internals.RecordHeader in project hono by eclipse.
The class KafkaBasedCommandSenderTest, method commandResponseRecord.
private ConsumerRecord<String, Buffer> commandResponseRecord(final String tenantId, final String deviceId,
        final String correlationId, final Integer status, final Buffer payload) {
    final List<Header> headers = new ArrayList<>();
    headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_TENANT_ID, tenantId.getBytes()));
    headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_DEVICE_ID, deviceId.getBytes()));
    headers.add(new RecordHeader(MessageHelper.SYS_PROPERTY_CORRELATION_ID, correlationId.getBytes()));
    if (status != null) {
        headers.add(new RecordHeader(MessageHelper.APP_PROPERTY_STATUS, String.valueOf(status).getBytes()));
    }
    return new ConsumerRecord<>(new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString(),
        0, 0, -1L, TimestampType.NO_TIMESTAMP_TYPE, -1L, -1, -1, deviceId, payload,
        new RecordHeaders(headers.toArray(Header[]::new)));
}
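On the consuming side, such message properties are read back by key. A small sketch of header lookup with lastHeader; the literal key strings below are assumptions standing in for Hono's MessageHelper constants:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class HeaderLookupSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        headers.add(new RecordHeader("tenant_id", "tenant-1".getBytes(StandardCharsets.UTF_8)));
        headers.add(new RecordHeader("device_id", "device-4711".getBytes(StandardCharsets.UTF_8)));
        headers.add(new RecordHeader("status", "200".getBytes(StandardCharsets.UTF_8)));

        // lastHeader returns the most recently added header for a key, or null.
        Header status = headers.lastHeader("status");
        if (status != null) {
            System.out.println("status = " + new String(status.value(), StandardCharsets.UTF_8));
        }
    }
}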