Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class InternalMockProcessorContext, method logChange.
@Override
public void logChange(final String storeName, final Bytes key, final byte[] value, final long timestamp, final Position position) {
    Headers headers = new RecordHeaders();
    if (!consistencyEnabled) {
        headers = null;
    } else {
        // Add the vector clock to the header part of every record
        headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
        headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
                                     PositionSerde.serialize(position).array()));
    }
    recordCollector().send(storeName + "-changelog", key, value, headers, taskId().partition(), timestamp,
                           BYTES_KEY_SERIALIZER, BYTEARRAY_VALUE_SERIALIZER);
}
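For context, the restore side would read these same headers back off each changelog record. A minimal sketch of that lookup, assuming a PositionSerde.deserialize counterpart to the serialize call above; the helper name and method shape are illustrative, not taken from the snippet.
// Sketch only: recover the Position that logChange wrote into the record headers.
// Assumes PositionSerde.deserialize(ByteBuffer) mirrors PositionSerde.serialize(Position).
static Position positionFromChangelogRecord(final ConsumerRecord<byte[], byte[]> record) {
    final Header positionHeader =
        record.headers().lastHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    if (positionHeader == null) {
        // consistency was disabled on the write path, so no vector clock was attached
        return Position.emptyPosition();
    }
    return PositionSerde.deserialize(ByteBuffer.wrap(positionHeader.value()));
}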
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class TimeOrderedKeyValueBufferTest, method shouldRestoreV2Format.
@Test
public void shouldRestoreV2Format() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));
    final RecordHeaders v2FlagHeaders = new RecordHeaders(new Header[] { new RecordHeader("v", new byte[] { (byte) 2 }) });
    // These serialized formats were captured by running version 2.3 code.
    // They verify that an upgrade from 2.3 will work.
    // Do not change them.
    final String toDeleteBinary = "0000000000000000000000000000000000000005746F70696300000000FFFFFFFF0000000EFFFFFFFF00000006646F6F6D6564FFFFFFFF0000000000000000";
    final String asdfBinary = "0000000000000001000000000000000000000005746F70696300000000FFFFFFFF0000000CFFFFFFFF0000000471776572FFFFFFFF0000000000000002";
    final String zxcvBinary1 = "0000000000000002000000000000000000000005746F70696300000000FFFFFFFF000000140000000749474E4F52454400000005336F34696D0000000870726576696F75730000000000000001";
    final String zxcvBinary2 = "0000000000000003000000000000000000000005746F70696300000000FFFFFFFF0000001100000005336F34696D000000046E6578740000000870726576696F75730000000000000001";
    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
                             "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 9999, TimestampType.CREATE_TIME, -1, -1,
                             "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 99, TimestampType.CREATE_TIME, -1, -1,
                             "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary1), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 100, TimestampType.CREATE_TIME, -1, -1,
                             "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary2), v2FlagHeaders, Optional.empty())));
    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(142L));
    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
                             "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));
    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(95L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));
    // Flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);
    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog).
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored.
    // * The record topic is set to the original input topic, *not* the changelog topic.
    // * The record offset preserves the original input record's offset, *not* the offset of the changelog record.
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "3o4im"), getContext(3L)),
        new Eviction<>("asdf", new Change<>("qwer", null), getContext(1L)))));
    cleanup(context, buffer);
}
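The v2FlagHeaders above show how this buffer versions its changelog format: a single header keyed "v" whose one-byte value is the format version. A minimal sketch of how a restore path can branch on that flag; the helper below is illustrative, not the method the buffer actually uses.
// Sketch only: read the changelog format version from the "v" header.
// Records written before the flag existed carry no "v" header; treat that as the legacy format here.
static byte changelogFormatVersion(final ConsumerRecord<byte[], byte[]> record) {
    final Header versionHeader = record.headers().lastHeader("v");
    if (versionHeader == null || versionHeader.value() == null || versionHeader.value().length == 0) {
        return (byte) 1;
    }
    return versionHeader.value()[0];
}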
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class RocksDBWindowStoreTest, method shouldMatchPositionAfterPut.
@Test
public void shouldMatchPositionAfterPut() {
    final MeteredWindowStore<Integer, String> meteredSessionStore = (MeteredWindowStore<Integer, String>) windowStore;
    final ChangeLoggingWindowBytesStore changeLoggingSessionBytesStore = (ChangeLoggingWindowBytesStore) meteredSessionStore.wrapped();
    final RocksDBWindowStore rocksDBWindowStore = (RocksDBWindowStore) changeLoggingSessionBytesStore.wrapped();
    context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders()));
    windowStore.put(0, "0", SEGMENT_INTERVAL);
    context.setRecordContext(new ProcessorRecordContext(0, 2, 0, "", new RecordHeaders()));
    windowStore.put(1, "1", SEGMENT_INTERVAL);
    context.setRecordContext(new ProcessorRecordContext(0, 3, 0, "", new RecordHeaders()));
    windowStore.put(2, "2", SEGMENT_INTERVAL);
    context.setRecordContext(new ProcessorRecordContext(0, 4, 0, "", new RecordHeaders()));
    windowStore.put(3, "3", SEGMENT_INTERVAL);
    final Position expected = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 4L)))));
    final Position actual = rocksDBWindowStore.getPosition();
    assertEquals(expected, actual);
}
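The expected Position here is built from a map literal with mkMap and mkEntry (from org.apache.kafka.common.utils.Utils); the store itself reaches the same value incrementally, advancing the offset for the topic-partition on every put. A small sketch of that equivalence; the method name is chosen for illustration.
// Sketch only: the map-literal Position used in the assertion equals the Position
// built by advancing the same topic-partition one put at a time.
static void positionEquivalenceSketch() {
    final Position fromMap = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 4L)))));
    final Position incremental = Position.emptyPosition()
        .withComponent("", 0, 1L)
        .withComponent("", 0, 2L)
        .withComponent("", 0, 3L)
        .withComponent("", 0, 4L);
    // fromMap.equals(incremental): only the last offset recorded for ("", 0) is visible, i.e. 4L.
}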
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class WorkerSinkTaskTest, method testHeaders.
@Test
public void testHeaders() throws Exception {
    Headers headers = new RecordHeaders();
    headers.add("header_key", "header_value".getBytes());
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();
    expectConsumerPoll(1, headers);
    expectConversionAndTransformation(1, null, headers);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // iter 1 -- initial assignment
    workerTask.iteration();
    // iter 2 -- deliver 1 record
    workerTask.iteration();
    PowerMock.verifyAll();
}
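For reference, the header added above travels with the record, and the consuming side can read it back by key through the same Headers interface. A minimal, illustrative fragment, not part of the test.
// Sketch only: look up a header value by key from any Headers instance,
// e.g. ConsumerRecord#headers() on the records the sink task receives.
static String headerValue(final Headers headers, final String key) {
    final Header header = headers.lastHeader(key);
    return header == null ? null : new String(header.value(), StandardCharsets.UTF_8);
}
// headerValue(new RecordHeaders().add("header_key", "header_value".getBytes()), "header_key")
// returns "header_value"; repeated keys can be iterated with headers.headers(key).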
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.
The class AbstractRocksDBSegmentedBytesStoreTest, method getChangelogRecordsMultipleTopics.
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecordsMultipleTopics() {
    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    final Headers headers = new RecordHeaders();
    Position position1 = Position.emptyPosition();

    position1 = position1.withComponent("A", 0, 1);
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
                                 PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
                                     serializeKey(new Windowed<>("a", windows[0])).get(), serializeValue(50L), headers, Optional.empty()));

    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("B", 0, 2);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
                                 PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
                                     serializeKey(new Windowed<>("a", windows[2])).get(), serializeValue(100L), headers, Optional.empty()));

    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("A", 0, 3);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
                                 PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
                                     serializeKey(new Windowed<>("a", windows[3])).get(), serializeValue(200L), headers, Optional.empty()));

    return records;
}
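Note the remove-then-add pattern between records: Headers permits duplicate keys, so add() alone would append a second position header rather than replace the first. A minimal sketch of the difference, using an illustrative key and values.
// Sketch only: replacing a header means remove() followed by add(); add() alone appends.
static void replaceHeaderSketch() {
    final Headers headers = new RecordHeaders();
    headers.add(new RecordHeader("position", new byte[] {1}));
    headers.add(new RecordHeader("position", new byte[] {2})); // two "position" headers now
    headers.remove("position");                                 // drops both
    headers.add(new RecordHeader("position", new byte[] {3})); // exactly one copy again
    // headers.lastHeader("position").value()[0] == 3
}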