Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class TimeOrderedKeyValueBufferTest, method shouldRestoreV3Format.
@Test
public void shouldRestoreV3Format() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));
    final RecordHeaders headers =
        new RecordHeaders(new Header[] {new RecordHeader("v", new byte[] {(byte) 3})});

    // These serialized formats were captured by running version 2.4 code.
    // They verify that an upgrade from 2.4 will work.
    // Do not change them.
    final String toDeleteBinary = "0000000000000000000000000000000000000005746F70696300000000FFFFFFFFFFFFFFFFFFFFFFFF00000006646F6F6D65640000000000000000";
    final String asdfBinary = "0000000000000001000000000000000000000005746F70696300000000FFFFFFFFFFFFFFFFFFFFFFFF00000004717765720000000000000002";
    final String zxcvBinary1 = "0000000000000002000000000000000000000005746F70696300000000FFFFFFFF0000000870726576696F75730000000749474E4F52454400000005336F34696D0000000000000001";
    final String zxcvBinary2 = "0000000000000003000000000000000000000005746F70696300000000FFFFFFFF0000000870726576696F757300000005336F34696D000000046E6578740000000000000001";

    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinary), headers, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 9999, TimestampType.CREATE_TIME, -1, -1,
            "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinary), headers, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 99, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary1), headers, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 100, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary2), headers, Optional.empty())));

    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(142L));

    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));

    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(95L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));

    // Flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);

    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog).
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored.
    // * The record topic is set to the original input topic, *not* the changelog topic.
    // * The record offset preserves the original input record's offset, *not* the offset of the changelog record.
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "3o4im"), getContext(3L)),
        new Eviction<>("asdf", new Change<>("qwer", null), getContext(1L)))));

    cleanup(context, buffer);
}
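The restore tests above all call a hexStringToByteArray helper that is defined elsewhere in the test class and not shown in this excerpt. A minimal sketch of such a helper, assuming plain two-hex-digits-per-byte input with no separators, could look like this:

// Hypothetical sketch only; the real helper lives elsewhere in
// TimeOrderedKeyValueBufferTest and may differ in detail.
private static byte[] hexStringToByteArray(final String hexString) {
    final byte[] bytes = new byte[hexString.length() / 2];
    for (int i = 0; i < bytes.length; i++) {
        // Parse each pair of hex digits into one byte; parseInt with radix 16
        // handles values above 0x7F, and the cast narrows them to a byte.
        bytes[i] = (byte) Integer.parseInt(hexString.substring(2 * i, 2 * i + 2), 16);
    }
    return bytes;
}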
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class TimeOrderedKeyValueBufferTest, method shouldFlush.
@Test
public void shouldFlush() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);

    putRecord(buffer, context, 2L, 0L, "asdf", "2093j");
    putRecord(buffer, context, 1L, 1L, "zxcv", "3gon4i");
    putRecord(buffer, context, 0L, 2L, "deleteme", "deadbeef");

    // Replace "deleteme" with a tombstone.
    buffer.evictWhile(() -> buffer.minTimestamp() < 1, kv -> { });

    // Flush everything to the changelog.
    buffer.flush();

    // The buffer should serialize the buffer time and the value as byte[],
    // which we can't compare for equality using ProducerRecord.
    // As a workaround, I'm deserializing them and shoving them in a KeyValue, just for ease of testing.
    final List<ProducerRecord<String, KeyValue<Long, BufferValue>>> collected =
        ((MockRecordCollector) context.recordCollector()).collected()
            .stream()
            .map(pr -> {
                final KeyValue<Long, BufferValue> niceValue;
                if (pr.value() == null) {
                    niceValue = null;
                } else {
                    final byte[] serializedValue = (byte[]) pr.value();
                    final ByteBuffer valueBuffer = ByteBuffer.wrap(serializedValue);
                    final BufferValue contextualRecord = BufferValue.deserialize(valueBuffer);
                    final long timestamp = valueBuffer.getLong();
                    niceValue = new KeyValue<>(timestamp, contextualRecord);
                }
                return new ProducerRecord<>(pr.topic(), pr.partition(), pr.timestamp(),
                    pr.key().toString(), niceValue, pr.headers());
            })
            .collect(Collectors.toList());

    assertThat(collected, is(asList(
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog", // Producer will assign
            0, null, "deleteme", null, new RecordHeaders()),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
            0, null, "zxcv", new KeyValue<>(1L, getBufferValue("3gon4i", 1)), CHANGELOG_HEADERS),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
            0, null, "asdf", new KeyValue<>(2L, getBufferValue("2093j", 0)), CHANGELOG_HEADERS))));

    cleanup(context, buffer);
}
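The mapping step above implies the on-the-wire layout of a non-tombstone changelog value: a serialized BufferValue followed by an 8-byte buffer time. A small sketch of decoding one value by hand, assuming serializedValue holds the raw bytes the buffer wrote to the changelog:

// Sketch: split one changelog value the same way the test does.
// `serializedValue` is an assumed local holding the raw changelog bytes.
final ByteBuffer valueBuffer = ByteBuffer.wrap(serializedValue);
final BufferValue bufferValue = BufferValue.deserialize(valueBuffer); // leading BufferValue
final long bufferTime = valueBuffer.getLong();                        // trailing 8-byte buffer time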
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class TimeOrderedKeyValueBufferTest, method shouldRestoreV1Format.
@Test
public void shouldRestoreV1Format() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));
    final RecordHeaders v1FlagHeaders =
        new RecordHeaders(new Header[] {new RecordHeader("v", new byte[] {(byte) 1})});

    // These serialized formats were captured by running version 2.2 code.
    // They verify that an upgrade from 2.2 will work.
    // Do not change them.
    final String toDeleteBinary = "00000000000000000000000000000000000000000000000000000005746F70696300000000FFFFFFFF0000000EFFFFFFFF00000006646F6F6D6564";
    final String asdfBinary = "00000000000000020000000000000001000000000000000000000005746F70696300000000FFFFFFFF0000000CFFFFFFFF0000000471776572";
    final String zxcvBinary1 = "00000000000000010000000000000002000000000000000000000005746F70696300000000FFFFFFFF000000150000000870726576696F757300000005336F34696D";
    final String zxcvBinary2 = "00000000000000010000000000000003000000000000000000000005746F70696300000000FFFFFFFF0000001100000005336F34696D000000046E657874";

    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinary), v1FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 9999, TimestampType.CREATE_TIME, -1, -1,
            "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinary), v1FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 99, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary1), v1FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 3, 100, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary2), v1FlagHeaders, Optional.empty())));

    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(142L));

    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));

    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(95L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));

    // Flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);

    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog).
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored.
    // * The record topic is set to the original input topic, *not* the changelog topic.
    // * The record offset preserves the original input record's offset, *not* the offset of the changelog record.
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "3o4im"), getContext(3L)),
        new Eviction<>("asdf", new Change<>("qwer", null), getContext(1L)))));

    cleanup(context, buffer);
}
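One detail worth noting, inferred from the captured hex constants rather than stated in the tests: in the v1 layout the buffer time appears to be the leading 8 bytes of the value (asdfBinary above leads with 2, matching its eviction order after zxcv), whereas in the v3 layout it is the trailing 8 bytes. A sketch reading both, reusing the hexStringToByteArray helper sketched earlier; v1AsdfBinary and v3AsdfBinary are hypothetical renamings of the asdfBinary constants from the two restore tests:

// Assumption drawn from the captured hex: v1 leads with the buffer time,
// v3 trails with it.
final byte[] v1Bytes = hexStringToByteArray(v1AsdfBinary);
final long v1BufferTime = ByteBuffer.wrap(v1Bytes).getLong();                 // first 8 bytes -> 2L
final byte[] v3Bytes = hexStringToByteArray(v3AsdfBinary);
final long v3BufferTime = ByteBuffer.wrap(v3Bytes).getLong(v3Bytes.length - 8); // last 8 bytes -> 2L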
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class TimeOrderedKeyValueBufferTest, method shouldRestoreOldUnversionedFormat.
@Test
public void shouldRestoreOldUnversionedFormat() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));

    // These serialized formats were captured by running version 2.1 code.
    // They verify that an upgrade from 2.1 will work.
    // Do not change them.
    final String toDeleteBinaryValue = "0000000000000000FFFFFFFF00000006646F6F6D6564";
    final String asdfBinaryValue = "0000000000000002FFFFFFFF0000000471776572";
    final String zxcvBinaryValue1 = "00000000000000010000000870726576696F757300000005656F34696D";
    final String zxcvBinaryValue2 = "000000000000000100000005656F34696D000000046E657874";

    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 0, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinaryValue), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 1, TimestampType.CREATE_TIME, -1, -1,
            "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinaryValue), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 2, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinaryValue1), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinaryValue2), new RecordHeaders(), Optional.empty())));

    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(172L));

    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));

    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(115L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));

    // Flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);

    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog).
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored.
    // * The record topic is set to the changelog topic. This was an oversight in the original implementation,
    //   which is fixed in changelog format v1. But upgraded applications still need to be able to handle the
    //   original format.
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "eo4im"),
            new ProcessorRecordContext(3L, 3, 0, "changelog-topic", new RecordHeaders())),
        new Eviction<>("asdf", new Change<>("qwer", null),
            new ProcessorRecordContext(1L, 1, 0, "changelog-topic", new RecordHeaders())))));

    cleanup(context, buffer);
}
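Taken together, the three restore tests imply that the restore path dispatches on the "v" changelog header: absent for the unversioned 2.1 format, byte 1 for the 2.2 format, byte 3 for the 2.4 format, and a failure for anything unrecognized (exercised by the final test below). A hedged sketch of that dispatch, not the actual Kafka implementation, and ignoring any intermediate versions not exercised in this excerpt:

// Sketch of the implied dispatch; `record` is an assumed ConsumerRecord
// from the changelog, and the real restore logic lives in the buffer
// implementation under test.
final Header versionHeader = record.headers().lastHeader("v");
if (versionHeader == null) {
    // Unversioned 2.1-era format: restore with the legacy deserializer.
} else if (versionHeader.value()[0] == (byte) 1) {
    // v1 (2.2-era) format.
} else if (versionHeader.value()[0] == (byte) 3) {
    // v3 (2.4-era) format.
} else {
    // Unrecognized version flag: refuse to restore (hypothetical message).
    throw new IllegalArgumentException("Unrecognized changelog record version: " + record);
}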
Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache: class TimeOrderedKeyValueBufferTest, method shouldNotRestoreUnrecognizedVersionRecord.
@Test
public void shouldNotRestoreUnrecognizedVersionRecord() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);
    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));

    final RecordHeaders unknownFlagHeaders =
        new RecordHeaders(new Header[] {new RecordHeader("v", new byte[] {(byte) -1})});
    final byte[] todeleteValue = getBufferValue("doomed", 0).serialize(0).array();
    try {
        stateRestoreCallback.restoreBatch(singletonList(
            new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
                "todelete".getBytes(UTF_8),
                ByteBuffer.allocate(Long.BYTES + todeleteValue.length).putLong(0L).put(todeleteValue).array(),
                unknownFlagHeaders, Optional.empty())));
        fail("expected an exception");
    } catch (final IllegalArgumentException expected) {
        // Nothing to do.
    } finally {
        cleanup(context, buffer);
    }
}
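As an aside, on JUnit 4.13+ (or JUnit 5) the try/fail/catch pattern above could be expressed more compactly with assertThrows. A sketch, assuming `record` is the same ConsumerRecord constructed in the test:

// Equivalent assertion style; `record` is a hypothetical local for the
// ConsumerRecord built above.
assertThrows(IllegalArgumentException.class,
    () -> stateRestoreCallback.restoreBatch(singletonList(record)));
cleanup(context, buffer);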