Use of org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback in project kafka by apache.
From the class TimeOrderedKeyValueBufferTest, method shouldRestoreOldUnversionedFormat.
@Test
public void shouldRestoreOldUnversionedFormat() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);

    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);

    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));

    // These serialized formats were captured by running version 2.1 code.
    // They verify that an upgrade from 2.1 will work.
    // Do not change them.
    final String toDeleteBinaryValue = "0000000000000000FFFFFFFF00000006646F6F6D6564";
    final String asdfBinaryValue = "0000000000000002FFFFFFFF0000000471776572";
    final String zxcvBinaryValue1 = "00000000000000010000000870726576696F757300000005656F34696D";
    final String zxcvBinaryValue2 = "000000000000000100000005656F34696D000000046E657874";

    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 0, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinaryValue), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 1, TimestampType.CREATE_TIME, -1, -1,
            "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinaryValue), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 2, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinaryValue1), new RecordHeaders(), Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinaryValue2), new RecordHeaders(), Optional.empty())));

    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(172L));

    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));

    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(115L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));

    // Flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);

    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog).
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored.
    // * The record topic is set to the changelog topic. This was an oversight in the original implementation,
    //   which is fixed in changelog format v1. But upgraded applications still need to be able to handle the
    //   original format.
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "eo4im"),
            new ProcessorRecordContext(3L, 3, 0, "changelog-topic", new RecordHeaders())),
        new Eviction<>("asdf", new Change<>("qwer", null),
            new ProcessorRecordContext(1L, 1, 0, "changelog-topic", new RecordHeaders())))));

    cleanup(context, buffer);
}
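The hex strings above are decoded with a hexStringToByteArray helper defined in the test class; its body is not part of this snippet. A minimal sketch of such a helper, under the assumption that it simply parses pairs of hex digits, could look like this:

// Minimal sketch of a hex-decoding helper like the one the test relies on.
// The actual helper in TimeOrderedKeyValueBufferTest may differ; this is illustrative only.
private static byte[] hexStringToByteArray(final String hex) {
    final byte[] bytes = new byte[hex.length() / 2];
    for (int i = 0; i < bytes.length; i++) {
        // Parse each pair of hex characters into one byte.
        bytes[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16);
    }
    return bytes;
}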
Use of org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback in project kafka by apache.
From the class TimeOrderedKeyValueBufferTest, method shouldNotRestoreUnrecognizedVersionRecord.
@Test
public void shouldNotRestoreUnrecognizedVersionRecord() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);

    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);

    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));

    final RecordHeaders unknownFlagHeaders =
        new RecordHeaders(new Header[] {new RecordHeader("v", new byte[] {(byte) -1})});

    final byte[] todeleteValue = getBufferValue("doomed", 0).serialize(0).array();
    try {
        stateRestoreCallback.restoreBatch(singletonList(
            new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
                "todelete".getBytes(UTF_8),
                ByteBuffer.allocate(Long.BYTES + todeleteValue.length).putLong(0L).put(todeleteValue).array(),
                unknownFlagHeaders, Optional.empty())));
        fail("expected an exception");
    } catch (final IllegalArgumentException expected) {
        // nothing to do.
    } finally {
        cleanup(context, buffer);
    }
}
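The test expects the restore path to reject a record whose "v" header carries an unrecognized version flag. A hedged sketch of how such a check might look follows; the method name and the set of accepted versions are illustrative assumptions, not the buffer's actual internals:

// Illustrative sketch only: validating the "v" changelog-version header during restore.
// The real TimeOrderedKeyValueBuffer implementation may structure this differently.
private static void checkVersionHeader(final ConsumerRecord<byte[], byte[]> record) {
    final Header versionHeader = record.headers().lastHeader("v");
    if (versionHeader != null) {
        final byte version = versionHeader.value()[0];
        if (version != 1 && version != 2) { // assumed set of recognized format versions
            throw new IllegalArgumentException("Restoring apparently invalid changelog record of version " + version);
        }
    }
}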
Use of org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback in project kafka by apache.
From the class InternalMockProcessorContext, method restore.
public void restore(final String storeName, final Iterable<KeyValue<byte[], byte[]>> changeLog) {
    final RecordBatchingStateRestoreCallback restoreCallback = adapt(restoreFuncs.get(storeName));
    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    for (final KeyValue<byte[], byte[]> keyValue : changeLog) {
        records.add(new ConsumerRecord<>("", 0, 0L, keyValue.key, keyValue.value));
    }
    restoreCallback.restoreBatch(records);
}
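The adapt call bridges the older record-at-a-time StateRestoreCallback interface to the batching one. A simplified sketch of such an adapter follows; Kafka's real StateRestoreCallbackAdapter also special-cases BatchingStateRestoreCallback, which is omitted here:

// Simplified sketch of adapting a StateRestoreCallback to a RecordBatchingStateRestoreCallback.
// Omits the BatchingStateRestoreCallback branch that Kafka's StateRestoreCallbackAdapter handles.
public static RecordBatchingStateRestoreCallback adapt(final StateRestoreCallback restoreCallback) {
    if (restoreCallback instanceof RecordBatchingStateRestoreCallback) {
        // Already batching: use it directly.
        return (RecordBatchingStateRestoreCallback) restoreCallback;
    }
    // Otherwise, replay the batch one record at a time through the single-record callback.
    return records -> {
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            restoreCallback.restore(record.key(), record.value());
        }
    };
}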
Use of org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback in project kafka by apache.
From the class TimeOrderedKeyValueBufferTest, method shouldRestoreV2Format.
@Test
public void shouldRestoreV2Format() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);

    final RecordBatchingStateRestoreCallback stateRestoreCallback =
        (RecordBatchingStateRestoreCallback) context.stateRestoreCallback(testName);

    context.setRecordContext(new ProcessorRecordContext(0, 0, 0, "", new RecordHeaders()));

    final RecordHeaders v2FlagHeaders =
        new RecordHeaders(new Header[] {new RecordHeader("v", new byte[] {(byte) 2})});

    // These serialized formats were captured by running version 2.3 code.
    // They verify that an upgrade from 2.3 will work.
    // Do not change them.
    final String toDeleteBinary = "0000000000000000000000000000000000000005746F70696300000000FFFFFFFF0000000EFFFFFFFF00000006646F6F6D6564FFFFFFFF0000000000000000";
    final String asdfBinary = "0000000000000001000000000000000000000005746F70696300000000FFFFFFFF0000000CFFFFFFFF0000000471776572FFFFFFFF0000000000000002";
    final String zxcvBinary1 = "0000000000000002000000000000000000000005746F70696300000000FFFFFFFF000000140000000749474E4F52454400000005336F34696D0000000870726576696F75730000000000000001";
    final String zxcvBinary2 = "0000000000000003000000000000000000000005746F70696300000000FFFFFFFF0000001100000005336F34696D000000046E6578740000000870726576696F75730000000000000001";

    stateRestoreCallback.restoreBatch(asList(
        new ConsumerRecord<>("changelog-topic", 0, 0, 999, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), hexStringToByteArray(toDeleteBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 1, 9999, TimestampType.CREATE_TIME, -1, -1,
            "asdf".getBytes(UTF_8), hexStringToByteArray(asdfBinary), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 99, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary1), v2FlagHeaders, Optional.empty()),
        new ConsumerRecord<>("changelog-topic", 0, 2, 100, TimestampType.CREATE_TIME, -1, -1,
            "zxcv".getBytes(UTF_8), hexStringToByteArray(zxcvBinary2), v2FlagHeaders, Optional.empty())));

    assertThat(buffer.numRecords(), is(3));
    assertThat(buffer.minTimestamp(), is(0L));
    assertThat(buffer.bufferSize(), is(142L));

    stateRestoreCallback.restoreBatch(singletonList(
        new ConsumerRecord<>("changelog-topic", 0, 3, 3, TimestampType.CREATE_TIME, -1, -1,
            "todelete".getBytes(UTF_8), null, new RecordHeaders(), Optional.empty())));

    assertThat(buffer.numRecords(), is(2));
    assertThat(buffer.minTimestamp(), is(1L));
    assertThat(buffer.bufferSize(), is(95L));
    assertThat(buffer.priorValueForBuffered("todelete"), is(Maybe.undefined()));
    assertThat(buffer.priorValueForBuffered("asdf"), is(Maybe.defined(null)));
    assertThat(buffer.priorValueForBuffered("zxcv"), is(Maybe.defined(ValueAndTimestamp.make("previous", -1))));

    // Flush the buffer into a list in buffer order so we can make assertions about the contents.
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> true, evicted::add);

    // Several things to note:
    // * The buffered records are ordered according to their buffer time (serialized in the value of the changelog).
    // * The record timestamps are properly restored, and not conflated with the record's buffer time.
    // * The keys and values are properly restored.
    // * The record topic is set to the original input topic, *not* the changelog topic.
    // * The record offset preserves the original input record's offset, *not* the offset of the changelog record.
    assertThat(evicted, is(asList(
        new Eviction<>("zxcv", new Change<>("next", "3o4im"), getContext(3L)),
        new Eviction<>("asdf", new Change<>("qwer", null), getContext(1L)))));

    cleanup(context, buffer);
}
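The getContext(long) helper used in the final assertion is defined elsewhere in the test class and is not shown in this snippet. Going by the comments above, it presumably builds a ProcessorRecordContext carrying the original input topic (serialized as "topic" in the captured binaries) rather than the changelog topic. A hypothetical sketch:

// Hypothetical sketch of the test's getContext helper; the actual implementation is not
// shown in this snippet. Per the comments above, the restored context should carry the
// original input topic, not "changelog-topic".
private static ProcessorRecordContext getContext(final long recordTimestamp) {
    return new ProcessorRecordContext(recordTimestamp, 0, 0, "topic", new RecordHeaders());
}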
Use of org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback in project kafka by apache.
From the class RocksDBStore, method init.
@Override
public void init(final StateStoreContext context, final StateStore root) {
    // open the DB dir
    metricsRecorder.init(getMetricsImpl(context), context.taskId());
    openDB(context.appConfigs(), context.stateDir());

    final File positionCheckpointFile = new File(context.stateDir(), name() + ".position");
    this.positionCheckpoint = new OffsetCheckpoint(positionCheckpointFile);
    this.position = StoreQueryUtils.readPositionFromCheckpoint(positionCheckpoint);

    // value getter should always read directly from rocksDB
    // since it is only for values that are already flushed
    this.context = context;

    context.register(
        root,
        (RecordBatchingStateRestoreCallback) this::restoreBatch,
        () -> StoreQueryUtils.checkpointPosition(positionCheckpoint, position));

    consistencyEnabled = StreamsConfig.InternalConfig.getBoolean(
        context.appConfigs(),
        IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED,
        false);
}
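RocksDBStore registers its restoreBatch method directly as a RecordBatchingStateRestoreCallback, so the whole batch of changelog records arrives in one call. A custom store can follow the same pattern; in this minimal sketch the class name, the applyBatch method, and the put call are hypothetical, and only the register(...) pattern mirrors RocksDBStore:

// Illustrative sketch: a custom store's init registering a batching restore callback.
// MyStore, applyBatch, and put are hypothetical; only the registration pattern is from above.
@Override
public void init(final StateStoreContext context, final StateStore root) {
    context.register(root, (RecordBatchingStateRestoreCallback) this::applyBatch);
}

private void applyBatch(final Collection<ConsumerRecord<byte[], byte[]>> records) {
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        // Write each restored changelog record into the underlying store.
        put(Bytes.wrap(record.key()), record.value());
    }
}

Because the callback receives whole ConsumerRecords rather than bare key/value pairs, a batching store can also inspect timestamps and headers during restore, which is exactly what the TimeOrderedKeyValueBuffer tests above exercise.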