Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class RocksDBStoreTest, method createChangelogRecord.
private ConsumerRecord<byte[], byte[]> createChangelogRecord(
        final byte[] key, final byte[] value, final String topic, final int partition, final long offset) {
    final Headers headers = new RecordHeaders();
    Position position = Position.emptyPosition();
    position = position.withComponent(topic, partition, offset);
    // The version header flags the record as carrying a consistency vector;
    // the position header carries the serialized Position itself.
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position).array()));
    return new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE,
        -1, -1, key, value, headers, Optional.empty());
}
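The position header written above round-trips through PositionSerde. A minimal sketch of the read side, assuming a record built by the helper above (the record, topic, partition, and offset names are illustrative):

    // Sketch: read back the Position that createChangelogRecord wrote into the headers.
    final Header positionHeader = record.headers()
        .lastHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    final Position restored = PositionSerde.deserialize(ByteBuffer.wrap(positionHeader.value()));
    assertEquals(Position.emptyPosition().withComponent(topic, partition, offset), restored);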
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class RocksDBStoreTest, method shouldMatchPositionAfterPut.
@Test
public void shouldMatchPositionAfterPut() {
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);

    // Each put is attributed to the topic, partition, and offset of the current record context.
    context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders()));
    rocksDBStore.put(new Bytes(stringSerializer.serialize(null, "one")), stringSerializer.serialize(null, "A"));
    context.setRecordContext(new ProcessorRecordContext(0, 2, 0, "", new RecordHeaders()));
    rocksDBStore.put(new Bytes(stringSerializer.serialize(null, "two")), stringSerializer.serialize(null, "B"));
    context.setRecordContext(new ProcessorRecordContext(0, 3, 0, "", new RecordHeaders()));
    rocksDBStore.put(new Bytes(stringSerializer.serialize(null, "three")), stringSerializer.serialize(null, "C"));

    // The store's position should record the highest offset seen per topic-partition.
    final Position expected = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 3L)))));
    final Position actual = rocksDBStore.getPosition();
    assertEquals(expected, actual);
}
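The position tracked here is what IQv2 position bounds are checked against. A minimal sketch, assuming a running KafkaStreams instance named streams with a queryable store named "my-store" (both names are illustrative):

    // Sketch: require that the store has caught up to an observed position before answering.
    final Position seen = Position.fromMap(mkMap(mkEntry("input-topic", mkMap(mkEntry(0, 3L)))));
    final StateQueryRequest<String> request = StateQueryRequest.inStore("my-store")
        .withQuery(KeyQuery.<String, String>withKey("one"))
        .withPositionBound(PositionBound.at(seen)); // PositionBound.unbounded() skips the check
    final StateQueryResult<String> result = streams.query(request);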
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class WriteConsistencyVectorTest, method shouldSendConsistencyVectorToChangelogTopic.
@Test
public void shouldSendConsistencyVectorToChangelogTopic() {
    // withComponent mutates the Position in place, so the returned reference can be ignored here.
    final Position position = Position.emptyPosition();
    position.withComponent(INPUT_TOPIC_NAME, INPUT_PARTITION, INPUT_OFFSET);
    context.setRecordContext(new ProcessorRecordContext(-1, INPUT_OFFSET, INPUT_PARTITION, INPUT_TOPIC_NAME, new RecordHeaders()));

    final Headers headers = new RecordHeaders();
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position).array()));

    // EasyMock record phase: expect exactly this send, with the consistency headers attached.
    recordCollector.send(CHANGELOG_PARTITION.topic(), KEY_BYTES, VALUE_BYTES, headers,
        CHANGELOG_PARTITION.partition(), TIMESTAMP, BYTES_KEY_SERIALIZER, BYTEARRAY_VALUE_SERIALIZER);
    final StreamTask task = EasyMock.createNiceMock(StreamTask.class);
    replay(recordCollector, task);

    // Exercise and verify: logChange should forward the expected record to the collector.
    context.transitionToActive(task, recordCollector, null);
    context.logChange(REGISTERED_STORE_NAME, KEY_BYTES, VALUE_BYTES, TIMESTAMP, position);
    verify(recordCollector);
}
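On the restore side, Kafka Streams folds these headers back into the store's position via ChangelogRecordDeserializationHelper. A minimal sketch, assuming the helper's applyChecksAndUpdatePosition signature and a changelog record like the one expected above (the record variable is illustrative):

    // Sketch: decode the consistency headers of a restored changelog record.
    final Position restoredPosition = Position.emptyPosition();
    ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(
        record,            // ConsumerRecord<byte[], byte[]> carrying the headers above
        true,              // consistency enabled, so the position header is decoded
        restoredPosition); // mutated in place with the record's position components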
Use of org.apache.kafka.streams.query.Position in project ksql by confluentinc.
The class KsMaterializedTableIQv2Test, method getTestPosition.
private static Position getTestPosition() {
    // withComponent mutates the Position in place, so the returned reference can be ignored here.
    final Position position = Position.emptyPosition();
    position.withComponent(TOPIC, PARTITION, OFFSET);
    return position;
}
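The mutating and map-based construction styles are interchangeable; a quick equivalence sketch:

    // Both constructions yield equal positions, since Position implements value equality.
    final Position a = Position.emptyPosition().withComponent(TOPIC, PARTITION, OFFSET);
    final Position b = Position.fromMap(ImmutableMap.of(TOPIC, ImmutableMap.of(PARTITION, OFFSET)));
    assertEquals(a, b);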
Use of org.apache.kafka.streams.query.Position in project ksql by confluentinc.
The class KsMaterializedWindowTable, method get.
public KsMaterializedQueryResult<WindowedRow> get(final int partition,
        final Range<Instant> windowStartBounds, final Range<Instant> windowEndBounds,
        final Optional<Position> position) {
    try {
        final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store =
            stateStore.store(QueryableStoreTypes.timestampedWindowStore(), partition);
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
        final KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>> iterator =
            cacheBypassFetcherAll.fetchAll(store, lower, upper);
        return KsMaterializedQueryResult.rowIterator(
            Streams.stream(IteratorUtil.onComplete(iterator, iterator::close))
                .map(next -> {
                    final Instant windowStart = next.key.window().startTime();
                    if (!windowStartBounds.contains(windowStart)) {
                        return null;
                    }
                    final Instant windowEnd = next.key.window().endTime();
                    if (!windowEndBounds.contains(windowEnd)) {
                        return null;
                    }
                    final TimeWindow window = new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
                    final WindowedRow row = WindowedRow.of(
                        stateStore.schema(),
                        new Windowed<>(next.key.key(), window),
                        next.value.value(),
                        next.value.timestamp());
                    return row;
                })
                .filter(Objects::nonNull)
                .iterator());
    } catch (final Exception e) {
        throw new MaterializationException("Failed to scan materialized table", e);
    }
}
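Note the double filtering: fetchAll ranges over window start times only, so both bounds are re-checked per row. The same fetch-then-filter pattern with the plain Kafka Streams API, as a minimal sketch (the streams instance, store name, and bound variables are illustrative):

    // Sketch: scan a window store and re-check both bounds on each returned window.
    final ReadOnlyWindowStore<String, String> store = streams.store(
        StoreQueryParameters.fromNameAndType("my-window-store", QueryableStoreTypes.<String, String>windowStore()));
    try (final KeyValueIterator<Windowed<String>, String> it = store.fetchAll(lower, upper)) {
        while (it.hasNext()) {
            final KeyValue<Windowed<String>, String> next = it.next();
            final Instant windowStart = next.key.window().startTime();
            final Instant windowEnd = next.key.window().endTime();
            // fetchAll bounds only the window start, so the end bound must be checked here.
            if (windowStartBounds.contains(windowStart) && windowEndBounds.contains(windowEnd)) {
                // next.key.key() and next.value hold the key and value for this window
            }
        }
    }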