Example 26 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

the class RocksDBStoreTest method createChangelogRecord.

private ConsumerRecord<byte[], byte[]> createChangelogRecord(final byte[] key, final byte[] value, final String topic, final int partition, final long offset) {
    final Headers headers = new RecordHeaders();
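    // Build the consistency vector for the record's source topic/partition/offset.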
    Position position = Position.emptyPosition();
    position = position.withComponent(topic, partition, offset);
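    // Attach the changelog-format version and the serialized Position as record headers.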
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY, PositionSerde.serialize(position).array()));
    return new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1, key, value, headers, Optional.empty());
}
Also used : Position(org.apache.kafka.streams.query.Position) Headers(org.apache.kafka.common.header.Headers) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)
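
A quick sanity check for records built this way is to read the Position back out of the header. A minimal sketch, assuming PositionSerde.deserialize(ByteBuffer) is the inverse of the serialize call above; the record arguments here are illustrative:

import java.nio.ByteBuffer;
import org.apache.kafka.common.header.Header;

final ConsumerRecord<byte[], byte[]> record =
    createChangelogRecord(new byte[] {1}, new byte[] {2}, "input-topic", 0, 42L);
final Header positionHeader =
    record.headers().lastHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
// Round-trip: the decoded vector should contain the component ("input-topic", 0, 42L).
final Position decoded = PositionSerde.deserialize(ByteBuffer.wrap(positionHeader.value()));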

Example 27 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

the class RocksDBStoreTest method shouldMatchPositionAfterPut.

@Test
public void shouldMatchPositionAfterPut() {
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);
    context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders()));
    rocksDBStore.put(new Bytes(stringSerializer.serialize(null, "one")), stringSerializer.serialize(null, "A"));
    context.setRecordContext(new ProcessorRecordContext(0, 2, 0, "", new RecordHeaders()));
    rocksDBStore.put(new Bytes(stringSerializer.serialize(null, "two")), stringSerializer.serialize(null, "B"));
    context.setRecordContext(new ProcessorRecordContext(0, 3, 0, "", new RecordHeaders()));
    rocksDBStore.put(new Bytes(stringSerializer.serialize(null, "three")), stringSerializer.serialize(null, "C"));
    final Position expected = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 3L)))));
    final Position actual = rocksDBStore.getPosition();
    assertEquals(expected, actual);
}
Also used : Bytes(org.apache.kafka.common.utils.Bytes) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) Position(org.apache.kafka.streams.query.Position) Test(org.junit.Test)
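
The assertion relies on Position updates being monotonic per component: repeated withComponent calls for the same topic-partition keep the largest offset seen so far. A minimal standalone sketch of that property, reusing the same mkMap/mkEntry test utilities:

final Position p = Position.emptyPosition();
p.withComponent("", 0, 1L);
p.withComponent("", 0, 3L);
// A stale offset does not move the component backwards; it stays at 3.
p.withComponent("", 0, 2L);
assertEquals(Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 3L))))), p);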

Example 28 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

the class WriteConsistencyVectorTest method shouldSendConsistencyVectorToChangelogTopic.

@Test
public void shouldSendConsistencyVectorToChangelogTopic() {
    final Position position = Position.emptyPosition();
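    // withComponent mutates the Position in place, so the returned reference can be ignored here.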
    position.withComponent(INPUT_TOPIC_NAME, INPUT_PARTITION, INPUT_OFFSET);
    context.setRecordContext(new ProcessorRecordContext(-1, INPUT_OFFSET, INPUT_PARTITION, INPUT_TOPIC_NAME, new RecordHeaders()));
    final Headers headers = new RecordHeaders();
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY, PositionSerde.serialize(position).array()));
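    // EasyMock is still in record mode: this send(...) registers the changelog write we expect to see.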
    recordCollector.send(CHANGELOG_PARTITION.topic(), KEY_BYTES, VALUE_BYTES, headers, CHANGELOG_PARTITION.partition(), TIMESTAMP, BYTES_KEY_SERIALIZER, BYTEARRAY_VALUE_SERIALIZER);
    final StreamTask task = EasyMock.createNiceMock(StreamTask.class);
    replay(recordCollector, task);
    context.transitionToActive(task, recordCollector, null);
    context.logChange(REGISTERED_STORE_NAME, KEY_BYTES, VALUE_BYTES, TIMESTAMP, position);
    verify(recordCollector);
}
Also used : Position(org.apache.kafka.streams.query.Position) Headers(org.apache.kafka.common.header.Headers) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Test(org.junit.Test)
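
The vector written to the changelog here is what lets IQv2 callers ask for read-your-writes consistency. A hedged sketch of the query side, using the public IQv2 API; kafkaStreams and the store name are illustrative:

final KeyQuery<String, ValueAndTimestamp<String>> query = KeyQuery.withKey("one");
final StateQueryRequest<ValueAndTimestamp<String>> request = StateQueryRequest
    .inStore("store-name")
    .withQuery(query)
    // Only answer once the store has caught up to the given position.
    .withPositionBound(PositionBound.at(position));
final StateQueryResult<ValueAndTimestamp<String>> result = kafkaStreams.query(request);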

Example 29 with Position

use of org.apache.kafka.streams.query.Position in project ksql by confluentinc.

the class KsMaterializedTableIQv2Test method getTestPosition.

private static Position getTestPosition() {
    // withComponent mutates the Position in place, so no reassignment is needed.
    final Position position = Position.emptyPosition();
    position.withComponent(TOPIC, PARTITION, OFFSET);
    return position;
}
Also used : Position(org.apache.kafka.streams.query.Position)
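
Position.fromMap offers an equivalent one-shot construction. A minimal sketch of the equivalence, reusing the test's TOPIC/PARTITION/OFFSET constants:

final Position viaComponent = Position.emptyPosition().withComponent(TOPIC, PARTITION, OFFSET);
final Position viaMap = Position.fromMap(ImmutableMap.of(TOPIC, ImmutableMap.of(PARTITION, OFFSET)));
// Two routes to the same vector.
assertEquals(viaMap, viaComponent);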

Example 30 with Position

use of org.apache.kafka.streams.query.Position in project ksql by confluentinc.

the class KsMaterializedWindowTable method get.

public KsMaterializedQueryResult<WindowedRow> get(final int partition, final Range<Instant> windowStartBounds, final Range<Instant> windowEndBounds, final Optional<Position> position) {
    try {
        final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore.store(QueryableStoreTypes.timestampedWindowStore(), partition);
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
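        // Fetch every window in [lower, upper], bypassing the Streams cache so reads hit the store directly.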
        final KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>> iterator = cacheBypassFetcherAll.fetchAll(store, lower, upper);
        return KsMaterializedQueryResult.rowIterator(Streams.stream(IteratorUtil.onComplete(iterator, iterator::close)).map(next -> {
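            // Drop windows whose start or end falls outside the requested bounds.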
            final Instant windowStart = next.key.window().startTime();
            if (!windowStartBounds.contains(windowStart)) {
                return null;
            }
            final Instant windowEnd = next.key.window().endTime();
            if (!windowEndBounds.contains(windowEnd)) {
                return null;
            }
            final TimeWindow window = new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
            final WindowedRow row = WindowedRow.of(stateStore.schema(), new Windowed<>(next.key.key(), window), next.value.value(), next.value.timestamp());
            return row;
        }).filter(Objects::nonNull).iterator());
    } catch (final Exception e) {
        throw new MaterializationException("Failed to scan materialized table", e);
    }
}
Also used : ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) Windowed(org.apache.kafka.streams.kstream.Windowed) ReadOnlyWindowStore(org.apache.kafka.streams.state.ReadOnlyWindowStore) MaterializationException(io.confluent.ksql.execution.streams.materialization.MaterializationException) WindowStoreCacheBypassFetcherAll(io.confluent.ksql.execution.streams.materialization.ks.WindowStoreCacheBypass.WindowStoreCacheBypassFetcherAll) IteratorUtil(io.confluent.ksql.util.IteratorUtil) ImmutableList(com.google.common.collect.ImmutableList) WindowedRow(io.confluent.ksql.execution.streams.materialization.WindowedRow) Duration(java.time.Duration) WindowStoreCacheBypassFetcherRange(io.confluent.ksql.execution.streams.materialization.ks.WindowStoreCacheBypass.WindowStoreCacheBypassFetcherRange) Position(org.apache.kafka.streams.query.Position) Range(com.google.common.collect.Range) KeyValue(org.apache.kafka.streams.KeyValue) MaterializedWindowedTable(io.confluent.ksql.execution.streams.materialization.MaterializedWindowedTable) Streams(com.google.common.collect.Streams) Instant(java.time.Instant) StreamsMaterializedWindowedTable(io.confluent.ksql.execution.streams.materialization.StreamsMaterializedWindowedTable) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) Objects(java.util.Objects) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) WindowStoreIterator(org.apache.kafka.streams.state.WindowStoreIterator) GenericRow(io.confluent.ksql.GenericRow) Optional(java.util.Optional) GenericKey(io.confluent.ksql.GenericKey) Builder(com.google.common.collect.ImmutableList.Builder) TimeWindow(org.apache.kafka.streams.kstream.internals.TimeWindow) WindowStoreCacheBypassFetcher(io.confluent.ksql.execution.streams.materialization.ks.WindowStoreCacheBypass.WindowStoreCacheBypassFetcher)
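
A hedged usage sketch: scanning one partition for windows that both start and end within the last hour. The table variable stands in for a constructed KsMaterializedWindowTable, and no position bound is passed:

final Instant now = Instant.now();
final Range<Instant> lastHour = Range.closed(now.minus(Duration.ofHours(1)), now);
// Windows outside either bound are filtered out by the scan above.
final KsMaterializedQueryResult<WindowedRow> result =
    table.get(0, lastHour, lastHour, Optional.empty());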

Aggregations

Position (org.apache.kafka.streams.query.Position): 33
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 17
Test (org.junit.Test): 17
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 11
Windowed (org.apache.kafka.streams.kstream.Windowed): 9
Objects (java.util.Objects): 7
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 7
Bytes (org.apache.kafka.common.utils.Bytes): 7
QueryResult (org.apache.kafka.streams.query.QueryResult): 7
KeyValueIterator (org.apache.kafka.streams.state.KeyValueIterator): 7
Headers (org.apache.kafka.common.header.Headers): 6
PositionBound (org.apache.kafka.streams.query.PositionBound): 6
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 5
KeyValue (org.apache.kafka.streams.KeyValue): 5
ValueAndTimestamp (org.apache.kafka.streams.state.ValueAndTimestamp): 5
ImmutableList (com.google.common.collect.ImmutableList): 4
Streams (com.google.common.collect.Streams): 4
GenericKey (io.confluent.ksql.GenericKey): 4
GenericRow (io.confluent.ksql.GenericRow): 4
MaterializationException (io.confluent.ksql.execution.streams.materialization.MaterializationException): 4