Search in sources :

Example 16 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

Shown below: the put method of the InMemoryWindowStore class.

/**
 * Writes (or, for a null value, deletes) a record into the in-memory segment for
 * {@code windowStartTimestamp}, dropping the write if that window has already
 * expired out of the retention period.
 *
 * @param key                  the record key
 * @param value                the serialized value, or {@code null} to delete
 * @param windowStartTimestamp the start timestamp of the record's window
 */
@Override
public void put(final Bytes key, final byte[] value, final long windowStartTimestamp) {
    removeExpiredSegments();
    // Stream time only ever advances; it defines the live end of the retention window.
    observedStreamTime = Math.max(observedStreamTime, windowStartTimestamp);

    if (windowStartTimestamp <= observedStreamTime - retentionPeriod) {
        // The window has fallen out of retention: drop the record and note it on the sensor.
        expiredRecordSensor.record(1.0d, ProcessorContextUtils.currentSystemTime(context));
        LOG.warn("Skipping record for expired segment.");
    } else {
        if (value != null) {
            maybeUpdateSeqnumForDups();
            final Bytes keyBytes = retainDuplicates ? wrapForDups(key, seqnum) : key;
            // computeIfAbsent returns the (existing or new) segment, so insert in one
            // call rather than computeIfAbsent followed by a separate get().
            segmentMap
                .computeIfAbsent(windowStartTimestamp, t -> new ConcurrentSkipListMap<>())
                .put(keyBytes, value);
        } else if (!retainDuplicates) {
            // Skip if value is null and duplicates are allowed since this delete is a no-op
            segmentMap.computeIfPresent(windowStartTimestamp, (t, kvMap) -> {
                kvMap.remove(key);
                if (kvMap.isEmpty()) {
                    // Drop the now-empty segment so it does not linger in the map.
                    segmentMap.remove(windowStartTimestamp);
                }
                return kvMap;
            });
        }
    }

    // The store position advances for every input record, even ones dropped as expired.
    StoreQueryUtils.updatePosition(position, stateStoreContext);
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) TaskMetrics(org.apache.kafka.streams.processor.internals.metrics.TaskMetrics) LoggerFactory(org.slf4j.LoggerFactory) PositionBound(org.apache.kafka.streams.query.PositionBound) RecordBatchingStateRestoreCallback(org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback) WindowStore(org.apache.kafka.streams.state.WindowStore) ByteBuffer(java.nio.ByteBuffer) StoreToProcessorContextAdapter(org.apache.kafka.streams.processor.internals.StoreToProcessorContextAdapter) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) Windowed(org.apache.kafka.streams.kstream.Windowed) Map(java.util.Map) ProcessorContextUtils(org.apache.kafka.streams.processor.internals.ProcessorContextUtils) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) NoSuchElementException(java.util.NoSuchElementException) QueryResult(org.apache.kafka.streams.query.QueryResult) Sensor(org.apache.kafka.common.metrics.Sensor) QueryConfig(org.apache.kafka.streams.query.QueryConfig) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) Position(org.apache.kafka.streams.query.Position) Query(org.apache.kafka.streams.query.Query) WindowKeySchema.extractStoreTimestamp(org.apache.kafka.streams.state.internals.WindowKeySchema.extractStoreTimestamp) ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) KeyValue(org.apache.kafka.streams.KeyValue) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ChangelogRecordDeserializationHelper(org.apache.kafka.streams.processor.internals.ChangelogRecordDeserializationHelper) Bytes(org.apache.kafka.common.utils.Bytes) Objects(java.util.Objects) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) 
WindowKeySchema.extractStoreKeyBytes(org.apache.kafka.streams.state.internals.WindowKeySchema.extractStoreKeyBytes) StateStore(org.apache.kafka.streams.processor.StateStore) WindowStoreIterator(org.apache.kafka.streams.state.WindowStoreIterator) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TimeWindow(org.apache.kafka.streams.kstream.internals.TimeWindow) IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED(org.apache.kafka.streams.StreamsConfig.InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED) Bytes(org.apache.kafka.common.utils.Bytes) WindowKeySchema.extractStoreKeyBytes(org.apache.kafka.streams.state.internals.WindowKeySchema.extractStoreKeyBytes)

Example 17 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

Shown below: the getPosition method of the CachingKeyValueStore class.

/**
 * Returns the combined position of the caching layer and the wrapped store.
 * Queries are evaluated against this merged view, so callers must see the
 * same combined progress here.
 */
@Override
public Position getPosition() {
    // Position#merge folds the argument into the receiver and returns it,
    // so the two merges can be chained onto a fresh empty position.
    return Position.emptyPosition()
        .merge(position)
        .merge(wrapped().getPosition());
}
Also used : Position(org.apache.kafka.streams.query.Position)

Example 18 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

Shown below: the query method of the CachingKeyValueStore class.

// Serves an IQv2 query. Queries with a registered cache handler are answered
// under the store's read lock against the merged (cache + wrapped store)
// position; all other query types are delegated straight to the wrapped store.
@SuppressWarnings("unchecked")
@Override
public <R> QueryResult<R> query(final Query<R> query, final PositionBound positionBound, final QueryConfig config) {
    // Capture the start time only when the caller asked for execution info.
    final long start = config.isCollectExecutionInfo() ? System.nanoTime() : -1L;
    final QueryResult<R> result;
    final CacheQueryHandler handler = queryHandlers.get(query.getClass());
    if (handler == null) {
        // No cache-level handler for this query type: delegate to the wrapped store.
        result = wrapped().query(query, positionBound, config);
    } else {
        final int partition = context.taskId().partition();
        // Hold the read lock across the open-check, position read, and handler call
        // so the cache cannot be closed or flushed out from under the query.
        final Lock lock = this.lock.readLock();
        lock.lock();
        try {
            validateStoreOpen();
            final Position mergedPosition = getPosition();
            // We use the merged position since the cache and the store may be at different positions
            if (!StoreQueryUtils.isPermitted(mergedPosition, positionBound, partition)) {
                // The store has not yet caught up to the caller's required bound.
                result = QueryResult.notUpToBound(mergedPosition, positionBound, partition);
            } else {
                result = (QueryResult<R>) handler.apply(query, mergedPosition, positionBound, config, this);
            }
        } finally {
            lock.unlock();
        }
    }
    if (config.isCollectExecutionInfo()) {
        // Execution info is appended outside the lock; timing covers the whole call.
        result.addExecutionInfo("Handled in " + getClass() + " in " + (System.nanoTime() - start) + "ns");
    }
    return result;
}
Also used : Position(org.apache.kafka.streams.query.Position) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) ReadWriteLock(java.util.concurrent.locks.ReadWriteLock) Lock(java.util.concurrent.locks.Lock)

Example 19 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

Shown below: the shouldMatchPositionAfterPut method of the CachingInMemoryKeyValueStoreTest class.

@Test
public void shouldMatchPositionAfterPut() {
    // Write three records at offsets 1..3 and verify the store's position
    // reflects the offset of the last record processed.
    final String[] keys = {"key1", "key2", "key3"};
    final String[] values = {"value1", "value2", "value3"};
    for (int i = 0; i < keys.length; i++) {
        context.setRecordContext(new ProcessorRecordContext(0, i + 1, 0, "", new RecordHeaders()));
        store.put(bytesKey(keys[i]), bytesValue(values[i]));
    }

    final Position expected = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 3L)))));
    final Position actual = store.getPosition();
    assertEquals(expected, actual);
}
Also used : RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) Position(org.apache.kafka.streams.query.Position) Test(org.junit.Test)

Example 20 with Position

use of org.apache.kafka.streams.query.Position in project kafka by apache.

Shown below: the getChangelogRecords method of the AbstractRocksDBSegmentedBytesStoreTest class.

/**
 * Builds three changelog records for key "a" carrying consistency-position
 * headers at offsets 1..3. Note that all three records share the same mutable
 * Headers instance, exactly as the records are constructed here.
 */
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecords() {
    final int[] windowIndexes = {0, 2, 3};
    final long[] recordValues = {50L, 100L, 200L};

    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    final Headers headers = new RecordHeaders();
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);

    Position position = Position.emptyPosition();
    for (int i = 0; i < windowIndexes.length; i++) {
        if (i > 0) {
            // The previous iteration's position header is replaced, not stacked;
            // no removal happens after the final record, matching the original.
            headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
        }
        position = position.withComponent("", 0, i + 1);
        headers.add(new RecordHeader(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY, PositionSerde.serialize(position).array()));
        records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1, serializeKey(new Windowed<>("a", windows[windowIndexes[i]])).get(), serializeValue(recordValues[i]), headers, Optional.empty()));
    }
    return records;
}
Also used : Windowed(org.apache.kafka.streams.kstream.Windowed) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Position(org.apache.kafka.streams.query.Position) Headers(org.apache.kafka.common.header.Headers) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) ArrayList(java.util.ArrayList) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)

Aggregations

Position (org.apache.kafka.streams.query.Position)33 RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders)17 Test (org.junit.Test)17 ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext)11 Windowed (org.apache.kafka.streams.kstream.Windowed)9 Objects (java.util.Objects)7 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)7 Bytes (org.apache.kafka.common.utils.Bytes)7 QueryResult (org.apache.kafka.streams.query.QueryResult)7 KeyValueIterator (org.apache.kafka.streams.state.KeyValueIterator)7 Headers (org.apache.kafka.common.header.Headers)6 PositionBound (org.apache.kafka.streams.query.PositionBound)6 RecordHeader (org.apache.kafka.common.header.internals.RecordHeader)5 KeyValue (org.apache.kafka.streams.KeyValue)5 ValueAndTimestamp (org.apache.kafka.streams.state.ValueAndTimestamp)5 ImmutableList (com.google.common.collect.ImmutableList)4 Streams (com.google.common.collect.Streams)4 GenericKey (io.confluent.ksql.GenericKey)4 GenericRow (io.confluent.ksql.GenericRow)4 MaterializationException (io.confluent.ksql.execution.streams.materialization.MaterializationException)4