Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class InMemoryWindowStore, method put.
@Override
public void put(final Bytes key, final byte[] value, final long windowStartTimestamp) {
    removeExpiredSegments();
    observedStreamTime = Math.max(observedStreamTime, windowStartTimestamp);

    if (windowStartTimestamp <= observedStreamTime - retentionPeriod) {
        expiredRecordSensor.record(1.0d, ProcessorContextUtils.currentSystemTime(context));
        LOG.warn("Skipping record for expired segment.");
    } else {
        if (value != null) {
            maybeUpdateSeqnumForDups();
            final Bytes keyBytes = retainDuplicates ? wrapForDups(key, seqnum) : key;
            segmentMap.computeIfAbsent(windowStartTimestamp, t -> new ConcurrentSkipListMap<>());
            segmentMap.get(windowStartTimestamp).put(keyBytes, value);
        } else if (!retainDuplicates) {
            // Skip if value is null and duplicates are allowed since this delete is a no-op
            segmentMap.computeIfPresent(windowStartTimestamp, (t, kvMap) -> {
                kvMap.remove(key);
                if (kvMap.isEmpty()) {
                    segmentMap.remove(windowStartTimestamp);
                }
                return kvMap;
            });
        }
    }

    StoreQueryUtils.updatePosition(position, stateStoreContext);
}
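The final call to StoreQueryUtils.updatePosition is what advances the store's Position: it folds the topic, partition, and offset of the record currently being processed into the position. The snippet above does not show its body, so here is a minimal sketch of that bookkeeping, assuming the context exposes record metadata via StateStoreContext.recordMetadata() as in current Kafka Streams:

import org.apache.kafka.streams.processor.StateStoreContext;
import org.apache.kafka.streams.processor.api.RecordMetadata;
import org.apache.kafka.streams.query.Position;

// Sketch: fold the current record's (topic, partition, offset)
// into the store's position; skip if no record is being processed.
static void updatePositionSketch(final Position position, final StateStoreContext context) {
    if (context != null && context.recordMetadata().isPresent()) {
        final RecordMetadata meta = context.recordMetadata().get();
        position.withComponent(meta.topic(), meta.partition(), meta.offset());
    }
}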
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class CachingKeyValueStore, method getPosition.
@Override
public Position getPosition() {
    // We return the merged position since the query uses the merged position as well
    final Position mergedPosition = Position.emptyPosition();
    mergedPosition.merge(position);
    mergedPosition.merge(wrapped().getPosition());
    return mergedPosition;
}
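Position.merge is a component-wise maximum over (topic, partition) -> offset entries, so merging never loses progress from either the cache or the wrapped store. A standalone illustration, with the topic name and offsets invented for the example:

import org.apache.kafka.streams.query.Position;

// merge keeps the maximum offset seen for each topic-partition.
final Position cachePosition = Position.emptyPosition()
    .withComponent("input-topic", 0, 5L);
final Position storePosition = Position.emptyPosition()
    .withComponent("input-topic", 0, 3L)
    .withComponent("input-topic", 1, 7L);

final Position merged = Position.emptyPosition()
    .merge(cachePosition)
    .merge(storePosition);
// merged: input-topic-0 -> 5 (max of 5 and 3), input-topic-1 -> 7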
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class CachingKeyValueStore, method query.
@SuppressWarnings("unchecked")
@Override
public <R> QueryResult<R> query(final Query<R> query,
                                final PositionBound positionBound,
                                final QueryConfig config) {
    final long start = config.isCollectExecutionInfo() ? System.nanoTime() : -1L;
    final QueryResult<R> result;

    final CacheQueryHandler handler = queryHandlers.get(query.getClass());
    if (handler == null) {
        result = wrapped().query(query, positionBound, config);
    } else {
        final int partition = context.taskId().partition();
        final Lock lock = this.lock.readLock();
        lock.lock();
        try {
            validateStoreOpen();
            final Position mergedPosition = getPosition();
            // We use the merged position since the cache and the store may be at different positions
            if (!StoreQueryUtils.isPermitted(mergedPosition, positionBound, partition)) {
                result = QueryResult.notUpToBound(mergedPosition, positionBound, partition);
            } else {
                result = (QueryResult<R>) handler.apply(query, mergedPosition, positionBound, config, this);
            }
        } finally {
            lock.unlock();
        }
    }

    if (config.isCollectExecutionInfo()) {
        result.addExecutionInfo("Handled in " + getClass() + " in " + (System.nanoTime() - start) + "ns");
    }
    return result;
}
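From the application side, the positionBound argument arrives through an interactive-query request. A hedged sketch of issuing such a query via the KIP-796 API, assuming a running KafkaStreams instance named streams, a hypothetical store name "my-store", and a previously observed Position knownPosition:

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.query.KeyQuery;
import org.apache.kafka.streams.query.PositionBound;
import org.apache.kafka.streams.query.StateQueryRequest;
import org.apache.kafka.streams.query.StateQueryResult;

// Ask for key1, but only accept an answer from a store that has caught
// up to knownPosition; otherwise the store replies with notUpToBound.
final KeyQuery<String, String> query = KeyQuery.withKey("key1");
final StateQueryRequest<String> request = StateQueryRequest.inStore("my-store")
    .withQuery(query)
    .withPositionBound(PositionBound.at(knownPosition));

final StateQueryResult<String> result = streams.query(request);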
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class CachingInMemoryKeyValueStoreTest, method shouldMatchPositionAfterPut.
@Test
public void shouldMatchPositionAfterPut() {
    context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders()));
    store.put(bytesKey("key1"), bytesValue("value1"));
    context.setRecordContext(new ProcessorRecordContext(0, 2, 0, "", new RecordHeaders()));
    store.put(bytesKey("key2"), bytesValue("value2"));
    context.setRecordContext(new ProcessorRecordContext(0, 3, 0, "", new RecordHeaders()));
    store.put(bytesKey("key3"), bytesValue("value3"));

    final Position expected = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 3L)))));
    final Position actual = store.getPosition();
    assertEquals(expected, actual);
}
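The assertion works because Position implements value equality over its (topic -> partition -> offset) map. Building the same position two ways illustrates this; the empty-string topic mirrors the dummy topic used in the test's record contexts:

import java.util.Map;
import org.apache.kafka.streams.query.Position;

// Two ways to build the same position; they compare equal by value.
final Position viaMap = Position.fromMap(Map.of("", Map.of(0, 3L)));
final Position viaComponent = Position.emptyPosition().withComponent("", 0, 3L);
assert viaMap.equals(viaComponent);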
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
The class AbstractRocksDBSegmentedBytesStoreTest, method getChangelogRecords.
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecords() {
    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    final Headers headers = new RecordHeaders();
    Position position1 = Position.emptyPosition();

    position1 = position1.withComponent("", 0, 1);
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[0])).get(), serializeValue(50L), headers, Optional.empty()));

    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("", 0, 2);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[2])).get(), serializeValue(100L), headers, Optional.empty()));

    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("", 0, 3);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array()));
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[3])).get(), serializeValue(200L), headers, Optional.empty()));

    return records;
}
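Each changelog record here carries its position serialized into a header, which the restore path reads back to rebuild the store's position. A round-trip sketch of that serde, assuming the matching PositionSerde.deserialize from Kafka Streams' internals (topic name and offset invented for illustration):

import java.nio.ByteBuffer;
import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.state.internals.PositionSerde;

// Serialize a position as in the header above, then read it back;
// the round trip preserves all (topic, partition, offset) components.
final Position original = Position.emptyPosition().withComponent("changelog-topic", 0, 42L);
final ByteBuffer buffer = PositionSerde.serialize(original);
final Position restored = PositionSerde.deserialize(buffer);
assert original.equals(restored);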