Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.
The class StoreQueryUtilsTest, method shouldReturnErrorOnBoundViolation:
@Test
public void shouldReturnErrorOnBoundViolation() {
    @SuppressWarnings("unchecked")
    final KeyQuery<String, Integer> query = Mockito.mock(KeyQuery.class);
    @SuppressWarnings("unchecked")
    final KeyValueStore<String, Integer> store = Mockito.mock(KeyValueStore.class);
    final StateStoreContext context = Mockito.mock(StateStoreContext.class);
    Mockito.when(context.taskId()).thenReturn(new TaskId(0, 0));

    final QueryResult<Integer> queryResult = StoreQueryUtils.handleBasicQueries(
        query,
        PositionBound.at(Position.emptyPosition().withComponent("topic", 0, 1)),
        new QueryConfig(false),
        store,
        Position.emptyPosition().withComponent("topic", 0, 0),
        context
    );

    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.NOT_UP_TO_BOUND));
    assertThat(
        queryResult.getFailureMessage(),
        is("For store partition 0, the current position Position{position={topic={0=0}}}"
            + " is not yet up to the bound"
            + " PositionBound{position=Position{position={topic={0=1}}}}")
    );
}
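The NOT_UP_TO_BOUND failure comes from the position check that handleBasicQueries performs before dispatching the query. A minimal sketch of that check in isolation, assuming StoreQueryUtils.isPermitted is accessible in your Kafka version:

// The store has only reached offset 0 on partition 0 of "topic",
// but the bound requires offset 1, so the query is rejected.
final Position current = Position.emptyPosition().withComponent("topic", 0, 0);
final PositionBound bound = PositionBound.at(Position.emptyPosition().withComponent("topic", 0, 1));
assertThat(StoreQueryUtils.isPermitted(current, bound, 0), is(false));
// Once the store catches up to offset 1, the same bound is satisfied.
assertThat(StoreQueryUtils.isPermitted(current.withComponent("topic", 0, 1), bound, 0), is(true));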
Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.
The class EosIntegrationTest, method verifyStateIsInStoreAndOffsetsAreInCheckpoint:
private void verifyStateIsInStoreAndOffsetsAreInCheckpoint(final int partition, final Set<KeyValue<Long, Long>> expectedState) throws IOException {
    final String stateStoreDir = stateTmpDir + File.separator + "appDir" + File.separator
        + applicationId + File.separator + "0_" + partition + File.separator;

    // Verify that the data in the state store on disk is fully up-to-date
    final StateStoreContext context = new MockInternalProcessorContext(new Properties(), new TaskId(0, 0), new File(stateStoreDir));
    final MockKeyValueStore stateStore = new MockKeyValueStore("store", false);
    final RocksDBStore store = (RocksDBStore) new RocksDbKeyValueBytesStoreSupplier(storeName, false).get();
    store.init(context, stateStore);

    // Remove every key-value pair found on disk from the expected set;
    // anything left over in expectedState indicates missing data.
    store.all().forEachRemaining(kv -> {
        final KeyValue<Long, Long> kv2 = new KeyValue<>(new BigInteger(kv.key.get()).longValue(), new BigInteger(kv.value).longValue());
        expectedState.remove(kv2);
    });

    // Verify that the checkpointed offsets match exactly with the max offset of the records in the changelog
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(stateStoreDir + ".checkpoint"));
    final Map<TopicPartition, Long> checkpointedOffsets = checkpoint.read();
    checkpointedOffsets.forEach(this::verifyChangelogMaxRecordOffsetMatchesCheckpointedOffset);
}
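For context, the .checkpoint file read above is a small plain-text file of topic-partition-to-offset entries managed by OffsetCheckpoint. A minimal standalone sketch of the round trip (the path, topic, and offset here are illustrative, not from the test):

final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File("/tmp/example-state/0_0/.checkpoint")); // illustrative path
checkpoint.write(Collections.singletonMap(new TopicPartition("app-store-changelog", 0), 42L)); // illustrative topic and offset
final Map<TopicPartition, Long> offsets = checkpoint.read(); // {app-store-changelog-0=42}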
Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.
The class TimeOrderedKeyValueBufferTest, method shouldFlush:
@Test
public void shouldFlush() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    putRecord(buffer, context, 2L, 0L, "asdf", "2093j");
    putRecord(buffer, context, 1L, 1L, "zxcv", "3gon4i");
    putRecord(buffer, context, 0L, 2L, "deleteme", "deadbeef");

    // replace "deleteme" with a tombstone
    buffer.evictWhile(() -> buffer.minTimestamp() < 1, kv -> {
    });

    // flush everything to the changelog
    buffer.flush();

    // The buffer should serialize the buffer time and the value as byte[],
    // which we can't compare for equality using ProducerRecord.
    // As a workaround, I'm deserializing them into a KeyValue, just for ease of testing.
    final List<ProducerRecord<String, KeyValue<Long, BufferValue>>> collected =
        ((MockRecordCollector) context.recordCollector())
            .collected()
            .stream()
            .map(pr -> {
                final KeyValue<Long, BufferValue> niceValue;
                if (pr.value() == null) {
                    niceValue = null;
                } else {
                    final byte[] serializedValue = (byte[]) pr.value();
                    final ByteBuffer valueBuffer = ByteBuffer.wrap(serializedValue);
                    final BufferValue contextualRecord = BufferValue.deserialize(valueBuffer);
                    final long timestamp = valueBuffer.getLong();
                    niceValue = new KeyValue<>(timestamp, contextualRecord);
                }
                return new ProducerRecord<>(pr.topic(), pr.partition(), pr.timestamp(), pr.key().toString(), niceValue, pr.headers());
            })
            .collect(Collectors.toList());

    assertThat(collected, is(asList(
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
                             0,
                             null, // timestamp: the producer will assign one
                             "deleteme",
                             null,
                             new RecordHeaders()),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
                             0,
                             null,
                             "zxcv",
                             new KeyValue<>(1L, getBufferValue("3gon4i", 1)),
                             CHANGELOG_HEADERS),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
                             0,
                             null,
                             "asdf",
                             new KeyValue<>(2L, getBufferValue("2093j", 0)),
                             CHANGELOG_HEADERS))));
    cleanup(context, buffer);
}
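The map step above relies on the changelog value layout: the serialized BufferValue comes first, followed by the buffer timestamp as a trailing 8-byte long. A minimal decode sketch under that assumption (serializedValue stands for one non-null changelog value):

final ByteBuffer valueBuffer = ByteBuffer.wrap(serializedValue);
final BufferValue bufferValue = BufferValue.deserialize(valueBuffer); // consumes the BufferValue prefix
final long bufferTime = valueBuffer.getLong();                        // trailing 8 bytes: the buffer timestamp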
Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.
The class IQv2IntegrationTest, method shouldNotRequireQueryHandler:
@Test
public void shouldNotRequireQueryHandler() {
    final KeyQuery<Integer, ValueAndTimestamp<Integer>> query = KeyQuery.withKey(1);
    final int partition = 1;
    final Set<Integer> partitions = singleton(partition);
    final StateQueryRequest<ValueAndTimestamp<Integer>> request =
        inStore(STORE_NAME).withQuery(query).withPartitions(partitions);

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
        INPUT_TOPIC_NAME,
        Consumed.with(Serdes.Integer(), Serdes.Integer()),
        Materialized.as(new KeyValueBytesStoreSupplier() {

            @Override
            public String name() {
                return STORE_NAME;
            }

            @Override
            public KeyValueStore<Bytes, byte[]> get() {
                // A bare-bones store that deliberately does NOT override StateStore#query
                return new KeyValueStore<Bytes, byte[]>() {
                    private boolean open = false;
                    private Map<Bytes, byte[]> map = new HashMap<>();
                    private Position position;
                    private StateStoreContext context;

                    @Override
                    public void put(final Bytes key, final byte[] value) {
                        map.put(key, value);
                        StoreQueryUtils.updatePosition(position, context);
                    }

                    @Override
                    public byte[] putIfAbsent(final Bytes key, final byte[] value) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.putIfAbsent(key, value);
                    }

                    @Override
                    public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
                        StoreQueryUtils.updatePosition(position, context);
                        for (final KeyValue<Bytes, byte[]> entry : entries) {
                            map.put(entry.key, entry.value);
                        }
                    }

                    @Override
                    public byte[] delete(final Bytes key) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.remove(key);
                    }

                    @Override
                    public String name() {
                        return STORE_NAME;
                    }

                    @Deprecated
                    @Override
                    public void init(final ProcessorContext context, final StateStore root) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public void init(final StateStoreContext context, final StateStore root) {
                        context.register(root, (key, value) -> put(Bytes.wrap(key), value));
                        this.open = true;
                        this.position = Position.emptyPosition();
                        this.context = context;
                    }

                    @Override
                    public void flush() {
                    }

                    @Override
                    public void close() {
                        this.open = false;
                        map.clear();
                    }

                    @Override
                    public boolean persistent() {
                        return false;
                    }

                    @Override
                    public boolean isOpen() {
                        return open;
                    }

                    @Override
                    public Position getPosition() {
                        return position;
                    }

                    @Override
                    public byte[] get(final Bytes key) {
                        return map.get(key);
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> all() {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public long approximateNumEntries() {
                        return map.size();
                    }
                };
            }

            @Override
            public String metricsScope() {
                return "nonquery";
            }
        })
    );

    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration());
    kafkaStreams.cleanUp();
    kafkaStreams.start();

    final StateQueryResult<ValueAndTimestamp<Integer>> result =
        IntegrationTestUtils.iqv2WaitForResult(kafkaStreams, request);
    final QueryResult<ValueAndTimestamp<Integer>> queryResult = result.getPartitionResults().get(partition);
    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.UNKNOWN_QUERY_TYPE));
    assertThat(
        queryResult.getFailureMessage(),
        matchesPattern("This store (.*) doesn't know how to execute the given query (.*)."
            + " Contact the store maintainer if you need support for a new query type.")
    );
}
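The UNKNOWN_QUERY_TYPE failure is expected here because the anonymous store relies on the default StateStore#query implementation. As a hedged sketch (reusing the position and context fields the store above already keeps), such a store could opt in to IQv2 by overriding query and delegating to the built-in handlers:

@Override
public <R> QueryResult<R> query(final Query<R> query,
                                final PositionBound positionBound,
                                final QueryConfig config) {
    // Delegate to Kafka Streams' built-in handlers for basic query types (e.g. KeyQuery)
    return StoreQueryUtils.handleBasicQueries(query, positionBound, config, this, position, context);
}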
Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.
The class InMemoryWindowStore, method put:
@Override
public void put(final Bytes key, final byte[] value, final long windowStartTimestamp) {
    removeExpiredSegments();
    observedStreamTime = Math.max(observedStreamTime, windowStartTimestamp);

    if (windowStartTimestamp <= observedStreamTime - retentionPeriod) {
        expiredRecordSensor.record(1.0d, ProcessorContextUtils.currentSystemTime(context));
        LOG.warn("Skipping record for expired segment.");
    } else {
        if (value != null) {
            maybeUpdateSeqnumForDups();
            final Bytes keyBytes = retainDuplicates ? wrapForDups(key, seqnum) : key;
            segmentMap.computeIfAbsent(windowStartTimestamp, t -> new ConcurrentSkipListMap<>());
            segmentMap.get(windowStartTimestamp).put(keyBytes, value);
        } else if (!retainDuplicates) {
            // Skip if value is null and duplicates are allowed since this delete is a no-op
            segmentMap.computeIfPresent(windowStartTimestamp, (t, kvMap) -> {
                kvMap.remove(key);
                if (kvMap.isEmpty()) {
                    segmentMap.remove(windowStartTimestamp);
                }
                return kvMap;
            });
        }
    }

    StoreQueryUtils.updatePosition(position, stateStoreContext);
}
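To make the expiry guard concrete, here is an illustrative walkthrough with hypothetical numbers (a retention period of 100 ms and stream time already at 200): a put for window start 50 satisfies 50 <= 200 - 100, so the record is dropped, and only the expired-record metric and the warning are emitted.

// Hypothetical values, for illustration only
final long retentionPeriod = 100L;
long observedStreamTime = 200L;
final long windowStartTimestamp = 50L;
observedStreamTime = Math.max(observedStreamTime, windowStartTimestamp);                // stays 200
final boolean expired = windowStartTimestamp <= observedStreamTime - retentionPeriod;  // 50 <= 100 -> true: dropped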