Use of org.apache.kafka.streams.query.Position in project kafka by apache.
Class IQv2IntegrationTest, method shouldNotRequireQueryHandler:
@Test
public void shouldNotRequireQueryHandler() {
    final KeyQuery<Integer, ValueAndTimestamp<Integer>> query = KeyQuery.withKey(1);
    final int partition = 1;
    final Set<Integer> partitions = singleton(partition);
    final StateQueryRequest<ValueAndTimestamp<Integer>> request =
        inStore(STORE_NAME).withQuery(query).withPartitions(partitions);

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
        INPUT_TOPIC_NAME,
        Consumed.with(Serdes.Integer(), Serdes.Integer()),
        Materialized.as(new KeyValueBytesStoreSupplier() {
            @Override
            public String name() {
                return STORE_NAME;
            }

            @Override
            public KeyValueStore<Bytes, byte[]> get() {
                // A minimal hand-rolled store: it maintains a Position on every
                // write, but deliberately implements no IQv2 query handler.
                return new KeyValueStore<Bytes, byte[]>() {
                    private boolean open = false;
                    private final Map<Bytes, byte[]> map = new HashMap<>();
                    private Position position;
                    private StateStoreContext context;

                    @Override
                    public void put(final Bytes key, final byte[] value) {
                        map.put(key, value);
                        StoreQueryUtils.updatePosition(position, context);
                    }

                    @Override
                    public byte[] putIfAbsent(final Bytes key, final byte[] value) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.putIfAbsent(key, value);
                    }

                    @Override
                    public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
                        StoreQueryUtils.updatePosition(position, context);
                        for (final KeyValue<Bytes, byte[]> entry : entries) {
                            map.put(entry.key, entry.value);
                        }
                    }

                    @Override
                    public byte[] delete(final Bytes key) {
                        StoreQueryUtils.updatePosition(position, context);
                        return map.remove(key);
                    }

                    @Override
                    public String name() {
                        return STORE_NAME;
                    }

                    @Deprecated
                    @Override
                    public void init(final ProcessorContext context, final StateStore root) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public void init(final StateStoreContext context, final StateStore root) {
                        context.register(root, (key, value) -> put(Bytes.wrap(key), value));
                        this.open = true;
                        this.position = Position.emptyPosition();
                        this.context = context;
                    }

                    @Override
                    public void flush() {
                    }

                    @Override
                    public void close() {
                        this.open = false;
                        map.clear();
                    }

                    @Override
                    public boolean persistent() {
                        return false;
                    }

                    @Override
                    public boolean isOpen() {
                        return open;
                    }

                    @Override
                    public Position getPosition() {
                        return position;
                    }

                    @Override
                    public byte[] get(final Bytes key) {
                        return map.get(key);
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to) {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public KeyValueIterator<Bytes, byte[]> all() {
                        throw new UnsupportedOperationException();
                    }

                    @Override
                    public long approximateNumEntries() {
                        return map.size();
                    }
                };
            }

            @Override
            public String metricsScope() {
                return "nonquery";
            }
        }));

    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration());
    kafkaStreams.cleanUp();
    kafkaStreams.start();

    final StateQueryResult<ValueAndTimestamp<Integer>> result =
        IntegrationTestUtils.iqv2WaitForResult(kafkaStreams, request);
    final QueryResult<ValueAndTimestamp<Integer>> queryResult = result.getPartitionResults().get(partition);

    // Because the store implements no query handler, the KeyQuery fails with UNKNOWN_QUERY_TYPE.
    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.UNKNOWN_QUERY_TYPE));
    assertThat(queryResult.getFailureMessage(), matchesPattern("This store (.*) doesn't know how to execute the given query (.*)." + " Contact the store maintainer if you need support for a new query type."));
}
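For context, here is a minimal sketch of how the Position vector itself behaves, independent of any store. The topic name "input-topic" and the offsets are made up for illustration; the API (emptyPosition, withComponent, fromMap, merge, getPartitionPositions) is the public one from org.apache.kafka.streams.query:

import java.util.Map;
import org.apache.kafka.streams.query.Position;

public class PositionMergeSketch {
    public static void main(final String[] args) {
        // Track how far processing has advanced on each input partition.
        final Position a = Position.emptyPosition()
            .withComponent("input-topic", 0, 10L)
            .withComponent("input-topic", 1, 20L);
        final Position b = Position.fromMap(Map.of("input-topic", Map.of(0, 15L)));

        // merge keeps the componentwise maximum offset.
        final Position merged = a.merge(b);
        System.out.println(merged.getPartitionPositions("input-topic")); // partition 0 -> 15, partition 1 -> 20
    }
}

This merge-by-maximum semantics is what lets every write path in the store above call StoreQueryUtils.updatePosition without worrying about ordering.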
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
Class ProcessorContextImplTest, method shouldSendRecordHeadersToChangelogTopicWhenConsistencyEnabled:
@Test
public void shouldSendRecordHeadersToChangelogTopicWhenConsistencyEnabled() {
    final Position position = Position.emptyPosition();
    final Headers headers = new RecordHeaders();
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position).array()));

    // EasyMock: record the expected send(...) call with both consistency headers,
    // then replay and verify that logChange produces exactly that call.
    recordCollector.send(
        CHANGELOG_PARTITION.topic(),
        KEY_BYTES,
        VALUE_BYTES,
        headers,
        CHANGELOG_PARTITION.partition(),
        TIMESTAMP,
        BYTES_KEY_SERIALIZER,
        BYTEARRAY_VALUE_SERIALIZER);

    final StreamTask task = EasyMock.createNiceMock(StreamTask.class);
    replay(recordCollector, task);

    context = new ProcessorContextImpl(
        mock(TaskId.class),
        streamsConfigWithConsistencyMock(),
        stateManager,
        mock(StreamsMetricsImpl.class),
        mock(ThreadCache.class));
    context.transitionToActive(task, recordCollector, null);
    context.logChange(REGISTERED_STORE_NAME, KEY_BYTES, VALUE_BYTES, TIMESTAMP, position);

    verify(recordCollector);
}
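As a companion sketch, the position header written above can be decoded back into a Position. PositionSerde is the internal helper the test uses for serialization; assuming its deserialize counterpart (it lives alongside serialize in org.apache.kafka.streams.state.internals), a round trip looks like this. The topic name and offset are hypothetical:

import java.nio.ByteBuffer;
import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.state.internals.PositionSerde;

public class PositionHeaderRoundTrip {
    public static void main(final String[] args) {
        final Position position = Position.emptyPosition().withComponent("store-changelog", 0, 42L);

        // Serialize exactly as the changelog header above does, then decode it back.
        final byte[] headerValue = PositionSerde.serialize(position).array();
        final Position restored = PositionSerde.deserialize(ByteBuffer.wrap(headerValue));

        System.out.println(restored.getPartitionPositions("store-changelog")); // partition 0 -> 42
    }
}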
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
Class ProcessorStateManagerTest, method shouldWritePositionCheckpointFile:
@Test
public void shouldWritePositionCheckpointFile() throws IOException {
    final ProcessorStateManager stateMgr = getStateManager(Task.TaskType.ACTIVE);
    final Position persistentPosition =
        Position.emptyPosition().withComponent(persistentStoreTopicName, 1, 123L);
    final File persistentFile = new File(
        stateDirectory.getOrCreateDirectoryForTask(taskId),
        "shouldWritePositionCheckpointFile.position");
    final StateStorePositionCommit persistentCheckpoint =
        new StateStorePositionCommit(persistentFile, persistentPosition);
    stateMgr.registerStore(persistentStore, persistentStore.stateRestoreCallback, persistentCheckpoint);

    assertFalse(persistentCheckpoint.getFile().exists());
    stateMgr.checkpoint();
    assertTrue(persistentCheckpoint.getFile().exists());

    // The checkpoint file should contain an offset from the persistent store only.
    final Map<TopicPartition, Long> persistentOffsets = persistentCheckpoint.getOffsetCheckpoint().read();
    assertThat(persistentOffsets, is(singletonMap(new TopicPartition(persistentStoreTopicName, 1), 123L)));
    assertEquals(persistentCheckpoint.getCheckpointedPosition(), persistentCheckpoint.getStateStorePosition());

    stateMgr.close();
    assertTrue(persistentStore.closed);
}
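The .position file above uses the same on-disk format as the regular offset checkpoint. Here is a hedged sketch of reading and writing one directly with the internal OffsetCheckpoint helper; the file path and topic name are hypothetical:

import java.io.File;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class PositionCheckpointSketch {
    public static void main(final String[] args) throws IOException {
        final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File("/tmp/example.position"));

        // One entry per position component: topic, partition, offset.
        checkpoint.write(Map.of(new TopicPartition("store-topic", 1), 123L));
        final Map<TopicPartition, Long> offsets = checkpoint.read();
        System.out.println(offsets); // {store-topic-1=123}
    }
}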
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
Class ProcessorStateManagerTest, method shouldLoadMissingFileAsEmptyPosition:
@Test
public void shouldLoadMissingFileAsEmptyPosition() {
    final Position persistentPosition =
        Position.emptyPosition().withComponent(persistentStoreTopicName, 1, 123L);
    final File persistentFile = new File(
        stateDirectory.getOrCreateDirectoryForTask(taskId),
        "shouldFailWritingPositionCheckpointFile.position");
    final StateStorePositionCommit persistentCheckpoint =
        new StateStorePositionCommit(persistentFile, persistentPosition);

    // The file was never written, so the checkpointed position falls back to empty.
    assertFalse(persistentCheckpoint.getFile().exists());
    assertEquals(persistentCheckpoint.getCheckpointedPosition(), Position.emptyPosition());
}
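A sketch of the fallback the test asserts: when the checkpoint file is absent, treat the position as empty rather than fail. The file handling and loop here are illustrative, not the actual ProcessorStateManager internals:

import java.io.File;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class LoadPositionOrEmpty {
    static Position load(final File file) throws IOException {
        Position position = Position.emptyPosition();
        if (file.exists()) {
            // Rebuild the position vector from the checkpointed offsets.
            for (final Map.Entry<TopicPartition, Long> entry : new OffsetCheckpoint(file).read().entrySet()) {
                position = position.withComponent(
                    entry.getKey().topic(), entry.getKey().partition(), entry.getValue());
            }
        }
        // A missing file yields Position.emptyPosition(), matching the assertion above.
        return position;
    }
}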
Use of org.apache.kafka.streams.query.Position in project kafka by apache.
Class StoreQueryUtils, method isPermitted:
public static boolean isPermitted(final Position position, final PositionBound positionBound, final int partition) {
    final Position bound = positionBound.position();
    for (final String topic : bound.getTopics()) {
        final Map<Integer, Long> partitionBounds = bound.getPartitionPositions(topic);
        final Map<Integer, Long> seenPartitionPositions = position.getPartitionPositions(topic);
        if (!partitionBounds.containsKey(partition)) {
            // this topic isn't bounded for our partition, so just skip over it.
        } else {
            if (!seenPartitionPositions.containsKey(partition)) {
                // we haven't seen a position for a partition that we have a bound for
                return false;
            } else if (seenPartitionPositions.get(partition) < partitionBounds.get(partition)) {
                // our current position is behind the bound
                return false;
            }
        }
    }
    return true;
}
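A worked example of these semantics, assuming StoreQueryUtils is on the classpath (it lives in org.apache.kafka.streams.state.internals); topic name and offsets are made up:

import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.query.PositionBound;
import org.apache.kafka.streams.state.internals.StoreQueryUtils;

public class IsPermittedExample {
    public static void main(final String[] args) {
        final Position seen = Position.emptyPosition().withComponent("input", 0, 5L);
        final PositionBound bound = PositionBound.at(Position.emptyPosition()
            .withComponent("input", 0, 3L)
            .withComponent("input", 1, 3L));

        // Partition 0 has caught up to its bound (seen 5 >= bound 3).
        System.out.println(StoreQueryUtils.isPermitted(seen, bound, 0)); // true
        // Partition 1 is bounded but has no seen position yet.
        System.out.println(StoreQueryUtils.isPermitted(seen, bound, 1)); // false
    }
}

Note that this helper assumes a concrete bound; unbounded requests are typically handled separately via PositionBound.isUnbounded() before this check is reached.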