Example 1 with StateStoreContext

Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.

From the class StoreQueryUtilsTest, method shouldReturnErrorOnBoundViolation:

@Test
public void shouldReturnErrorOnBoundViolation() {
    @SuppressWarnings("unchecked")
    final KeyQuery<String, Integer> query = Mockito.mock(KeyQuery.class);
    @SuppressWarnings("unchecked")
    final KeyValueStore<String, Integer> store = Mockito.mock(KeyValueStore.class);
    final StateStoreContext context = Mockito.mock(StateStoreContext.class);
    Mockito.when(context.taskId()).thenReturn(new TaskId(0, 0));
    final QueryResult<Integer> queryResult = StoreQueryUtils.handleBasicQueries(
        query,
        PositionBound.at(Position.emptyPosition().withComponent("topic", 0, 1)),
        new QueryConfig(false),
        store,
        Position.emptyPosition().withComponent("topic", 0, 0),
        context);
    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.NOT_UP_TO_BOUND));
    assertThat(queryResult.getFailureMessage(), is(
        "For store partition 0, the current position Position{position={topic={0=0}}}"
            + " is not yet up to the bound"
            + " PositionBound{position=Position{position={topic={0=1}}}}"));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) QueryConfig(org.apache.kafka.streams.query.QueryConfig) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) Test(org.junit.Test)
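The failure above is driven entirely by the bound check. A minimal sketch of that comparison, using only the public Position and PositionBound classes from org.apache.kafka.streams.query (the class name PositionBoundSketch is made up for illustration):

import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.query.PositionBound;

public class PositionBoundSketch {
    public static void main(final String[] args) {
        // The store has only seen offset 0 of partition 0 of "topic"...
        final Position current = Position.emptyPosition().withComponent("topic", 0, 0);
        // ...but the caller demands at least offset 1 before the query may be served.
        final PositionBound bound = PositionBound.at(
            Position.emptyPosition().withComponent("topic", 0, 1));
        final long currentOffset = current.getPartitionPositions("topic").get(0); // 0
        final long boundOffset = bound.position().getPartitionPositions("topic").get(0); // 1
        // 0 < 1: the store is not yet up to the bound, so handleBasicQueries
        // reports FailureReason.NOT_UP_TO_BOUND, exactly as the test asserts.
        System.out.println(currentOffset >= boundOffset); // false
    }
}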

Example 2 with StateStoreContext

Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.

From the class EosIntegrationTest, method verifyStateIsInStoreAndOffsetsAreInCheckpoint:

private void verifyStateIsInStoreAndOffsetsAreInCheckpoint(final int partition, final Set<KeyValue<Long, Long>> expectedState) throws IOException {
    final String stateStoreDir = stateTmpDir + File.separator + "appDir" + File.separator
        + applicationId + File.separator + "0_" + partition + File.separator;
    // Verify that the data in the state store on disk is fully up-to-date
    final StateStoreContext context = new MockInternalProcessorContext(new Properties(), new TaskId(0, 0), new File(stateStoreDir));
    final MockKeyValueStore stateStore = new MockKeyValueStore("store", false);
    final RocksDBStore store = (RocksDBStore) new RocksDbKeyValueBytesStoreSupplier(storeName, false).get();
    store.init(context, stateStore);
    store.all().forEachRemaining(kv -> {
        final KeyValue<Long, Long> kv2 = new KeyValue<>(new BigInteger(kv.key.get()).longValue(), new BigInteger(kv.value).longValue());
        expectedState.remove(kv2);
    });
    // Verify that the checkpointed offsets exactly match the max record offsets in the changelog
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(stateStoreDir + ".checkpoint"));
    final Map<TopicPartition, Long> checkpointedOffsets = checkpoint.read();
    checkpointedOffsets.forEach(this::verifyChangelogMaxRecordOffsetMatchesCheckpointedOffset);
}
Also used : OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) TaskId(org.apache.kafka.streams.processor.TaskId) KeyValue(org.apache.kafka.streams.KeyValue) RocksDbKeyValueBytesStoreSupplier(org.apache.kafka.streams.state.internals.RocksDbKeyValueBytesStoreSupplier) Properties(java.util.Properties) MockKeyValueStore(org.apache.kafka.test.MockKeyValueStore) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) TopicPartition(org.apache.kafka.common.TopicPartition) BigInteger(java.math.BigInteger) MockInternalProcessorContext(org.apache.kafka.test.MockInternalProcessorContext) File(java.io.File) RocksDBStore(org.apache.kafka.streams.state.internals.RocksDBStore)
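For context, the .checkpoint file read at the end of the method is a plain OffsetCheckpoint mapping changelog partitions to offsets. A minimal round-trip sketch, run inside a method that declares throws IOException and assuming imports of java.nio.file.Files and java.util.Collections alongside those listed above (the directory and topic name are made up):

    final File dir = Files.createTempDirectory("checkpoint-demo").toFile();
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(dir, ".checkpoint"));
    // Write a single changelog offset, then read the whole file back.
    checkpoint.write(Collections.singletonMap(new TopicPartition("app-store-changelog", 0), 42L));
    final Map<TopicPartition, Long> offsets = checkpoint.read();
    // offsets now contains {app-store-changelog-0=42}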

Example 3 with StateStoreContext

Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.

From the class TimeOrderedKeyValueBufferTest, method shouldFlush:

@Test
public void shouldFlush() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    putRecord(buffer, context, 2L, 0L, "asdf", "2093j");
    putRecord(buffer, context, 1L, 1L, "zxcv", "3gon4i");
    putRecord(buffer, context, 0L, 2L, "deleteme", "deadbeef");
    // replace "deleteme" with a tombstone
    buffer.evictWhile(() -> buffer.minTimestamp() < 1, kv -> {
    });
    // flush everything to the changelog
    buffer.flush();
    // the buffer should serialize the buffer time and the value as byte[],
    // which we can't compare for equality using ProducerRecord.
    // As a workaround, I'm deserializing them and shoving them in a KeyValue, just for ease of testing.
    final List<ProducerRecord<String, KeyValue<Long, BufferValue>>> collected =
        ((MockRecordCollector) context.recordCollector()).collected().stream().map(pr -> {
        final KeyValue<Long, BufferValue> niceValue;
        if (pr.value() == null) {
            niceValue = null;
        } else {
            final byte[] serializedValue = (byte[]) pr.value();
            final ByteBuffer valueBuffer = ByteBuffer.wrap(serializedValue);
            final BufferValue contextualRecord = BufferValue.deserialize(valueBuffer);
            final long timestamp = valueBuffer.getLong();
            niceValue = new KeyValue<>(timestamp, contextualRecord);
        }
        return new ProducerRecord<>(pr.topic(), pr.partition(), pr.timestamp(), pr.key().toString(), niceValue, pr.headers());
    }).collect(Collectors.toList());
    assertThat(collected, is(asList(
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
            // Producer will assign
            0, null, "deleteme", null, new RecordHeaders()),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
            0, null, "zxcv", new KeyValue<>(1L, getBufferValue("3gon4i", 1)), CHANGELOG_HEADERS),
        new ProducerRecord<>(APP_ID + "-" + testName + "-changelog",
            0, null, "asdf", new KeyValue<>(2L, getBufferValue("2093j", 0)), CHANGELOG_HEADERS))));
    cleanup(context, buffer);
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) TaskId(org.apache.kafka.streams.processor.TaskId) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) RunWith(org.junit.runner.RunWith) Random(java.util.Random) Eviction(org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) Function(java.util.function.Function) RecordBatchingStateRestoreCallback(org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback) ByteBuffer(java.nio.ByteBuffer) Collections.singletonList(java.util.Collections.singletonList) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Arrays.asList(java.util.Arrays.asList) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Record(org.apache.kafka.streams.processor.api.Record) Assert.fail(org.junit.Assert.fail) MockRecordCollector(org.apache.kafka.test.MockRecordCollector) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) TimestampType(org.apache.kafka.common.record.TimestampType) Parameterized(org.junit.runners.Parameterized) Utils(org.apache.kafka.common.utils.Utils) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) UTF_8(java.nio.charset.StandardCharsets.UTF_8) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) CHANGELOG_HEADERS(org.apache.kafka.streams.state.internals.InMemoryTimeOrderedKeyValueBuffer.CHANGELOG_HEADERS) Test(org.junit.Test) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) MockInternalProcessorContext(org.apache.kafka.test.MockInternalProcessorContext) List(java.util.List) Header(org.apache.kafka.common.header.Header) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Optional(java.util.Optional) Matchers.is(org.hamcrest.Matchers.is) Change(org.apache.kafka.streams.kstream.internals.Change)
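The mapping lambda above doubles as documentation of the changelog value layout: the serialized BufferValue comes first, followed by an 8-byte buffer timestamp. Pulled out as a standalone helper (the name decodeChangelogValue is hypothetical, not part of the test):

    // Hypothetical helper mirroring the lambda above; null input is a tombstone.
    private static KeyValue<Long, BufferValue> decodeChangelogValue(final byte[] raw) {
        if (raw == null) {
            return null; // e.g. the tombstone written for the evicted "deleteme" record
        }
        final ByteBuffer buffer = ByteBuffer.wrap(raw);
        final BufferValue value = BufferValue.deserialize(buffer); // consumes the value prefix
        final long bufferTime = buffer.getLong(); // trailing 8-byte timestamp
        return new KeyValue<>(bufferTime, value);
    }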

Example 4 with StateStoreContext

Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.

From the class IQv2IntegrationTest, method shouldNotRequireQueryHandler:

@Test
public void shouldNotRequireQueryHandler() {
    final KeyQuery<Integer, ValueAndTimestamp<Integer>> query = KeyQuery.withKey(1);
    final int partition = 1;
    final Set<Integer> partitions = singleton(partition);
    final StateQueryRequest<ValueAndTimestamp<Integer>> request = inStore(STORE_NAME).withQuery(query).withPartitions(partitions);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(INPUT_TOPIC_NAME, Consumed.with(Serdes.Integer(), Serdes.Integer()), Materialized.as(new KeyValueBytesStoreSupplier() {

        @Override
        public String name() {
            return STORE_NAME;
        }

        @Override
        public KeyValueStore<Bytes, byte[]> get() {
            return new KeyValueStore<Bytes, byte[]>() {

                private boolean open = false;

                private Map<Bytes, byte[]> map = new HashMap<>();

                private Position position;

                private StateStoreContext context;

                @Override
                public void put(final Bytes key, final byte[] value) {
                    map.put(key, value);
                    StoreQueryUtils.updatePosition(position, context);
                }

                @Override
                public byte[] putIfAbsent(final Bytes key, final byte[] value) {
                    StoreQueryUtils.updatePosition(position, context);
                    return map.putIfAbsent(key, value);
                }

                @Override
                public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
                    StoreQueryUtils.updatePosition(position, context);
                    for (final KeyValue<Bytes, byte[]> entry : entries) {
                        map.put(entry.key, entry.value);
                    }
                }

                @Override
                public byte[] delete(final Bytes key) {
                    StoreQueryUtils.updatePosition(position, context);
                    return map.remove(key);
                }

                @Override
                public String name() {
                    return STORE_NAME;
                }

                @Deprecated
                @Override
                public void init(final ProcessorContext context, final StateStore root) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public void init(final StateStoreContext context, final StateStore root) {
                    context.register(root, (key, value) -> put(Bytes.wrap(key), value));
                    this.open = true;
                    this.position = Position.emptyPosition();
                    this.context = context;
                }

                @Override
                public void flush() {
                }

                @Override
                public void close() {
                    this.open = false;
                    map.clear();
                }

                @Override
                public boolean persistent() {
                    return false;
                }

                @Override
                public boolean isOpen() {
                    return open;
                }

                @Override
                public Position getPosition() {
                    return position;
                }

                @Override
                public byte[] get(final Bytes key) {
                    return map.get(key);
                }

                @Override
                public KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public KeyValueIterator<Bytes, byte[]> all() {
                    throw new UnsupportedOperationException();
                }

                @Override
                public long approximateNumEntries() {
                    return map.size();
                }
            };
        }

        @Override
        public String metricsScope() {
            return "nonquery";
        }
    }));
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration());
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    final StateQueryResult<ValueAndTimestamp<Integer>> result = IntegrationTestUtils.iqv2WaitForResult(kafkaStreams, request);
    final QueryResult<ValueAndTimestamp<Integer>> queryResult = result.getPartitionResults().get(partition);
    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.UNKNOWN_QUERY_TYPE));
    assertThat(queryResult.getFailureMessage(), matchesPattern(
        "This store (.*) doesn't know how to execute the given query (.*)."
            + " Contact the store maintainer if you need support for a new query type."));
}
Also used : KafkaStreams(org.apache.kafka.streams.KafkaStreams) Position(org.apache.kafka.streams.query.Position) StateStore(org.apache.kafka.streams.processor.StateStore) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Bytes(org.apache.kafka.common.utils.Bytes) KeyValueBytesStoreSupplier(org.apache.kafka.streams.state.KeyValueBytesStoreSupplier) List(java.util.List) LinkedList(java.util.LinkedList) Map(java.util.Map) HashMap(java.util.HashMap) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
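The UNKNOWN_QUERY_TYPE failure is simply the default behaviour of StateStore.query(). As a hedged sketch of how the anonymous store could opt in, it could override query() and delegate to the same utility exercised in Example 1, handing over the position and context fields it captured in init(StateStoreContext, StateStore). This mirrors the delegation the built-in stores perform via StoreQueryUtils; it is not part of this test:

    @Override
    public <R> QueryResult<R> query(final Query<R> query,
                                    final PositionBound positionBound,
                                    final QueryConfig config) {
        // Delegate to the shared utility, passing the store's own
        // position and the context captured in init().
        return StoreQueryUtils.handleBasicQueries(
            query, positionBound, config, this, position, context);
    }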

Example 5 with StateStoreContext

Use of org.apache.kafka.streams.processor.StateStoreContext in project kafka by apache.

From the class InMemoryWindowStore, method put:

@Override
public void put(final Bytes key, final byte[] value, final long windowStartTimestamp) {
    removeExpiredSegments();
    observedStreamTime = Math.max(observedStreamTime, windowStartTimestamp);
    if (windowStartTimestamp <= observedStreamTime - retentionPeriod) {
        expiredRecordSensor.record(1.0d, ProcessorContextUtils.currentSystemTime(context));
        LOG.warn("Skipping record for expired segment.");
    } else {
        if (value != null) {
            maybeUpdateSeqnumForDups();
            final Bytes keyBytes = retainDuplicates ? wrapForDups(key, seqnum) : key;
            segmentMap.computeIfAbsent(windowStartTimestamp, t -> new ConcurrentSkipListMap<>());
            segmentMap.get(windowStartTimestamp).put(keyBytes, value);
        } else if (!retainDuplicates) {
            // Skip if value is null and duplicates are allowed since this delete is a no-op
            segmentMap.computeIfPresent(windowStartTimestamp, (t, kvMap) -> {
                kvMap.remove(key);
                if (kvMap.isEmpty()) {
                    segmentMap.remove(windowStartTimestamp);
                }
                return kvMap;
            });
        }
    }
    StoreQueryUtils.updatePosition(position, stateStoreContext);
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) TaskMetrics(org.apache.kafka.streams.processor.internals.metrics.TaskMetrics) LoggerFactory(org.slf4j.LoggerFactory) PositionBound(org.apache.kafka.streams.query.PositionBound) RecordBatchingStateRestoreCallback(org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback) WindowStore(org.apache.kafka.streams.state.WindowStore) ByteBuffer(java.nio.ByteBuffer) StoreToProcessorContextAdapter(org.apache.kafka.streams.processor.internals.StoreToProcessorContextAdapter) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) Windowed(org.apache.kafka.streams.kstream.Windowed) Map(java.util.Map) ProcessorContextUtils(org.apache.kafka.streams.processor.internals.ProcessorContextUtils) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) NoSuchElementException(java.util.NoSuchElementException) QueryResult(org.apache.kafka.streams.query.QueryResult) Sensor(org.apache.kafka.common.metrics.Sensor) QueryConfig(org.apache.kafka.streams.query.QueryConfig) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) Position(org.apache.kafka.streams.query.Position) Query(org.apache.kafka.streams.query.Query) WindowKeySchema.extractStoreTimestamp(org.apache.kafka.streams.state.internals.WindowKeySchema.extractStoreTimestamp) ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) KeyValue(org.apache.kafka.streams.KeyValue) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ChangelogRecordDeserializationHelper(org.apache.kafka.streams.processor.internals.ChangelogRecordDeserializationHelper) Bytes(org.apache.kafka.common.utils.Bytes) Objects(java.util.Objects) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) WindowKeySchema.extractStoreKeyBytes(org.apache.kafka.streams.state.internals.WindowKeySchema.extractStoreKeyBytes) StateStore(org.apache.kafka.streams.processor.StateStore) WindowStoreIterator(org.apache.kafka.streams.state.WindowStoreIterator) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) TimeWindow(org.apache.kafka.streams.kstream.internals.TimeWindow) IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED(org.apache.kafka.streams.StreamsConfig.InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED)
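Every mutation path in this store ends with StoreQueryUtils.updatePosition. Paraphrased as a sketch (an approximation of the utility, not its verbatim source): it folds the changelog coordinates of the record currently being processed into the store's Position via StateStoreContext.recordMetadata():

    static void updatePosition(final Position position, final StateStoreContext context) {
        if (context != null && context.recordMetadata().isPresent()) {
            // RecordMetadata here is org.apache.kafka.streams.processor.api.RecordMetadata
            final RecordMetadata meta = context.recordMetadata().get();
            position.withComponent(meta.topic(), meta.partition(), meta.offset());
        }
    }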

Aggregations

StateStoreContext (org.apache.kafka.streams.processor.StateStoreContext): 8
StateStore (org.apache.kafka.streams.processor.StateStore): 4
KeyValue (org.apache.kafka.streams.KeyValue): 3
TaskId (org.apache.kafka.streams.processor.TaskId): 3
ByteBuffer (java.nio.ByteBuffer): 2
LinkedList (java.util.LinkedList): 2
List (java.util.List): 2
Map (java.util.Map): 2
Properties (java.util.Properties): 2
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 2
Serdes (org.apache.kafka.common.serialization.Serdes): 2
Bytes (org.apache.kafka.common.utils.Bytes): 2
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 2
ProcessorContext (org.apache.kafka.streams.processor.ProcessorContext): 2
RecordBatchingStateRestoreCallback (org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback): 2
Position (org.apache.kafka.streams.query.Position): 2
QueryConfig (org.apache.kafka.streams.query.QueryConfig): 2
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 2
MockInternalProcessorContext (org.apache.kafka.test.MockInternalProcessorContext): 2
Test (org.junit.Test): 2