
Example 96 with KeyValueStore

Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest, method prepareTopology.

private static KafkaStreams prepareTopology(final String queryableName, final Properties streamsConfig) {
    final UniqueTopicSerdeScope serdeScope = new UniqueTopicSerdeScope();
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> table1 = builder
            .stream(TABLE_1, Consumed.with(
                    serdeScope.decorateSerde(Serdes.String(), streamsConfig, true),
                    serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)))
            .repartition(repartitionA())
            .toTable(Named.as("table.a"));
    final KTable<String, String> table2 = builder
            .stream(TABLE_2, Consumed.with(
                    serdeScope.decorateSerde(Serdes.String(), streamsConfig, true),
                    serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)))
            .repartition(repartitionB())
            .toTable(Named.as("table.b"));
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> materialized;
    if (queryableName != null) {
        materialized = Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(queryableName)
                .withKeySerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true))
                .withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
                .withCachingDisabled();
    } else {
        throw new RuntimeException("Current implementation of joinOnForeignKey requires a materialized store");
    }
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> "value1=" + value1 + ",value2=" + value2;
    // Partition the subscription (foreign-key) side by the extracted key of table2,
    // and the response side by the original key of table1.
    final TableJoined<String, String> tableJoined = TableJoined.with(
            (topic, key, value, numPartitions) -> Math.abs(getKeyB(key).hashCode()) % numPartitions,
            (topic, key, value, numPartitions) -> Math.abs(key.hashCode()) % numPartitions);
    table1.join(table2, KTableKTableForeignKeyInnerJoinCustomPartitionerIntegrationTest::getKeyB, joiner, tableJoined, materialized)
            .toStream()
            .to(OUTPUT, Produced.with(
                    serdeScope.decorateSerde(Serdes.String(), streamsConfig, true),
                    serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));
    return new KafkaStreams(builder.build(streamsConfig), streamsConfig);
}
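
The helpers repartitionA(), repartitionB(), and getKeyB(key) are defined elsewhere in the test class and are not shown here. A minimal sketch of what they might look like, assuming composite keys such as "a1-b2" whose suffix after the dash is the key of table2, and an illustrative partition count of 4 (both assumptions, not taken from this page):

private static Repartitioned<String, String> repartitionA() {
    // Repartition stream A by the extracted foreign key so that it is
    // co-partitioned with table2 under the same custom partitioner.
    return Repartitioned.<String, String>as("a")
            .withNumberOfPartitions(4)
            .withStreamPartitioner((topic, key, value, numPartitions) ->
                    Math.abs(getKeyB(key).hashCode()) % numPartitions);
}

private static Repartitioned<String, String> repartitionB() {
    // Stream B is partitioned directly on its own key.
    return Repartitioned.<String, String>as("b")
            .withNumberOfPartitions(4)
            .withStreamPartitioner((topic, key, value, numPartitions) ->
                    Math.abs(key.hashCode()) % numPartitions);
}

private static String getKeyB(final String key) {
    // Hypothetical extractor: everything after the dash is the foreign key.
    return key.substring(key.indexOf("-") + 1);
}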

Example 97 with KeyValueStore

Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class IQv2IntegrationTest, method shouldNotRequireQueryHandler.

@Test
public void shouldNotRequireQueryHandler() {
    final KeyQuery<Integer, ValueAndTimestamp<Integer>> query = KeyQuery.withKey(1);
    final int partition = 1;
    final Set<Integer> partitions = singleton(partition);
    final StateQueryRequest<ValueAndTimestamp<Integer>> request = inStore(STORE_NAME).withQuery(query).withPartitions(partitions);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
            INPUT_TOPIC_NAME,
            Consumed.with(Serdes.Integer(), Serdes.Integer()),
            Materialized.as(new KeyValueBytesStoreSupplier() {

        @Override
        public String name() {
            return STORE_NAME;
        }

        @Override
        public KeyValueStore<Bytes, byte[]> get() {
            return new KeyValueStore<Bytes, byte[]>() {

                private boolean open = false;

                private Map<Bytes, byte[]> map = new HashMap<>();

                private Position position;

                private StateStoreContext context;

                @Override
                public void put(final Bytes key, final byte[] value) {
                    map.put(key, value);
                    StoreQueryUtils.updatePosition(position, context);
                }

                @Override
                public byte[] putIfAbsent(final Bytes key, final byte[] value) {
                    StoreQueryUtils.updatePosition(position, context);
                    return map.putIfAbsent(key, value);
                }

                @Override
                public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
                    StoreQueryUtils.updatePosition(position, context);
                    for (final KeyValue<Bytes, byte[]> entry : entries) {
                        map.put(entry.key, entry.value);
                    }
                }

                @Override
                public byte[] delete(final Bytes key) {
                    StoreQueryUtils.updatePosition(position, context);
                    return map.remove(key);
                }

                @Override
                public String name() {
                    return STORE_NAME;
                }

                @Deprecated
                @Override
                public void init(final ProcessorContext context, final StateStore root) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public void init(final StateStoreContext context, final StateStore root) {
                    context.register(root, (key, value) -> put(Bytes.wrap(key), value));
                    this.open = true;
                    this.position = Position.emptyPosition();
                    this.context = context;
                }

                @Override
                public void flush() {
                }

                @Override
                public void close() {
                    this.open = false;
                    map.clear();
                }

                @Override
                public boolean persistent() {
                    return false;
                }

                @Override
                public boolean isOpen() {
                    return open;
                }

                @Override
                public Position getPosition() {
                    return position;
                }

                @Override
                public byte[] get(final Bytes key) {
                    return map.get(key);
                }

                @Override
                public KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to) {
                    throw new UnsupportedOperationException();
                }

                @Override
                public KeyValueIterator<Bytes, byte[]> all() {
                    throw new UnsupportedOperationException();
                }

                @Override
                public long approximateNumEntries() {
                    return map.size();
                }
            };
        }

        @Override
        public String metricsScope() {
            return "nonquery";
        }
    }));
    kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration());
    kafkaStreams.cleanUp();
    kafkaStreams.start();
    final StateQueryResult<ValueAndTimestamp<Integer>> result = IntegrationTestUtils.iqv2WaitForResult(kafkaStreams, request);
    final QueryResult<ValueAndTimestamp<Integer>> queryResult = result.getPartitionResults().get(partition);
    assertThat(queryResult.isFailure(), is(true));
    assertThat(queryResult.getFailureReason(), is(FailureReason.UNKNOWN_QUERY_TYPE));
    assertThat(queryResult.getFailureMessage(), matchesPattern(
            "This store (.*) doesn't know how to execute the given query (.*)."
                    + " Contact the store maintainer if you need support for a new query type."));
}
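
The query fails precisely because the anonymous store never overrides StateStore#query, so the default implementation answers with FailureReason.UNKNOWN_QUERY_TYPE. For contrast, a minimal sketch of how the same store could handle the KeyQuery itself, assuming a Kafka Streams version in which StateStore#query takes a QueryConfig (Query, PositionBound, QueryConfig, and QueryResult live in org.apache.kafka.streams.query); this is illustrative, not part of the test:

@Override
@SuppressWarnings("unchecked")
public <R> QueryResult<R> query(final Query<R> query,
                                final PositionBound positionBound,
                                final QueryConfig config) {
    if (query instanceof KeyQuery) {
        // Look the raw key up in the backing map and report the store's position.
        final KeyQuery<Bytes, byte[]> keyQuery = (KeyQuery<Bytes, byte[]>) query;
        final QueryResult<byte[]> result = QueryResult.forResult(map.get(keyQuery.getKey()));
        result.setPosition(position);
        return (QueryResult<R>) result;
    }
    // Anything else still fails with UNKNOWN_QUERY_TYPE.
    return QueryResult.forUnknownQueryType(query, this);
}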

Example 98 with KeyValueStore

Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class OptimizedKTableIntegrationTest, method shouldApplyUpdatesToStandbyStore.

@Test
public void shouldApplyUpdatesToStandbyStore() throws Exception {
    final int batch1NumMessages = 100;
    final int batch2NumMessages = 100;
    final int key = 1;
    final Semaphore semaphore = new Semaphore(0);
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(
            INPUT_TOPIC_NAME,
            Consumed.with(Serdes.Integer(), Serdes.Integer()),
            Materialized.<Integer, Integer, KeyValueStore<Bytes, byte[]>>as(TABLE_NAME).withCachingDisabled())
            .toStream()
            .peek((k, v) -> semaphore.release());
    final KafkaStreams kafkaStreams1 = createKafkaStreams(builder, streamsConfiguration());
    final KafkaStreams kafkaStreams2 = createKafkaStreams(builder, streamsConfiguration());
    final List<KafkaStreams> kafkaStreamsList = Arrays.asList(kafkaStreams1, kafkaStreams2);
    startApplicationAndWaitUntilRunning(kafkaStreamsList, Duration.ofSeconds(60));
    produceValueRange(key, 0, batch1NumMessages);
    // Assert that all messages in the first batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch1NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    final ReadOnlyKeyValueStore<Integer, Integer> store1 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams1, QueryableStoreTypes.keyValueStore());
    final ReadOnlyKeyValueStore<Integer, Integer> store2 = IntegrationTestUtils.getStore(TABLE_NAME, kafkaStreams2, QueryableStoreTypes.keyValueStore());
    final boolean kafkaStreams1WasFirstActive;
    final KeyQueryMetadata keyQueryMetadata =
            kafkaStreams1.queryMetadataForKey(TABLE_NAME, key, (topic, somekey, value, numPartitions) -> 0);
    // The two instances advertise different application-server ports; the test uses
    // port parity to tell which instance hosts the active task for the key, then
    // asserts that the active store's value reflects all messages being processed.
    if ((keyQueryMetadata.activeHost().port() % 2) == 1) {
        assertThat(store1.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = true;
    } else {
        assertThat(store2.get(key), is(equalTo(batch1NumMessages - 1)));
        kafkaStreams1WasFirstActive = false;
    }
    if (kafkaStreams1WasFirstActive) {
        kafkaStreams1.close();
    } else {
        kafkaStreams2.close();
    }
    final ReadOnlyKeyValueStore<Integer, Integer> newActiveStore = kafkaStreams1WasFirstActive ? store2 : store1;
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that after failover we have recovered to the last store write
        assertThat(newActiveStore.get(key), is(equalTo(batch1NumMessages - 1)));
    });
    final int totalNumMessages = batch1NumMessages + batch2NumMessages;
    produceValueRange(key, batch1NumMessages, totalNumMessages);
    // Assert that all messages in the second batch were processed in a timely manner
    assertThat(semaphore.tryAcquire(batch2NumMessages, 60, TimeUnit.SECONDS), is(equalTo(true)));
    TestUtils.retryOnExceptionWithTimeout(60 * 1000, 100, () -> {
        // Assert that the current value in store reflects all messages being processed
        assertThat(newActiveStore.get(key), is(equalTo(totalNumMessages - 1)));
    });
}
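
The helpers createKafkaStreams and produceValueRange are defined elsewhere in the test class. A plausible sketch under stated assumptions: the configuration must enable at least one standby replica for the failover above to work, CLUSTER is the test's EmbeddedKafkaCluster, and IntegerSerializer is used for both key and value; none of these bodies are copied from the actual class:

private KafkaStreams createKafkaStreams(final StreamsBuilder builder, final Properties config) {
    // One standby replica keeps a warm copy of the store on the other instance.
    config.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    return new KafkaStreams(builder.build(config), config);
}

private void produceValueRange(final int key, final int start, final int endExclusive) throws Exception {
    final Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    try (final KafkaProducer<Integer, Integer> producer = new KafkaProducer<>(producerProps)) {
        // One record per value; the table then holds the last value written.
        for (int v = start; v < endExclusive; v++) {
            producer.send(new ProducerRecord<>(INPUT_TOPIC_NAME, key, v));
        }
        producer.flush();
    }
}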

Example 99 with KeyValueStore

Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class RangeQueryIntegrationTest, method testStoreConfig.

@Test
public void testStoreConfig() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig =
            getStoreConfig(storeType, enableCaching, enableLogging);
    builder.table(inputStream, stateStoreConfig);
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), STREAMS_CONFIG)) {
        IntegrationTestUtils.startApplicationAndWaitUntilRunning(Collections.singletonList(kafkaStreams), Duration.ofSeconds(60));
        writeInputData();
        final ReadOnlyKeyValueStore<String, String> stateStore = IntegrationTestUtils.getStore(
                1000_000L, TABLE_NAME, kafkaStreams, QueryableStoreTypes.keyValueStore());
        // wait for the store to populate
        TestUtils.waitForCondition(() -> stateStore.get(high) != null, "The store never finished populating");
        // query the state store
        try (final KeyValueIterator<String, String> scanIterator = forward ? stateStore.range(null, null) : stateStore.reverseRange(null, null)) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        try (final KeyValueIterator<String, String> allIterator = forward ? stateStore.all() : stateStore.reverseAll()) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(allIterator, dataIterator);
        }
        testRange("range", stateStore, innerLow, innerHigh, forward);
        testRange("until", stateStore, null, middle, forward);
        testRange("from", stateStore, middle, null, forward);
        testRange("untilBetween", stateStore, null, innerHighBetween, forward);
        testRange("fromBetween", stateStore, innerLowBetween, null, forward);
    }
}
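
The testRange helper is not shown on this page. A plausible sketch, assuming records is the LinkedList of expected key-value pairs in ascending key order (as the calls to records.descendingIterator() above suggest) and that a null bound means unbounded on that side; the name argument is only a label for the scenario:

private void testRange(final String name,
                       final ReadOnlyKeyValueStore<String, String> store,
                       final String from,
                       final String to,
                       final boolean forward) {
    try (final KeyValueIterator<String, String> iterator =
                 forward ? store.range(from, to) : store.reverseRange(from, to)) {
        // Keep only the expected records that fall inside [from, to],
        // reversing the order for the backward scan.
        final List<KeyValue<String, String>> expected = new LinkedList<>();
        for (final KeyValue<String, String> record : records) {
            final boolean aboveLow = from == null || record.key.compareTo(from) >= 0;
            final boolean belowHigh = to == null || record.key.compareTo(to) <= 0;
            if (aboveLow && belowHigh) {
                if (forward) {
                    expected.add(record);
                } else {
                    expected.add(0, record);
                }
            }
        }
        TestUtils.checkEquals(iterator, expected.iterator());
    }
}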

Example 100 with KeyValueStore

Use of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class RangeQueryIntegrationTest, method getStoreConfig.

private Materialized<String, String, KeyValueStore<Bytes, byte[]>> getStoreConfig(final StoreType type, final boolean cachingEnabled, final boolean loggingEnabled) {
    final Supplier<KeyValueBytesStoreSupplier> createStore = () -> {
        if (type == StoreType.InMemory) {
            return Stores.inMemoryKeyValueStore(TABLE_NAME);
        } else if (type == StoreType.RocksDB) {
            return Stores.persistentKeyValueStore(TABLE_NAME);
        } else if (type == StoreType.Timed) {
            return Stores.persistentTimestampedKeyValueStore(TABLE_NAME);
        } else {
            // Fall back to an in-memory store for any unrecognized type.
            return Stores.inMemoryKeyValueStore(TABLE_NAME);
        }
    };
    final KeyValueBytesStoreSupplier stateStoreSupplier = createStore.get();
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig =
            Materialized.<String, String>as(stateStoreSupplier)
                    .withKeySerde(Serdes.String())
                    .withValueSerde(Serdes.String());
    if (cachingEnabled) {
        stateStoreConfig.withCachingEnabled();
    } else {
        stateStoreConfig.withCachingDisabled();
    }
    if (loggingEnabled) {
        stateStoreConfig.withLoggingEnabled(new HashMap<>());
    } else {
        stateStoreConfig.withLoggingDisabled();
    }
    return stateStoreConfig;
}
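
As a usage sketch, here is how the helper might be wired into a topology; the argument values are illustrative, not taken from the test:

final StreamsBuilder builder = new StreamsBuilder();
// Materialize the input topic as a cached, unlogged RocksDB-backed table.
final Materialized<String, String, KeyValueStore<Bytes, byte[]>> config =
        getStoreConfig(StoreType.RocksDB, true, false);
builder.table(inputStream, config);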

Aggregations

KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 133
Test (org.junit.Test): 101
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 54
KeyValue (org.apache.kafka.streams.KeyValue): 49
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 47
Properties (java.util.Properties): 37
Bytes (org.apache.kafka.common.utils.Bytes): 36
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 32
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 29
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 28
Serdes (org.apache.kafka.common.serialization.Serdes): 26
Materialized (org.apache.kafka.streams.kstream.Materialized): 25
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 24
IntegrationTest (org.apache.kafka.test.IntegrationTest): 21
KTable (org.apache.kafka.streams.kstream.KTable): 20
Consumed (org.apache.kafka.streams.kstream.Consumed): 19
StateStore (org.apache.kafka.streams.processor.StateStore): 17
ReadOnlyKeyValueStore (org.apache.kafka.streams.state.ReadOnlyKeyValueStore): 17
TestUtils (org.apache.kafka.test.TestUtils): 16
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 16