Example 26 with KeyValueStore

Usage of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class EosV2UpgradeIntegrationTest, method getKafkaStreams().

private KafkaStreams getKafkaStreams(final String appDir, final String processingGuarantee) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] storeNames = new String[] { storeName };
    final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
    builder.addStateStore(storeBuilder);
    final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
    input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {

        @Override
        public Transformer<Long, Long, KeyValue<Long, Long>> get() {
            return new Transformer<Long, Long, KeyValue<Long, Long>>() {

                ProcessorContext context;

                KeyValueStore<Long, Long> state = null;

                AtomicBoolean crash;

                AtomicInteger sharedCommit;

                @Override
                public void init(final ProcessorContext context) {
                    this.context = context;
                    state = context.getStateStore(storeName);
                    final String clientId = context.appConfigs().get(StreamsConfig.CLIENT_ID_CONFIG).toString();
                    if (APP_DIR_1.equals(clientId)) {
                        crash = errorInjectedClient1;
                        sharedCommit = commitCounterClient1;
                    } else {
                        crash = errorInjectedClient2;
                        sharedCommit = commitCounterClient2;
                    }
                }

                @Override
                public KeyValue<Long, Long> transform(final Long key, final Long value) {
                    if ((value + 1) % 10 == 0) {
                        if (sharedCommit.get() < 0 || sharedCommit.incrementAndGet() == 2) {
                            context.commit();
                        }
                        commitRequested.incrementAndGet();
                    }
                    Long sum = state.get(key);
                    if (sum == null) {
                        sum = value;
                    } else {
                        sum += value;
                    }
                    state.put(key, sum);
                    state.flush();
                    // potentially crash when processing the 5th, 15th, or 25th record (etc.),
                    // but only crash a single task
                    if (value % 10 == 4 && crash != null && crash.compareAndSet(true, false)) {
                        throw new RuntimeException("Injected test exception.");
                    }
                    return new KeyValue<>(key, state.get(key));
                }

                @Override
                public void close() {
                }
            };
        }
    }, storeNames).to(MULTI_PARTITION_OUTPUT_TOPIC);
    final Properties properties = new Properties();
    properties.put(StreamsConfig.CLIENT_ID_CONFIG, appDir);
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, processingGuarantee);
    final long commitInterval = Duration.ofMinutes(1L).toMillis();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, commitInterval);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), Duration.ofSeconds(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).minusMillis(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), (int) commitInterval);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.PARTITIONER_CLASS_CONFIG), KeyPartitioner.class);
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath() + File.separator + appDir);
    properties.put(InternalConfig.ASSIGNMENT_LISTENER, assignmentListener);
    final Properties config = StreamsTestUtils.getStreamsConfig(applicationId, CLUSTER.bootstrapServers(), Serdes.LongSerde.class.getName(), Serdes.LongSerde.class.getName(), properties);
    final KafkaStreams streams = new KafkaStreams(builder.build(), config, new TestKafkaClientSupplier());
    streams.setUncaughtExceptionHandler(e -> {
        if (!injectError) {
            // we don't expect any exceptions to be thrown in the clean-stop case
            e.printStackTrace(System.err);
            hasUnexpectedError = true;
        } else {
            int exceptionCount = (int) exceptionCounts.get(appDir);
            // we should only see our injected exception (or a commit exception), and at most 2 exceptions per stream
            if (++exceptionCount > 2 || !(e instanceof RuntimeException) || !(e.getMessage().contains("test exception"))) {
                // an expected injected exception does not fail the test, since we deliberately failed the stream;
                // log anything unexpected to stderr for debugging and fail in the main thread instead
                e.printStackTrace(System.err);
                hasUnexpectedError = true;
            }
            exceptionCounts.put(appDir, exceptionCount);
        }
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
    });
    return streams;
}
Also used: KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Properties(java.util.Properties) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
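
Distilled from the example above, a minimal self-contained sketch of the underlying pattern: register a persistent KeyValueStore with the StreamsBuilder and keep a per-key running sum inside a Transformer. The topic names, the store name, and the class name are placeholders, not the test's constants.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.Stores;

public class RunningSumSketch {

    public static StreamsBuilder topology() {
        final String storeName = "sum-store"; // placeholder store name
        final StreamsBuilder builder = new StreamsBuilder();
        builder.addStateStore(Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()));
        final KStream<Long, Long> input =
                builder.stream("input-topic", Consumed.with(Serdes.Long(), Serdes.Long()));
        input.transform(() -> new Transformer<Long, Long, KeyValue<Long, Long>>() {

            private KeyValueStore<Long, Long> state;

            @Override
            public void init(final ProcessorContext context) {
                // look up the store registered above by name
                state = context.getStateStore(storeName);
            }

            @Override
            public KeyValue<Long, Long> transform(final Long key, final Long value) {
                final Long previous = state.get(key);
                final long sum = previous == null ? value : previous + value;
                state.put(key, sum); // persist the running sum for this key
                return KeyValue.pair(key, sum);
            }

            @Override
            public void close() {
            }
        }, storeName).to("output-topic", Produced.with(Serdes.Long(), Serdes.Long()));
        return builder;
    }
}

Under processing.guarantee=exactly_once_v2, the state update and the downstream write commit atomically; the injected crashes and forced commits in the test above exist to probe exactly that guarantee across the upgrade.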

Example 27 with KeyValueStore

Usage of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class KTableEfficientRangeQueryTest, method testStoreConfig().

@Test
public void testStoreConfig() {
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> stateStoreConfig = getStoreConfig(storeType, TABLE_NAME, enableLogging, enableCaching);
    // Create topology: table from input topic
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> table = builder.table("input", stateStoreConfig);
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
        // get input topic and stateStore
        final TestInputTopic<String, String> input = driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
        final ReadOnlyKeyValueStore<String, String> stateStore = driver.getKeyValueStore(TABLE_NAME);
        // write some data
        for (final KeyValue<String, String> kv : records) {
            input.pipeInput(kv.key, kv.value);
        }
        // query the state store
        try (final KeyValueIterator<String, String> scanIterator = forward ? stateStore.range(null, null) : stateStore.reverseRange(null, null)) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        try (final KeyValueIterator<String, String> allIterator = forward ? stateStore.all() : stateStore.reverseAll()) {
            final Iterator<KeyValue<String, String>> dataIterator = forward ? records.iterator() : records.descendingIterator();
            TestUtils.checkEquals(allIterator, dataIterator);
        }
        testRange("range", stateStore, innerLow, innerHigh, forward);
        testRange("until", stateStore, null, middle, forward);
        testRange("from", stateStore, middle, null, forward);
        testRange("untilBetween", stateStore, null, innerHighBetween, forward);
        testRange("fromBetween", stateStore, innerLowBetween, null, forward);
    }
}
Also used: KeyValue(org.apache.kafka.streams.KeyValue) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Topology(org.apache.kafka.streams.Topology) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.Test)
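
The same query surface in isolation: a minimal sketch (not the parameterized test) that materializes a table as "store" and scans it through TopologyTestDriver. It assumes a Streams version where open-ended range(null, null) is supported, as in the test above; the topic and store names are placeholders.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class RangeQuerySketch {

    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table("input",
                Consumed.with(Serdes.String(), Serdes.String()),
                Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("store"));

        try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build())) {
            final TestInputTopic<String, String> input =
                    driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
            input.pipeInput("a", "1");
            input.pipeInput("b", "2");
            input.pipeInput("c", "3");

            final ReadOnlyKeyValueStore<String, String> store = driver.getKeyValueStore("store");
            // null bounds scan the full key space; range("a", "b") would be inclusive on both ends
            try (final KeyValueIterator<String, String> it = store.range(null, null)) {
                it.forEachRemaining(kv -> System.out.println(kv.key + " -> " + kv.value));
            }
        }
    }
}

reverseRange and reverseAll mirror range and all with descending key order, which is what the forward flag in the test toggles.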

Example 28 with KeyValueStore

Usage of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class KTableKTableForeignKeyJoinMaterializationIntegrationTest, method getTopology().

private Topology getTopology(final Properties streamsConfig, final String queryableStoreName) {
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> left = builder.table(LEFT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> right = builder.table(RIGHT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));
    final Function<String, String> extractor = value -> value.split("\\|")[1];
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> "(" + value1 + "," + value2 + ")";
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> materialized;
    if (queryable) {
        materialized = Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(queryableStoreName).withValueSerde(Serdes.String());
    } else {
        materialized = Materialized.with(null, Serdes.String());
    }
    final KTable<String, String> joinResult;
    if (this.materialized) {
        joinResult = left.join(right, extractor, joiner, materialized);
    } else {
        joinResult = left.join(right, extractor, joiner);
    }
    joinResult.toStream().to(OUTPUT, Produced.with(null, Serdes.String()));
    return builder.build(streamsConfig);
}
Also used: StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) CoreMatchers.is(org.hamcrest.CoreMatchers.is) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) Utils.mkProperties(org.apache.kafka.common.utils.Utils.mkProperties) Function(java.util.function.Function) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) TestName(org.junit.rules.TestName) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Parameterized(org.junit.runners.Parameterized) Before(org.junit.Before) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) Collections.emptyMap(java.util.Collections.emptyMap) KTable(org.apache.kafka.streams.kstream.KTable) TestOutputTopic(org.apache.kafka.streams.TestOutputTopic) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Consumed(org.apache.kafka.streams.kstream.Consumed) Collection(java.util.Collection) Test(org.junit.Test) Bytes(org.apache.kafka.common.utils.Bytes) Rule(org.junit.Rule) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) ValueJoiner(org.apache.kafka.streams.kstream.ValueJoiner) Materialized(org.apache.kafka.streams.kstream.Materialized) TestInputTopic(org.apache.kafka.streams.TestInputTopic) Topology(org.apache.kafka.streams.Topology)
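
The queryable flag above toggles between two Materialized flavors. As a sketch of the distinction (the store name is a placeholder): a named materialization creates a store reachable through interactive queries, while Materialized.with only supplies serdes and leaves any store internal.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueStore;

public class MaterializedSketch {

    // named store: queryable through KafkaStreams#store once the app is running
    static final Materialized<String, String, KeyValueStore<Bytes, byte[]>> QUERYABLE =
        Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("join-store")
            .withValueSerde(Serdes.String());

    // serde-only: Streams may still materialize internally, but under a
    // generated name that is not exposed for interactive queries
    static final Materialized<String, String, KeyValueStore<Bytes, byte[]>> SERDE_ONLY =
        Materialized.with(null, Serdes.String());
}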

Example 29 with KeyValueStore

Usage of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class StandbyTaskCreationIntegrationTest, method shouldNotCreateAnyStandByTasksForStateStoreWithLoggingDisabled().

@Test
public void shouldNotCreateAnyStandByTasksForStateStoreWithLoggingDisabled() throws Exception {
    final StreamsBuilder builder = new StreamsBuilder();
    final String stateStoreName = "myTransformState";
    final StoreBuilder<KeyValueStore<Integer, Integer>> keyValueStoreBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(stateStoreName), Serdes.Integer(), Serdes.Integer()).withLoggingDisabled();
    builder.addStateStore(keyValueStoreBuilder);
    builder.stream(INPUT_TOPIC, Consumed.with(Serdes.Integer(), Serdes.Integer())).transform(() -> new Transformer<Integer, Integer, KeyValue<Integer, Integer>>() {

        @Override
        public void init(final ProcessorContext context) {
        }

        @Override
        public KeyValue<Integer, Integer> transform(final Integer key, final Integer value) {
            return null;
        }

        @Override
        public void close() {
        }
    }, stateStoreName);
    final Topology topology = builder.build();
    createClients(topology, streamsConfiguration(), topology, streamsConfiguration());
    setStateListenersForVerification(thread -> thread.standbyTasks().isEmpty() && !thread.activeTasks().isEmpty());
    startClients();
    waitUntilBothClientAreOK("At least one client did not reach state RUNNING with active tasks but no stand-by tasks");
}
Also used: StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KeyValue(org.apache.kafka.streams.KeyValue) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Topology(org.apache.kafka.streams.Topology) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
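
The behavior under test hinges on the store builder's logging flag, sketched below in isolation: without a changelog topic there is nothing for a standby task to restore from, so none are created even when standby replicas are configured.

import java.util.Collections;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class StoreLoggingSketch {

    // no changelog topic is created, so the store cannot be replicated:
    // no standby tasks will be assigned for it
    static final StoreBuilder<KeyValueStore<Integer, Integer>> UNLOGGED =
        Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("myTransformState"),
                Serdes.Integer(), Serdes.Integer())
            .withLoggingDisabled();

    // the default behavior, made explicit: updates go to a changelog topic,
    // which is exactly what standby tasks replay
    static final StoreBuilder<KeyValueStore<Integer, Integer>> LOGGED =
        Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("myTransformState"),
                Serdes.Integer(), Serdes.Integer())
            .withLoggingEnabled(Collections.emptyMap());
}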

Example 30 with KeyValueStore

Usage of org.apache.kafka.streams.state.KeyValueStore in project kafka by apache.

From the class GlobalKTableIntegrationTest, method before().

@Before
public void before() throws Exception {
    builder = new StreamsBuilder();
    createTopics();
    streamsConfiguration = new Properties();
    final String safeTestName = safeUniqueTestName(getClass(), testName);
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + safeTestName);
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    globalTable = builder.globalTable(globalTableTopic, Consumed.with(Serdes.Long(), Serdes.String()), Materialized.<Long, String, KeyValueStore<Bytes, byte[]>>as(globalStore).withKeySerde(Serdes.Long()).withValueSerde(Serdes.String()));
    final Consumed<String, Long> stringLongConsumed = Consumed.with(Serdes.String(), Serdes.Long());
    stream = builder.stream(streamTopic, stringLongConsumed);
    supplier = new MockApiProcessorSupplier<>();
}
Also used: StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Properties(java.util.Properties) Before(org.junit.Before)
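
Once a global table is materialized under a name like this, every application instance holds a full copy of it. A minimal sketch of how the resulting store would be fetched for reads after the instance reaches RUNNING (the store name is a placeholder):

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

public class GlobalStoreLookupSketch {

    static String lookup(final KafkaStreams streams, final Long key) {
        // global stores are replicated to every instance, so this read is always local
        final ReadOnlyKeyValueStore<Long, String> store = streams.store(
            StoreQueryParameters.fromNameAndType("global-store", QueryableStoreTypes.keyValueStore()));
        return store.get(key);
    }
}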

Aggregations

KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 133 usages
Test (org.junit.Test): 101 usages
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 54 usages
KeyValue (org.apache.kafka.streams.KeyValue): 49 usages
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 47 usages
Properties (java.util.Properties): 37 usages
Bytes (org.apache.kafka.common.utils.Bytes): 36 usages
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 32 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 29 usages
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 28 usages
Serdes (org.apache.kafka.common.serialization.Serdes): 26 usages
Materialized (org.apache.kafka.streams.kstream.Materialized): 25 usages
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 24 usages
IntegrationTest (org.apache.kafka.test.IntegrationTest): 21 usages
KTable (org.apache.kafka.streams.kstream.KTable): 20 usages
Consumed (org.apache.kafka.streams.kstream.Consumed): 19 usages
StateStore (org.apache.kafka.streams.processor.StateStore): 17 usages
ReadOnlyKeyValueStore (org.apache.kafka.streams.state.ReadOnlyKeyValueStore): 17 usages
TestUtils (org.apache.kafka.test.TestUtils): 16 usages
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 16 usages