
Example 91 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class EosIntegrationTest, method verifyStateIsInStoreAndOffsetsAreInCheckpoint.

private void verifyStateIsInStoreAndOffsetsAreInCheckpoint(final int partition, final Set<KeyValue<Long, Long>> expectedState) throws IOException {
    final String stateStoreDir = stateTmpDir + File.separator + "appDir" + File.separator + applicationId + File.separator + "0_" + partition + File.separator;
    // Verify that the data in the state store on disk is fully up-to-date
    final StateStoreContext context = new MockInternalProcessorContext(new Properties(), new TaskId(0, 0), new File(stateStoreDir));
    final MockKeyValueStore stateStore = new MockKeyValueStore("store", false);
    final RocksDBStore store = (RocksDBStore) new RocksDbKeyValueBytesStoreSupplier(storeName, false).get();
    store.init(context, stateStore);
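    // Each record found in the on-disk store removes its matching entry from
    // the expected set, so any leftover entries would indicate missing state.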
    store.all().forEachRemaining(kv -> {
        final KeyValue<Long, Long> kv2 = new KeyValue<>(new BigInteger(kv.key.get()).longValue(), new BigInteger(kv.value).longValue());
        expectedState.remove(kv2);
    });
    // Verify that the checkpointed offsets exactly match the max record offsets in the changelog
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(stateStoreDir + ".checkpoint"));
    final Map<TopicPartition, Long> checkpointedOffsets = checkpoint.read();
    checkpointedOffsets.forEach(this::verifyChangelogMaxRecordOffsetMatchesCheckpointedOffset);
}
Also used: OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint) TaskId(org.apache.kafka.streams.processor.TaskId) KeyValue(org.apache.kafka.streams.KeyValue) RocksDbKeyValueBytesStoreSupplier(org.apache.kafka.streams.state.internals.RocksDbKeyValueBytesStoreSupplier) Properties(java.util.Properties) MockKeyValueStore(org.apache.kafka.test.MockKeyValueStore) StateStoreContext(org.apache.kafka.streams.processor.StateStoreContext) TopicPartition(org.apache.kafka.common.TopicPartition) BigInteger(java.math.BigInteger) MockInternalProcessorContext(org.apache.kafka.test.MockInternalProcessorContext) File(java.io.File) RocksDBStore(org.apache.kafka.streams.state.internals.RocksDBStore)
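A caller-side sketch for the helper above (the expected values and the final assertion are hypothetical; imports needed are java.util.HashSet, java.util.Set, and org.junit.Assert.assertTrue):

// Build the expected per-key state, run the verification, and assert that
// the on-disk store drained the set completely.
final Set<KeyValue<Long, Long>> expectedState = new HashSet<>();
// hypothetical expected entries: key -> accumulated sum
expectedState.add(new KeyValue<>(0L, 55L));
expectedState.add(new KeyValue<>(1L, 42L));
verifyStateIsInStoreAndOffsetsAreInCheckpoint(0, expectedState);
// every expected entry should have been found (and removed) by the helper
assertTrue(expectedState.isEmpty());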

Example 92 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class EosIntegrationTest, method getKafkaStreams.

// the threads should no longer fail one at a time
@SuppressWarnings("deprecation")
private KafkaStreams getKafkaStreams(final String dummyHostName, final boolean withState, final String appDir, final int numberOfStreamsThreads, final String eosConfig, final int maxPollIntervalMs) {
    commitRequested = new AtomicInteger(0);
    errorInjected = new AtomicBoolean(false);
    stallInjected = new AtomicBoolean(false);
    stallingHost = new AtomicReference<>();
    final StreamsBuilder builder = new StreamsBuilder();
    String[] storeNames = new String[0];
    if (withState) {
        storeNames = new String[] { storeName };
        final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
        builder.addStateStore(storeBuilder);
    }
    final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
    input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {

        @SuppressWarnings("unchecked")
        @Override
        public Transformer<Long, Long, KeyValue<Long, Long>> get() {
            return new Transformer<Long, Long, KeyValue<Long, Long>>() {

                ProcessorContext context;

                KeyValueStore<Long, Long> state = null;

                @Override
                public void init(final ProcessorContext context) {
                    this.context = context;
                    if (withState) {
                        state = (KeyValueStore<Long, Long>) context.getStateStore(storeName);
                    }
                }

                @Override
                public KeyValue<Long, Long> transform(final Long key, final Long value) {
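                    // when the test arms the stall flag, spin below until the test
                    // clears doStall, watching for interruption or thread shutdown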
                    if (stallInjected.compareAndSet(true, false)) {
                        LOG.info(dummyHostName + " is executing the injected stall");
                        stallingHost.set(dummyHostName);
                        while (doStall) {
                            final StreamThread thread = (StreamThread) Thread.currentThread();
                            if (thread.isInterrupted() || !thread.isRunning()) {
                                throw new RuntimeException("Detected we've been interrupted.");
                            }
                            try {
                                Thread.sleep(100);
                            } catch (final InterruptedException e) {
                                throw new RuntimeException(e);
                            }
                        }
                    }
                    if ((value + 1) % 10 == 0) {
                        context.commit();
                        commitRequested.incrementAndGet();
                    }
                    if (state != null) {
                        Long sum = state.get(key);
                        if (sum == null) {
                            sum = value;
                        } else {
                            sum += value;
                        }
                        state.put(key, sum);
                        state.flush();
                    }
                    if (errorInjected.compareAndSet(true, false)) {
                        // only try to fail once, on one of the tasks
                        throw new RuntimeException("Injected test exception.");
                    }
                    if (state != null) {
                        return new KeyValue<>(key, state.get(key));
                    } else {
                        return new KeyValue<>(key, value);
                    }
                }

                @Override
                public void close() {
                }
            };
        }
    }, storeNames).to(SINGLE_PARTITION_OUTPUT_TOPIC);
    stateTmpDir = TestUtils.tempDirectory().getPath() + File.separator;
    final Properties properties = new Properties();
    // Set the commit interval to a larger value so it does not interfere with the controlled stream commits,
    // but not too large, as we need a relatively low transaction timeout
    // to trigger the timed-out transaction in time.
    final long commitIntervalMs = 20_000L;
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, eosConfig);
    properties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numberOfStreamsThreads);
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, commitIntervalMs);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), (int) commitIntervalMs);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), "1000");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), maxPollIntervalMs);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), maxPollIntervalMs - 1);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), maxPollIntervalMs);
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, stateTmpDir + appDir);
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, dummyHostName + ":2142");
    final Properties config = StreamsTestUtils.getStreamsConfig(applicationId, CLUSTER.bootstrapServers(), Serdes.LongSerde.class.getName(), Serdes.LongSerde.class.getName(), properties);
    final KafkaStreams streams = new KafkaStreams(builder.build(), config);
    streams.setUncaughtExceptionHandler((t, e) -> {
        if (uncaughtException != null || !e.getMessage().contains("Injected test exception")) {
            e.printStackTrace(System.err);
            hasUnexpectedError = true;
        }
        uncaughtException = e;
    });
    return streams;
}
Also used: KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) StreamThread(org.apache.kafka.streams.processor.internals.StreamThread) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) MockKeyValueStore(org.apache.kafka.test.MockKeyValueStore) Properties(java.util.Properties) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) MockInternalProcessorContext(org.apache.kafka.test.MockInternalProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
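Transformer and KStream#transform are deprecated in newer Kafka Streams releases (hence the @SuppressWarnings("deprecation") above). A minimal sketch of the same running-sum step on the Processor API, assuming Kafka Streams 3.3+ (where KStream#process returns a KStream), reusing the example's input, storeName, and storeNames; Processor, ProcessorContext, ProcessorSupplier, and Record here are the generic types from org.apache.kafka.streams.processor.api:

input.process(new ProcessorSupplier<Long, Long, Long, Long>() {

    @Override
    public Processor<Long, Long, Long, Long> get() {
        return new Processor<Long, Long, Long, Long>() {

            private ProcessorContext<Long, Long> context;

            private KeyValueStore<Long, Long> state;

            @Override
            public void init(final ProcessorContext<Long, Long> context) {
                this.context = context;
                // no cast needed: the new API's getStateStore is generic
                state = context.getStateStore(storeName);
            }

            @Override
            public void process(final Record<Long, Long> record) {
                // same running-sum logic as the Transformer above
                final Long previous = state.get(record.key());
                final long sum = previous == null ? record.value() : previous + record.value();
                state.put(record.key(), sum);
                context.forward(record.withValue(sum));
            }

            @Override
            public void close() {
            }
        };
    }
}, storeNames).to(SINGLE_PARTITION_OUTPUT_TOPIC);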

Example 93 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class ErrorHandlingIntegrationTest, method shouldBackOffTaskAndEmitDataWithinSameTopology.

@Test
public void shouldBackOffTaskAndEmitDataWithinSameTopology() throws Exception {
    final AtomicInteger noOutputExpected = new AtomicInteger(0);
    final AtomicInteger outputExpected = new AtomicInteger(0);
    try (final KafkaStreamsNamedTopologyWrapper kafkaStreams = new KafkaStreamsNamedTopologyWrapper(properties)) {
        kafkaStreams.setUncaughtExceptionHandler(exception -> StreamThreadExceptionResponse.REPLACE_THREAD);
        final NamedTopologyBuilder builder = kafkaStreams.newNamedTopologyBuilder("topology_A");
        builder.stream(inputTopic).peek((k, v) -> outputExpected.incrementAndGet()).to(outputTopic);
        builder.stream(errorInputTopic).peek((k, v) -> {
            throw new RuntimeException("Kaboom");
        }).peek((k, v) -> noOutputExpected.incrementAndGet()).to(errorOutputTopic);
        kafkaStreams.addNamedTopology(builder.build());
        StreamsTestUtils.startKafkaStreamsAndWaitForRunningState(kafkaStreams);
        IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(errorInputTopic, Arrays.asList(new KeyValue<>(1, "A")), TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, StringSerializer.class, new Properties()), 0L);
        IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(inputTopic, Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(1, "B")), TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, StringSerializer.class, new Properties()), 0L);
        IntegrationTestUtils.waitUntilFinalKeyValueRecordsReceived(TestUtils.consumerConfig(CLUSTER.bootstrapServers(), IntegerDeserializer.class, StringDeserializer.class), outputTopic, Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(1, "B")));
        assertThat(noOutputExpected.get(), equalTo(0));
        assertThat(outputExpected.get(), equalTo(2));
    }
}
Also used: StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) KafkaStreamsNamedTopologyWrapper(org.apache.kafka.streams.processor.internals.namedtopology.KafkaStreamsNamedTopologyWrapper) IntegrationTest(org.apache.kafka.test.IntegrationTest) NamedTopologyBuilder(org.apache.kafka.streams.processor.internals.namedtopology.NamedTopologyBuilder) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestName(org.junit.rules.TestName) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Before(org.junit.Before) AfterClass(org.junit.AfterClass) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkObjectProperties(org.apache.kafka.common.utils.Utils.mkObjectProperties) KeyValue(org.apache.kafka.streams.KeyValue) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) Rule(org.junit.Rule) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) StreamThreadExceptionResponse(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse) IntegerDeserializer(org.apache.kafka.common.serialization.IntegerDeserializer) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils)
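KafkaStreamsNamedTopologyWrapper is an internal class for running several named topologies in one client; with a plain KafkaStreams application the same back-off behavior is wired up identically. A minimal sketch with hypothetical topic names, reusing the test's properties:

final StreamsBuilder builder = new StreamsBuilder();
// a poisoned sub-topology: every record throws and kills its stream thread
builder.stream("error-input").peek((k, v) -> {
    throw new RuntimeException("Kaboom");
}).to("error-output");
final KafkaStreams streams = new KafkaStreams(builder.build(), properties);
// REPLACE_THREAD starts a fresh thread after each death, so the failing
// task is retried with back-off while healthy tasks keep emitting data
streams.setUncaughtExceptionHandler(exception -> StreamThreadExceptionResponse.REPLACE_THREAD);
streams.start();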

Example 94 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class AbstractResetIntegrationTest, method testReprocessingFromScratchAfterResetWithIntermediateUserTopic.

private void testReprocessingFromScratchAfterResetWithIntermediateUserTopic(final boolean useRepartitioned) throws Exception {
    if (!useRepartitioned) {
        cluster.createTopic(INTERMEDIATE_USER_TOPIC);
    }
    final String appID = IntegrationTestUtils.safeUniqueTestName(getClass(), testName);
    streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, appID);
    // RUN
    streams = new KafkaStreams(setupTopologyWithIntermediateTopic(useRepartitioned, OUTPUT_TOPIC_2), streamsConfig);
    streams.start();
    final List<KeyValue<Long, Long>> result = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
    // receive only the first values to make sure the intermediate user topic is not consumed completely
    // => required to test "seekToEnd" for intermediate topics
    final List<KeyValue<Long, Long>> result2 = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC_2, 40);
    streams.close();
    waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
    // insert a bad record to make sure the intermediate user topic gets seekToEnd()
    mockTime.sleep(1);
    final KeyValue<Long, String> badMessage = new KeyValue<>(-1L, "badRecord-ShouldBeSkipped");
    if (!useRepartitioned) {
        IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(INTERMEDIATE_USER_TOPIC, Collections.singleton(badMessage), producerConfig, mockTime.milliseconds());
    }
    // RESET
    streams = new KafkaStreams(setupTopologyWithIntermediateTopic(useRepartitioned, OUTPUT_TOPIC_2_RERUN), streamsConfig);
    streams.cleanUp();
    cleanGlobal(!useRepartitioned, null, null, appID);
    waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
    assertInternalTopicsGotDeleted(useRepartitioned ? null : INTERMEDIATE_USER_TOPIC);
    // RE-RUN
    streams.start();
    final List<KeyValue<Long, Long>> resultRerun = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
    final List<KeyValue<Long, Long>> resultRerun2 = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC_2_RERUN, 40);
    streams.close();
    assertThat(resultRerun, equalTo(result));
    assertThat(resultRerun2, equalTo(result2));
    if (!useRepartitioned) {
        final Properties props = TestUtils.consumerConfig(cluster.bootstrapServers(), appID + "-result-consumer", LongDeserializer.class, StringDeserializer.class, commonClientConfig);
        final List<KeyValue<Long, String>> resultIntermediate = IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(props, INTERMEDIATE_USER_TOPIC, 21);
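        // 21 records total: 10 from the first run, the bad record at index 10,
        // and 10 re-processed records that must match the originals pairwise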
        for (int i = 0; i < 10; i++) {
            assertThat(resultIntermediate.get(i), equalTo(resultIntermediate.get(i + 11)));
        }
        assertThat(resultIntermediate.get(10), equalTo(badMessage));
    }
    waitForEmptyConsumerGroup(adminClient, appID, TIMEOUT_MULTIPLIER * STREAMS_CONSUMER_TIMEOUT);
    cleanGlobal(!useRepartitioned, null, null, appID);
    if (!useRepartitioned) {
        cluster.deleteTopicAndWait(INTERMEDIATE_USER_TOPIC);
    }
}
Also used: KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) Properties(java.util.Properties)
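For context on the reset steps above: KafkaStreams#cleanUp only removes the instance's local state directory and may only be called while the application is not running; the broker-side part (committed offsets, internal topics, seeking intermediate topics to end) is done by the Streams reset tool, which the cleanGlobal helper presumably wraps. A minimal sketch (topology is a placeholder):

// local reset: wipes <state.dir>/<application.id>; call only while stopped
final KafkaStreams app = new KafkaStreams(topology, streamsConfig);
app.cleanUp();
// broker-side reset (offsets, internal topics, seek-to-end of intermediate
// topics) is a separate step, e.g. the CLI shipped with Kafka:
//   bin/kafka-streams-application-reset.sh --application-id <appID> \
//       --input-topics <input> --intermediate-topics <intermediate>
app.start();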

Example 95 with KeyValue

Use of org.apache.kafka.streams.KeyValue in project kafka by apache.

From the class EosV2UpgradeIntegrationTest, method getKafkaStreams.

private KafkaStreams getKafkaStreams(final String appDir, final String processingGuarantee) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] storeNames = new String[] { storeName };
    final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
    builder.addStateStore(storeBuilder);
    final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
    input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {

        @Override
        public Transformer<Long, Long, KeyValue<Long, Long>> get() {
            return new Transformer<Long, Long, KeyValue<Long, Long>>() {

                ProcessorContext context;

                KeyValueStore<Long, Long> state = null;

                AtomicBoolean crash;

                AtomicInteger sharedCommit;

                @Override
                public void init(final ProcessorContext context) {
                    this.context = context;
                    state = context.getStateStore(storeName);
                    final String clientId = context.appConfigs().get(StreamsConfig.CLIENT_ID_CONFIG).toString();
                    if (APP_DIR_1.equals(clientId)) {
                        crash = errorInjectedClient1;
                        sharedCommit = commitCounterClient1;
                    } else {
                        crash = errorInjectedClient2;
                        sharedCommit = commitCounterClient2;
                    }
                }

                @Override
                public KeyValue<Long, Long> transform(final Long key, final Long value) {
                    if ((value + 1) % 10 == 0) {
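                        // commit unconditionally while the shared counter is disabled (< 0);
                        // otherwise only the second commit request actually commits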
                        if (sharedCommit.get() < 0 || sharedCommit.incrementAndGet() == 2) {
                            context.commit();
                        }
                        commitRequested.incrementAndGet();
                    }
                    Long sum = state.get(key);
                    if (sum == null) {
                        sum = value;
                    } else {
                        sum += value;
                    }
                    state.put(key, sum);
                    state.flush();
                    // potentially crash when processing the 5th, 15th, or 25th record (etc.)
                    if (value % 10 == 4 && crash != null && crash.compareAndSet(true, false)) {
                        // only crash a single task
                        throw new RuntimeException("Injected test exception.");
                    }
                    return new KeyValue<>(key, state.get(key));
                }

                @Override
                public void close() {
                }
            };
        }
    }, storeNames).to(MULTI_PARTITION_OUTPUT_TOPIC);
    final Properties properties = new Properties();
    properties.put(StreamsConfig.CLIENT_ID_CONFIG, appDir);
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, processingGuarantee);
    final long commitInterval = Duration.ofMinutes(1L).toMillis();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, commitInterval);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), Duration.ofSeconds(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).minusMillis(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), (int) commitInterval);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.PARTITIONER_CLASS_CONFIG), KeyPartitioner.class);
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath() + File.separator + appDir);
    properties.put(InternalConfig.ASSIGNMENT_LISTENER, assignmentListener);
    final Properties config = StreamsTestUtils.getStreamsConfig(applicationId, CLUSTER.bootstrapServers(), Serdes.LongSerde.class.getName(), Serdes.LongSerde.class.getName(), properties);
    final KafkaStreams streams = new KafkaStreams(builder.build(), config, new TestKafkaClientSupplier());
    streams.setUncaughtExceptionHandler(e -> {
        if (!injectError) {
            // we don't expect any exception to be thrown in the stop case
            e.printStackTrace(System.err);
            hasUnexpectedError = true;
        } else {
            int exceptionCount = (int) exceptionCounts.get(appDir);
            // we should only see our injected exception or a commit exception, and at most 2 exceptions for each stream
            if (++exceptionCount > 2 || !(e instanceof RuntimeException) || !(e.getMessage().contains("test exception"))) {
                // The exception won't cause the test to fail, since we actually "expected" an exception to be thrown and fail the stream.
                // So, log to stderr for debugging when the exception is not what we expected, and fail in the main thread
                e.printStackTrace(System.err);
                hasUnexpectedError = true;
            }
            exceptionCounts.put(appDir, exceptionCount);
        }
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
    });
    return streams;
}
Also used: KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Properties(java.util.Properties) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
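The example registers a custom producer partitioner (KeyPartitioner) via ProducerConfig.PARTITIONER_CLASS_CONFIG, but the class itself is not shown on this page. A hypothetical sketch of such a partitioner, assuming the test's Long-serialized keys:

import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.serialization.LongDeserializer;

public class KeyPartitioner implements Partitioner {

    @Override
    public int partition(final String topic, final Object key, final byte[] keyBytes,
                         final Object value, final byte[] valueBytes, final Cluster cluster) {
        // route deterministically by the Long key so a given key always
        // lands on the same partition, independent of the default partitioner
        final long longKey = new LongDeserializer().deserialize(topic, keyBytes);
        return (int) (longKey % cluster.partitionsForTopic(topic).size());
    }

    @Override
    public void configure(final Map<String, ?> configs) {
    }

    @Override
    public void close() {
    }
}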

Aggregations

KeyValue (org.apache.kafka.streams.KeyValue): 343
Test (org.junit.Test): 268
Properties (java.util.Properties): 127
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 123
Windowed (org.apache.kafka.streams.kstream.Windowed): 105
ArrayList (java.util.ArrayList): 90
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 82
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 74
Bytes (org.apache.kafka.common.utils.Bytes): 74
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 68
IntegrationTest (org.apache.kafka.test.IntegrationTest): 66
Serdes (org.apache.kafka.common.serialization.Serdes): 65
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 62
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 55
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 53
KStream (org.apache.kafka.streams.kstream.KStream): 52
SessionWindow (org.apache.kafka.streams.kstream.internals.SessionWindow): 46
KTable (org.apache.kafka.streams.kstream.KTable): 43
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 42
Consumed (org.apache.kafka.streams.kstream.Consumed): 41