Example 16 with Topology

use of org.apache.kafka.streams.Topology in project kafka by apache.

the class MetricsIntegrationTest method startApplication.

private void startApplication() throws InterruptedException {
    final Topology topology = builder.build();
    kafkaStreams = new KafkaStreams(topology, streamsConfiguration);
    verifyAliveStreamThreadsMetric();
    verifyStateMetric(State.CREATED);
    verifyTopologyDescriptionMetric(topology.describe().toString());
    verifyApplicationIdMetric();
    kafkaStreams.start();
    TestUtils.waitForCondition(
        () -> kafkaStreams.state() == State.RUNNING,
        timeout,
        () -> "Kafka Streams application did not reach state RUNNING in " + timeout + " ms");
    verifyAliveStreamThreadsMetric();
    verifyStateMetric(State.RUNNING);
}
Also used : KafkaStreams(org.apache.kafka.streams.KafkaStreams) Topology(org.apache.kafka.streams.Topology)
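
The verify* helpers are not shown in this excerpt. As a minimal sketch, a state-metric check can be written against KafkaStreams#metrics(); the "stream-metrics" group, the "state" metric name, and the string-valued gauge are assumptions based on the Streams client-level metrics and may differ between versions:

private void verifyStateMetric(final State expected) {
    // Requires org.apache.kafka.common.Metric; metrics() exposes all client, thread, and task metrics.
    final List<Metric> metricsList = new ArrayList<>(kafkaStreams.metrics().values());
    final List<Metric> stateMetrics = new ArrayList<>();
    for (final Metric metric : metricsList) {
        // The client-level "state" metric is assumed to be registered in the "stream-metrics" group.
        if ("state".equals(metric.metricName().name()) && "stream-metrics".equals(metric.metricName().group())) {
            stateMetrics.add(metric);
        }
    }
    assertThat(stateMetrics.size(), equalTo(1));
    // Assumes the gauge reports the current state as its string representation.
    assertThat((String) stateMetrics.get(0).metricValue(), equalTo(expected.toString()));
}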

Example 17 with Topology

use of org.apache.kafka.streams.Topology in project kafka by apache.

the class RepartitionOptimizingTest method runTest.

private void runTest(final String optimizationConfig, final int expectedNumberRepartitionTopics) {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> sourceStream =
        builder.stream(INPUT_TOPIC, Consumed.with(Serdes.String(), Serdes.String()).withName("sourceStream"));
    final KStream<String, String> mappedStream =
        sourceStream.map((k, v) -> KeyValue.pair(k.toUpperCase(Locale.getDefault()), v), Named.as("source-map"));
    mappedStream
        .filter((k, v) -> k.equals("B"), Named.as("process-filter"))
        .mapValues(v -> v.toUpperCase(Locale.getDefault()), Named.as("process-mapValues"))
        .process(() -> new SimpleProcessor(processorValueCollector), Named.as("process"));
    final KStream<String, Long> countStream = mappedStream
        .groupByKey(Grouped.as("count-groupByKey"))
        .count(Named.as("count"), Materialized.<String, Long>as(Stores.inMemoryKeyValueStore("count-store"))
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.Long()))
        .toStream(Named.as("count-toStream"));
    countStream.to(COUNT_TOPIC, Produced.with(Serdes.String(), Serdes.Long()).withName("count-to"));
    mappedStream
        .groupByKey(Grouped.as("aggregate-groupByKey"))
        .aggregate(initializer, aggregator, Named.as("aggregate"),
            Materialized.<String, Integer>as(Stores.inMemoryKeyValueStore("aggregate-store"))
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.Integer()))
        .toStream(Named.as("aggregate-toStream"))
        .to(AGGREGATION_TOPIC, Produced.with(Serdes.String(), Serdes.Integer()).withName("reduce-to"));
    // adding operators for case where the repartition node is further downstream
    mappedStream
        .filter((k, v) -> true, Named.as("reduce-filter"))
        .peek((k, v) -> System.out.println(k + ":" + v), Named.as("reduce-peek"))
        .groupByKey(Grouped.as("reduce-groupByKey"))
        .reduce(reducer, Named.as("reducer"), Materialized.as(Stores.inMemoryKeyValueStore("reduce-store")))
        .toStream(Named.as("reduce-toStream"))
        .to(REDUCE_TOPIC, Produced.with(Serdes.String(), Serdes.String()));
    mappedStream
        .filter((k, v) -> k.equals("A"), Named.as("join-filter"))
        .join(countStream,
            (v1, v2) -> v1 + ":" + v2.toString(),
            JoinWindows.of(ofMillis(5000)),
            StreamJoined.<String, String, Long>with(
                    Stores.inMemoryWindowStore("join-store", ofDays(1), ofMillis(10000), true),
                    Stores.inMemoryWindowStore("other-join-store", ofDays(1), ofMillis(10000), true))
                .withName("join")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String())
                .withOtherValueSerde(Serdes.Long()))
        .to(JOINED_TOPIC, Produced.as("join-to"));
    streamsConfiguration.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizationConfig);
    final Topology topology = builder.build(streamsConfiguration);
    topologyTestDriver = new TopologyTestDriver(topology, streamsConfiguration);
    final TestInputTopic<String, String> inputTopicA = topologyTestDriver.createInputTopic(INPUT_TOPIC, stringSerializer, stringSerializer);
    final TestOutputTopic<String, Long> countOutputTopic = topologyTestDriver.createOutputTopic(COUNT_TOPIC, stringDeserializer, new LongDeserializer());
    final TestOutputTopic<String, Integer> aggregationOutputTopic = topologyTestDriver.createOutputTopic(AGGREGATION_TOPIC, stringDeserializer, new IntegerDeserializer());
    final TestOutputTopic<String, String> reduceOutputTopic = topologyTestDriver.createOutputTopic(REDUCE_TOPIC, stringDeserializer, stringDeserializer);
    final TestOutputTopic<String, String> joinedOutputTopic = topologyTestDriver.createOutputTopic(JOINED_TOPIC, stringDeserializer, stringDeserializer);
    inputTopicA.pipeKeyValueList(getKeyValues());
    // Verify the topology
    final String topologyString = topology.describe().toString();
    if (optimizationConfig.equals(StreamsConfig.OPTIMIZE)) {
        assertEquals(EXPECTED_OPTIMIZED_TOPOLOGY, topologyString);
    } else {
        assertEquals(EXPECTED_UNOPTIMIZED_TOPOLOGY, topologyString);
    }
    // Verify the number of repartition topics
    assertEquals(expectedNumberRepartitionTopics, getCountOfRepartitionTopicsFound(topologyString));
    // Verify the values collected by the processor
    assertThat(processorValueCollector.size(), equalTo(3));
    assertThat(processorValueCollector, equalTo(expectedCollectedProcessorValues));
    // Verify the expected output
    assertThat(countOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedCountKeyValues)));
    assertThat(aggregationOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedAggKeyValues)));
    assertThat(reduceOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedReduceKeyValues)));
    assertThat(joinedOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedJoinKeyValues)));
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) Stores(org.apache.kafka.streams.state.Stores) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) StreamJoined(org.apache.kafka.streams.kstream.StreamJoined) ArrayList(java.util.ArrayList) Initializer(org.apache.kafka.streams.kstream.Initializer) JoinWindows(org.apache.kafka.streams.kstream.JoinWindows) Matcher(java.util.regex.Matcher) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) Locale(java.util.Locale) Map(java.util.Map) Named(org.apache.kafka.streams.kstream.Named) After(org.junit.After) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Record(org.apache.kafka.streams.processor.api.Record) Deserializer(org.apache.kafka.common.serialization.Deserializer) Processor(org.apache.kafka.streams.processor.api.Processor) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Aggregator(org.apache.kafka.streams.kstream.Aggregator) Before(org.junit.Before) Duration.ofDays(java.time.Duration.ofDays) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) TestOutputTopic(org.apache.kafka.streams.TestOutputTopic) Properties(java.util.Properties) Logger(org.slf4j.Logger) Consumed(org.apache.kafka.streams.kstream.Consumed) KeyValue(org.apache.kafka.streams.KeyValue) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Test(org.junit.Test) Grouped(org.apache.kafka.streams.kstream.Grouped) List(java.util.List) Serializer(org.apache.kafka.common.serialization.Serializer) Reducer(org.apache.kafka.streams.kstream.Reducer) Materialized(org.apache.kafka.streams.kstream.Materialized) IntegerDeserializer(org.apache.kafka.common.serialization.IntegerDeserializer) TestInputTopic(org.apache.kafka.streams.TestInputTopic) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) Pattern(java.util.regex.Pattern) Duration.ofMillis(java.time.Duration.ofMillis) Topology(org.apache.kafka.streams.Topology) Assert.assertEquals(org.junit.Assert.assertEquals)
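
Neither SimpleProcessor nor getCountOfRepartitionTopicsFound appears in this excerpt. A plausible reconstruction of both, assuming the DSL suffixes repartition topic names with "-repartition" and that matching the "Sink: " lines of the description counts each repartition topic exactly once:

// Collects every value that flows through the "process" node (new Processor API).
static class SimpleProcessor implements Processor<String, String, Void, Void> {
    private final List<String> valueList;

    SimpleProcessor(final List<String> valueList) {
        this.valueList = valueList;
    }

    @Override
    public void process(final Record<String, String> record) {
        valueList.add(record.value());
    }
}

private static final Pattern REPARTITION_TOPIC_PATTERN = Pattern.compile("Sink: .*-repartition");

// Scans the printed topology description and counts the repartition sinks.
private int getCountOfRepartitionTopicsFound(final String topologyString) {
    final Matcher matcher = REPARTITION_TOPIC_PATTERN.matcher(topologyString);
    final List<String> repartitionTopicsFound = new ArrayList<>();
    while (matcher.find()) {
        repartitionTopicsFound.add(matcher.group());
    }
    return repartitionTopicsFound.size();
}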

Example 18 with Topology

use of org.apache.kafka.streams.Topology in project kafka by apache.

the class RepartitionWithMergeOptimizingTest method runTest.

private void runTest(final String optimizationConfig, final int expectedNumberRepartitionTopics) {
    streamsConfiguration.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizationConfig);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> sourceAStream = builder.stream(INPUT_A_TOPIC, Consumed.with(Serdes.String(), Serdes.String()).withName("sourceAStream"));
    final KStream<String, String> sourceBStream = builder.stream(INPUT_B_TOPIC, Consumed.with(Serdes.String(), Serdes.String()).withName("sourceBStream"));
    final KStream<String, String> mappedAStream = sourceAStream.map((k, v) -> KeyValue.pair(v.split(":")[0], v), Named.as("mappedAStream"));
    final KStream<String, String> mappedBStream = sourceBStream.map((k, v) -> KeyValue.pair(v.split(":")[0], v), Named.as("mappedBStream"));
    final KStream<String, String> mergedStream = mappedAStream.merge(mappedBStream, Named.as("mergedStream"));
    mergedStream
        .groupByKey(Grouped.as("long-groupByKey"))
        .count(Named.as("long-count"), Materialized.as(Stores.inMemoryKeyValueStore("long-store")))
        .toStream(Named.as("long-toStream"))
        .to(COUNT_TOPIC, Produced.with(Serdes.String(), Serdes.Long()).withName("long-to"));
    mergedStream
        .groupByKey(Grouped.as("string-groupByKey"))
        .count(Named.as("string-count"), Materialized.as(Stores.inMemoryKeyValueStore("string-store")))
        .toStream(Named.as("string-toStream"))
        .mapValues(v -> v.toString(), Named.as("string-mapValues"))
        .to(STRING_COUNT_TOPIC, Produced.with(Serdes.String(), Serdes.String()).withName("string-to"));
    final Topology topology = builder.build(streamsConfiguration);
    topologyTestDriver = new TopologyTestDriver(topology, streamsConfiguration);
    final TestInputTopic<String, String> inputTopicA = topologyTestDriver.createInputTopic(INPUT_A_TOPIC, stringSerializer, stringSerializer);
    final TestInputTopic<String, String> inputTopicB = topologyTestDriver.createInputTopic(INPUT_B_TOPIC, stringSerializer, stringSerializer);
    final TestOutputTopic<String, Long> countOutputTopic = topologyTestDriver.createOutputTopic(COUNT_TOPIC, stringDeserializer, new LongDeserializer());
    final TestOutputTopic<String, String> stringCountOutputTopic = topologyTestDriver.createOutputTopic(STRING_COUNT_TOPIC, stringDeserializer, stringDeserializer);
    inputTopicA.pipeKeyValueList(getKeyValues());
    inputTopicB.pipeKeyValueList(getKeyValues());
    final String topologyString = topology.describe().toString();
    // Verify the topology
    if (optimizationConfig.equals(StreamsConfig.OPTIMIZE)) {
        assertEquals(EXPECTED_OPTIMIZED_TOPOLOGY, topologyString);
    } else {
        assertEquals(EXPECTED_UNOPTIMIZED_TOPOLOGY, topologyString);
    }
    // Verify the number of repartition topics
    assertEquals(expectedNumberRepartitionTopics, getCountOfRepartitionTopicsFound(topologyString));
    // Verify the expected output
    assertThat(countOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedCountKeyValues)));
    assertThat(stringCountOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedStringCountKeyValues)));
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) Stores(org.apache.kafka.streams.state.Stores) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) ArrayList(java.util.ArrayList) Matcher(java.util.regex.Matcher) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) Map(java.util.Map) Named(org.apache.kafka.streams.kstream.Named) After(org.junit.After) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Deserializer(org.apache.kafka.common.serialization.Deserializer) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Before(org.junit.Before) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) TestOutputTopic(org.apache.kafka.streams.TestOutputTopic) Properties(java.util.Properties) Logger(org.slf4j.Logger) Consumed(org.apache.kafka.streams.kstream.Consumed) KeyValue(org.apache.kafka.streams.KeyValue) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Test(org.junit.Test) Grouped(org.apache.kafka.streams.kstream.Grouped) List(java.util.List) Serializer(org.apache.kafka.common.serialization.Serializer) Materialized(org.apache.kafka.streams.kstream.Materialized) TestInputTopic(org.apache.kafka.streams.TestInputTopic) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) Pattern(java.util.regex.Pattern) Topology(org.apache.kafka.streams.Topology) Assert.assertEquals(org.junit.Assert.assertEquals)
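
The keyValueListToMap helper shared by these repartition tests is not shown; a minimal sketch (KeyValue exposes its key and value as public fields), assuming later values for the same key should win:

private static <K, V> Map<K, V> keyValueListToMap(final List<KeyValue<K, V>> keyValuePairs) {
    final Map<K, V> map = new HashMap<>();
    for (final KeyValue<K, V> pair : keyValuePairs) {
        // Later pairs overwrite earlier ones, matching the latest-value-per-key
        // semantics of TestOutputTopic#readKeyValuesToMap().
        map.put(pair.key, pair.value);
    }
    return map;
}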

Example 19 with Topology

use of org.apache.kafka.streams.Topology in project kafka by apache.

the class StateDirectoryIntegrationTest method testNotCleanUpStateDirIfNotEmpty.

@Test
public void testNotCleanUpStateDirIfNotEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    // Create Topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create Test Records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));
        // Create Topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(input, Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.String()));
        final Topology topology = builder.build();
        // State Store Directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();
        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);
        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);
        // Application state directory
        final File appDir = new File(stateDir, applicationId);
        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());
        try {
            assertTrue((new File(appDir, "dummy")).createNewFile());
        } catch (final IOException e) {
            throw new RuntimeException("Failed to create dummy file.", e);
        }
        // Close the application; cleanUp() must NOT delete the state directory, because it is not empty.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't shut down in time.", e);
        }
        streams.cleanUp();
        // Root state directory still exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory still exists: the dummy file keeps it non-empty
        assertTrue(appDir.exists());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) BeforeClass(org.junit.BeforeClass) IntegrationTest(org.apache.kafka.test.IntegrationTest) Utils.mkProperties(org.apache.kafka.common.utils.Utils.mkProperties) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) IntegrationTestUtils.safeUniqueTestName(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) TestName(org.junit.rules.TestName) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AfterClass(org.junit.AfterClass) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) File(java.io.File) Bytes(org.apache.kafka.common.utils.Bytes) TimeUnit(java.util.concurrent.TimeUnit) CountDownLatch(java.util.concurrent.CountDownLatch) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) Rule(org.junit.Rule) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) Materialized(org.apache.kafka.streams.kstream.Materialized) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Topology(org.apache.kafka.streams.Topology)
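
The listener-plus-latch pattern above appears twice (once for RUNNING, once for NOT_RUNNING). A minimal polling alternative, useful because KafkaStreams accepts only one StateListener and only before start(); the helper name and signature are illustrative, not a Kafka API (TestUtils.waitForCondition from the Kafka test utilities serves the same purpose):

private static void awaitState(final KafkaStreams streams,
                               final KafkaStreams.State target,
                               final long timeoutMs) throws InterruptedException {
    // Poll KafkaStreams#state() until the target state is observed or the deadline passes.
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (streams.state() != target) {
        if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("Streams did not reach " + target + " within " + timeoutMs + " ms");
        }
        Thread.sleep(100L);
    }
}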

Example 20 with Topology

use of org.apache.kafka.streams.Topology in project kafka by apache.

the class RestoreIntegrationTest method shouldProcessDataFromStoresWithLoggingDisabled.

@Test
public void shouldProcessDataFromStoresWithLoggingDisabled() throws InterruptedException {
    IntegrationTestUtils.produceKeyValuesSynchronously(
        inputStream,
        asList(KeyValue.pair(1, 1), KeyValue.pair(2, 2), KeyValue.pair(3, 3)),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), IntegerSerializer.class, IntegerSerializer.class),
        CLUSTER.time);
    final KeyValueBytesStoreSupplier lruMapSupplier = Stores.lruMap(inputStream, 10);
    final StoreBuilder<KeyValueStore<Integer, Integer>> storeBuilder =
        new KeyValueStoreBuilder<>(lruMapSupplier, Serdes.Integer(), Serdes.Integer(), CLUSTER.time)
            .withLoggingDisabled();
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.addStateStore(storeBuilder);
    final KStream<Integer, Integer> stream = streamsBuilder.stream(inputStream);
    final CountDownLatch processorLatch = new CountDownLatch(3);
    stream.process(() -> new KeyValueStoreProcessor(inputStream, processorLatch), inputStream);
    final Topology topology = streamsBuilder.build();
    kafkaStreams = new KafkaStreams(topology, props());
    final CountDownLatch latch = new CountDownLatch(1);
    kafkaStreams.setStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            latch.countDown();
        }
    });
    kafkaStreams.start();
    latch.await(30, TimeUnit.SECONDS);
    assertTrue(processorLatch.await(30, TimeUnit.SECONDS));
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValueBytesStoreSupplier(org.apache.kafka.streams.state.KeyValueBytesStoreSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) InMemoryKeyValueStore(org.apache.kafka.streams.state.internals.InMemoryKeyValueStore) Topology(org.apache.kafka.streams.Topology) CountDownLatch(java.util.concurrent.CountDownLatch) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)
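
KeyValueStoreProcessor is defined elsewhere in RestoreIntegrationTest; a hedged reconstruction based on how it is wired above (it receives the store name and the latch, writes each record into the logging-disabled store, and counts down once per record; field and method details are assumptions):

static class KeyValueStoreProcessor implements Processor<Integer, Integer, Void, Void> {
    private final String storeName;
    private final CountDownLatch processorLatch;
    private KeyValueStore<Integer, Integer> store;

    KeyValueStoreProcessor(final String storeName, final CountDownLatch processorLatch) {
        this.storeName = storeName;
        this.processorLatch = processorLatch;
    }

    @Override
    public void init(final ProcessorContext<Void, Void> context) {
        // The store was attached via StreamsBuilder#addStateStore and named after the input topic.
        this.store = context.getStateStore(storeName);
    }

    @Override
    public void process(final Record<Integer, Integer> record) {
        if (record.key() != null) {
            store.put(record.key(), record.value());
            processorLatch.countDown();
        }
    }
}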

Aggregations

Topology (org.apache.kafka.streams.Topology)127 Test (org.junit.Test)106 StreamsBuilder (org.apache.kafka.streams.StreamsBuilder)93 KafkaStreams (org.apache.kafka.streams.KafkaStreams)53 TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver)53 Properties (java.util.Properties)47 StringSerializer (org.apache.kafka.common.serialization.StringSerializer)46 KeyValue (org.apache.kafka.streams.KeyValue)40 Serdes (org.apache.kafka.common.serialization.Serdes)39 StreamsConfig (org.apache.kafka.streams.StreamsConfig)33 List (java.util.List)29 MutableSpan (brave.handler.MutableSpan)28 Consumed (org.apache.kafka.streams.kstream.Consumed)28 Produced (org.apache.kafka.streams.kstream.Produced)26 Arrays (java.util.Arrays)25 StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer)25 ArrayList (java.util.ArrayList)23 ProcessorContext (org.apache.kafka.streams.processor.ProcessorContext)23 Duration (java.time.Duration)22 KStream (org.apache.kafka.streams.kstream.KStream)22