Example 11 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project starlight-for-kafka by datastax.

From class KStreamAggregationTest, method shouldGroupByKey:

@Test
public void shouldGroupByKey() throws Exception {
    final long timestamp = mockTime.milliseconds();
    produceMessages(timestamp);
    produceMessages(timestamp);
    stream.groupByKey(Serialized.with(Serdes.Integer(), Serdes.String()))
        .windowedBy(TimeWindows.of(500L))
        .count()
        .toStream((windowedKey, value) -> windowedKey.key() + "@" + windowedKey.window().start())
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
    startStreams();
    final List<KeyValue<String, Long>> results = receiveMessages(new StringDeserializer(), new LongDeserializer(), 10);
    results.sort(KStreamAggregationTest::compare);
    final long window = timestamp / 500 * 500;
    assertThat(results, is(Arrays.asList(
        KeyValue.pair("1@" + window, 1L), KeyValue.pair("1@" + window, 2L),
        KeyValue.pair("2@" + window, 1L), KeyValue.pair("2@" + window, 2L),
        KeyValue.pair("3@" + window, 1L), KeyValue.pair("3@" + window, 2L),
        KeyValue.pair("4@" + window, 1L), KeyValue.pair("4@" + window, 2L),
        KeyValue.pair("5@" + window, 1L), KeyValue.pair("5@" + window, 2L))));
}
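Note on the expected output: produceMessages(timestamp) is called twice with the same timestamp, so both records per key fall into the same 500 ms window, and count() emits an updated result for every incoming record. That is why each key appears once with count 1L and once with count 2L. The expression timestamp / 500 * 500 relies on integer division to round the timestamp down to the start of its window (for example, 1234 / 500 * 500 = 1000).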
Also used : Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) Serialized(org.apache.kafka.streams.kstream.Serialized) Test(org.testng.annotations.Test) WindowedSerdes(org.apache.kafka.streams.kstream.WindowedSerdes) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) Serde(org.apache.kafka.common.serialization.Serde) Map(java.util.Map) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Is.is(org.hamcrest.core.Is.is) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Aggregator(org.apache.kafka.streams.kstream.Aggregator) TimeWindowedDeserializer(org.apache.kafka.streams.kstream.TimeWindowedDeserializer) SessionWindowedDeserializer(org.apache.kafka.streams.kstream.SessionWindowedDeserializer) NonNull(lombok.NonNull) KeyValue(org.apache.kafka.streams.KeyValue) Set(java.util.Set) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) Bytes(org.apache.kafka.common.utils.Bytes) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) CountDownLatch(java.util.concurrent.CountDownLatch) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) Materialized(org.apache.kafka.streams.kstream.Materialized) SessionWindow(org.apache.kafka.streams.kstream.internals.SessionWindow) TimeWindow(org.apache.kafka.streams.kstream.internals.TimeWindow) StreamsConfig(org.apache.kafka.streams.StreamsConfig) ByteArrayOutputStream(java.io.ByteArrayOutputStream) KGroupedStream(org.apache.kafka.streams.kstream.KGroupedStream) ConsoleConsumer(kafka.tools.ConsoleConsumer) SessionWindows(org.apache.kafka.streams.kstream.SessionWindows) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) HashSet(java.util.HashSet) Initializer(org.apache.kafka.streams.kstream.Initializer) Windowed(org.apache.kafka.streams.kstream.Windowed) IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Deserializer(org.apache.kafka.common.serialization.Deserializer) PrintStream(java.io.PrintStream) KeyValueMapper(org.apache.kafka.streams.kstream.KeyValueMapper) Properties(java.util.Properties) Consumed(org.apache.kafka.streams.kstream.Consumed) Transformer(org.apache.kafka.streams.kstream.Transformer) Assert.assertTrue(org.junit.Assert.assertTrue) TimeUnit(java.util.concurrent.TimeUnit) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) TimeWindows(org.apache.kafka.streams.kstream.TimeWindows) Reducer(org.apache.kafka.streams.kstream.Reducer) IntegerDeserializer(org.apache.kafka.common.serialization.IntegerDeserializer) Comparator(java.util.Comparator) Collections(java.util.Collections) ReadOnlySessionStore(org.apache.kafka.streams.state.ReadOnlySessionStore)

Example 12 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka-streams-examples by confluentinc.

From class HandlingCorruptedInputRecordsIntegrationTest, method shouldIgnoreCorruptInputRecords:

@Test
public void shouldIgnoreCorruptInputRecords() {
    // 
    // Step 1: Configure and start the processor topology.
    // 
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "failure-handling-integration-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy config");
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName());
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();
    final String inputTopic = "inputTopic";
    final String outputTopic = "outputTopic";
    final KStream<byte[], byte[]> stream = builder.stream(inputTopic);
    // Note how the returned stream is of type `KStream<String, Long>`.
    final KStream<String, Long> doubled = stream.flatMap((k, v) -> {
        try {
            // Attempt deserialization.
            final String key = stringSerde.deserializer().deserialize(inputTopic, k);
            final long value = longSerde.deserializer().deserialize(inputTopic, v);
            // The record deserialized cleanly, so process it (here: double the value).
            return Collections.singletonList(KeyValue.pair(key, 2 * value));
        } catch (final SerializationException e) {
            // Ignore/skip the corrupted record by catching the exception.
            // Optionally, we can log the fact that we did so:
            System.err.println("Could not deserialize record: " + e.getMessage());
        }
        return Collections.emptyList();
    });
    // Write the processing results (which were generated from valid records only) to Kafka.
    doubled.to(outputTopic, Produced.with(stringSerde, longSerde));
    final List<Long> inputValues = Arrays.asList(1L, 2L, 3L);
    final LongSerializer longSerializer = new LongSerializer();
    final List<byte[]> serializedInputValues = inputValues.stream().map(x -> longSerializer.serialize(inputTopic, x)).collect(Collectors.toList());
    final List<Long> expectedValues = inputValues.stream().map(x -> 2 * x).collect(Collectors.toList());
    try (final TopologyTestDriver topologyTestDriver = new TopologyTestDriver(builder.build(), streamsConfiguration)) {
        // 
        // Step 2: Setup input and output topics.
        // 
        // Set up the input topic with a byte[] value type so corrupted data can be ingested.
        final TestInputTopic<Void, byte[]> input = topologyTestDriver.createInputTopic(inputTopic, new IntegrationTestUtils.NothingSerde<>(), new ByteArraySerializer());
        final TestOutputTopic<Void, Long> output = topologyTestDriver.createOutputTopic(outputTopic, new IntegrationTestUtils.NothingSerde<>(), new LongDeserializer());
        // 
        // Step 3: Produce some corrupt input data to the input topic.
        // 
        input.pipeInput(new StringSerializer().serialize(inputTopic, "corrupt"));
        // 
        // Step 4: Produce some (valid) input data to the input topic.
        // 
        input.pipeValueList(serializedInputValues);
        // 
        // Step 5: Verify the application's output data.
        // 
        assertThat(output.readValuesToList(), equalTo(expectedValues));
    }
}
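The heart of this example is the try/catch inside flatMap: a corrupt record raises SerializationException, which is caught and mapped to an empty list, so processing continues. The same idea can be factored into a reusable helper; here is a minimal sketch (the SafeDeserializer class and its tryDeserialize method are hypothetical illustrations, not part of the Confluent example):

import java.util.Optional;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.serialization.Deserializer;

final class SafeDeserializer {

    // Wraps Deserializer#deserialize so that a corrupt payload yields
    // Optional.empty() instead of propagating a SerializationException.
    static <T> Optional<T> tryDeserialize(final Deserializer<T> deserializer,
                                          final String topic,
                                          final byte[] data) {
        try {
            return Optional.ofNullable(deserializer.deserialize(topic, data));
        } catch (final SerializationException e) {
            // Optionally log the skipped record, as the example above does.
            System.err.println("Could not deserialize record: " + e.getMessage());
            return Optional.empty();
        }
    }
}

A flatMap lambda could then call tryDeserialize for the key and the value and return Collections.emptyList() whenever either Optional is empty, mirroring the inline try/catch above.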
Also used : TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) TestOutputTopic(org.apache.kafka.streams.TestOutputTopic) Properties(java.util.Properties) Produced(org.apache.kafka.streams.kstream.Produced) SerializationException(org.apache.kafka.common.errors.SerializationException) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) KeyValue(org.apache.kafka.streams.KeyValue) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Test(org.junit.Test) KStream(org.apache.kafka.streams.kstream.KStream) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) Collectors(java.util.stream.Collectors) List(java.util.List) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) Serde(org.apache.kafka.common.serialization.Serde) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) TestInputTopic(org.apache.kafka.streams.TestInputTopic) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Collections(java.util.Collections)

Example 13 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka-streams-examples by confluentinc.

From class SessionWindowsExampleTest, method createStreams:

@Before
public void createStreams() {
    topologyTestDriver = new TopologyTestDriver(
        SessionWindowsExample.buildTopology(AVRO_SERDE_CONFIG),
        SessionWindowsExample.streamsConfig("dummy", TestUtils.tempDirectory().getPath()));
    final SpecificAvroSerializer<PlayEvent> playEventSerializer = new SpecificAvroSerializer<>();
    playEventSerializer.configure(AVRO_SERDE_CONFIG, false);
    input = topologyTestDriver.createInputTopic(SessionWindowsExample.PLAY_EVENTS, new StringSerializer(), playEventSerializer);
    output = topologyTestDriver.createOutputTopic(SessionWindowsExample.PLAY_EVENTS_PER_SESSION, new StringDeserializer(), new LongDeserializer());
}
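A detail worth noting in this setup: the second argument of playEventSerializer.configure(AVRO_SERDE_CONFIG, false) is the isKey flag, so the serializer is configured for record values rather than keys. AVRO_SERDE_CONFIG is defined elsewhere in the test class and presumably carries the schema-registry settings the Avro serde requires.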
Also used : LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) SpecificAvroSerializer(io.confluent.kafka.streams.serdes.avro.SpecificAvroSerializer) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) PlayEvent(io.confluent.examples.streams.avro.PlayEvent) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Before(org.junit.Before)

Example 14 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka-streams-examples by confluentinc.

From class StateStoresInTheDSLIntegrationTest, method shouldAllowStateStoreAccessFromDSL:

@Test
public void shouldAllowStateStoreAccessFromDSL() {
    final List<String> inputValues = Arrays.asList("foo", "bar", "foo", "quux", "bar", "foo");
    final List<KeyValue<String, Long>> expectedRecords = Arrays.asList(
        new KeyValue<>("foo", 1L), new KeyValue<>("bar", 1L), new KeyValue<>("foo", 2L),
        new KeyValue<>("quux", 1L), new KeyValue<>("bar", 2L), new KeyValue<>("foo", 3L));
    // 
    // Step 1: Configure and start the processor topology.
    // 
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "state-store-dsl-lambda-integration-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy config");
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    // Create a state store manually.
    final StoreBuilder<KeyValueStore<String, Long>> wordCountsStore = Stores
        .keyValueStoreBuilder(Stores.persistentKeyValueStore("WordCountsStore"), Serdes.String(), Serdes.Long())
        .withCachingEnabled();
    // Important (1 of 2): You must add the state store to the topology, otherwise your application
    // will fail at run-time (because the state store is referred to in `transform()` below).
    builder.addStateStore(wordCountsStore);
    // Read the input data.  (In this example we ignore whatever is stored in the record keys.)
    final KStream<byte[], String> words = builder.stream(inputTopic);
    // Important (2 of 2):  When we call `transform()` we must provide the name of the state store
    // that is going to be used by the `Transformer` returned by `WordCountTransformerSupplier` as
    // the second parameter of `transform()` (note: we are also passing the state store name to the
    // constructor of `WordCountTransformerSupplier`, which we do primarily for cleaner code).
    // Otherwise our application will fail at run-time when attempting to operate on the state store
    // (within the transformer) because `ProcessorContext#getStateStore("WordCountsStore")` will
    // return `null`.
    final KStream<String, Long> wordCounts = words.transform(
        new WordCountTransformerSupplier(wordCountsStore.name()), wordCountsStore.name());
    wordCounts.to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
    try (final TopologyTestDriver topologyTestDriver = new TopologyTestDriver(builder.build(), streamsConfiguration)) {
        // 
        // Step 2: Setup input and output topics.
        // 
        final TestInputTopic<Void, String> input = topologyTestDriver.createInputTopic(inputTopic, new IntegrationTestUtils.NothingSerde<>(), new StringSerializer());
        final TestOutputTopic<String, Long> output = topologyTestDriver.createOutputTopic(outputTopic, new StringDeserializer(), new LongDeserializer());
        // 
        // Step 3: Produce some input data to the input topic.
        // 
        input.pipeValueList(inputValues);
        // 
        // Step 4: Verify the application's output data.
        // 
        assertThat(output.readKeyValuesToList()).isEqualTo(expectedRecords);
    }
}
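The WordCountTransformerSupplier used above is defined elsewhere in the test class and is not shown on this page. The following is a minimal sketch consistent with the expected records (it treats each input value as a single word, as the test data suggests); treat it as an illustration rather than the exact original:

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Transformer;
import org.apache.kafka.streams.kstream.TransformerSupplier;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

final class WordCountTransformerSupplier
        implements TransformerSupplier<byte[], String, KeyValue<String, Long>> {

    private final String stateStoreName;

    WordCountTransformerSupplier(final String stateStoreName) {
        this.stateStoreName = stateStoreName;
    }

    @Override
    public Transformer<byte[], String, KeyValue<String, Long>> get() {
        return new Transformer<byte[], String, KeyValue<String, Long>>() {

            private KeyValueStore<String, Long> stateStore;

            @SuppressWarnings("unchecked")
            @Override
            public void init(final ProcessorContext context) {
                // This lookup is why the store name must also be passed to `transform()`:
                // otherwise `getStateStore` returns null at run-time.
                stateStore = (KeyValueStore<String, Long>) context.getStateStore(stateStoreName);
            }

            @Override
            public KeyValue<String, Long> transform(final byte[] key, final String value) {
                // Treat the record value as a single word, bump its count in the
                // state store, and emit the updated count downstream.
                final Long count = stateStore.get(value);
                final long incremented = (count == null ? 0L : count) + 1L;
                stateStore.put(value, incremented);
                return KeyValue.pair(value, incremented);
            }

            @Override
            public void close() {
                // Nothing to clean up; the store's lifecycle is managed by Kafka Streams.
            }
        };
    }
}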
Also used : KeyValue(org.apache.kafka.streams.KeyValue) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Properties(java.util.Properties) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.Test)

Example 15 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka-streams-examples by confluentinc.

From class StreamToTableJoinIntegrationTest, method shouldCountClicksPerRegion:

@Test
public void shouldCountClicksPerRegion() {
    // Input 1: Clicks per user (multiple records allowed per user).
    final List<KeyValue<String, Long>> userClicks = Arrays.asList(
        new KeyValue<>("alice", 13L), new KeyValue<>("bob", 4L), new KeyValue<>("chao", 25L),
        new KeyValue<>("bob", 19L), new KeyValue<>("dave", 56L), new KeyValue<>("eve", 78L),
        new KeyValue<>("alice", 40L), new KeyValue<>("fang", 99L));
    // Input 2: Region per user (multiple records allowed per user).
    final List<KeyValue<String, String>> userRegions = Arrays.asList(
        new KeyValue<>("alice", "asia"), /* Alice lived in Asia originally... */
        new KeyValue<>("bob", "americas"),
        new KeyValue<>("chao", "asia"),
        new KeyValue<>("dave", "europe"),
        new KeyValue<>("alice", "europe"), /* ...but moved to Europe some time later. */
        new KeyValue<>("eve", "americas"),
        new KeyValue<>("fang", "asia"));
    final Map<String, Long> expectedClicksPerRegion = mkMap(mkEntry("americas", 101L), mkEntry("europe", 109L), mkEntry("asia", 124L));
    // 
    // Step 1: Configure and start the processor topology.
    // 
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();
    final Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-table-join-lambda-integration-test");
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy config");
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    final StreamsBuilder builder = new StreamsBuilder();
    // This KStream contains information such as "alice" -> 13L.
    // 
    // Because this is a KStream ("record stream"), multiple records for the same user will be
    // considered as separate click-count events, each of which will be added to the total count.
    final KStream<String, Long> userClicksStream = builder.stream(userClicksTopic, Consumed.with(stringSerde, longSerde));
    // This KTable contains information such as "alice" -> "europe".
    // 
    // Because this is a KTable ("changelog stream"), only the latest value (here: region) for a
    // record key will be considered at the time when a new user-click record (see above) is
    // received for the `leftJoin` below.  Any previous region values are being considered out of
    // date.  This behavior is quite different to the KStream for user clicks above.
    // 
    // For example, the user "alice" will be considered to live in "europe" (although originally she
    // lived in "asia") because, at the time her first user-click record is being received and
    // subsequently processed in the `leftJoin`, the latest region update for "alice" is "europe"
    // (which overrides her previous region value of "asia").
    final KTable<String, String> userRegionsTable = builder.table(userRegionsTopic);
    // Compute the number of clicks per region, e.g. "europe" -> 13L.
    // 
    // The resulting KTable is continuously being updated as new data records are arriving in the
    // input KStream `userClicksStream` and input KTable `userRegionsTable`.
    final KTable<String, Long> clicksPerRegion = userClicksStream
        .leftJoin(userRegionsTable,
            (clicks, region) -> new RegionWithClicks(region == null ? "UNKNOWN" : region, clicks))
        .map((user, regionWithClicks) -> new KeyValue<>(regionWithClicks.getRegion(), regionWithClicks.getClicks()))
        .groupByKey(Grouped.with(stringSerde, longSerde))
        .reduce(Long::sum);
    // Write the (continuously updating) results to the output topic.
    clicksPerRegion.toStream().to(outputTopic, Produced.with(stringSerde, longSerde));
    try (final TopologyTestDriver topologyTestDriver = new TopologyTestDriver(builder.build(), streamsConfiguration)) {
        // 
        // Step 2: Setup input and output topics.
        // 
        final TestInputTopic<String, String> regionInput = topologyTestDriver.createInputTopic(userRegionsTopic, new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, Long> clickInput = topologyTestDriver.createInputTopic(userClicksTopic, new StringSerializer(), new LongSerializer());
        final TestOutputTopic<String, Long> output = topologyTestDriver.createOutputTopic(outputTopic, new StringDeserializer(), new LongDeserializer());
        // 
        // Step 3: Publish user-region information.
        // 
        // To keep this code example simple and easier to understand/reason about, we publish all
        // user-region records before any user-click records (cf. step 4).  In practice though,
        // data records would typically be arriving concurrently in both input streams/topics.
        regionInput.pipeKeyValueList(userRegions);
        // 
        // Step 4: Publish some user click events.
        // 
        clickInput.pipeKeyValueList(userClicks);
        // 
        // Step 5: Verify the application's output data.
        // 
        assertThat(output.readKeyValuesToMap(), equalTo(expectedClicksPerRegion));
    }
}
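As a sanity check on expectedClicksPerRegion: because all user-region records are piped before any clicks, every click is joined against each user's final region (so alice counts as "europe" for both of her clicks). That gives americas = 4 + 19 (bob) + 78 (eve) = 101, europe = 13 + 40 (alice) + 56 (dave) = 109, and asia = 25 (chao) + 99 (fang) = 124, matching the map built with mkMap above.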
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) KStream(org.apache.kafka.streams.kstream.KStream) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.mkMap(io.confluent.examples.streams.IntegrationTestUtils.mkMap) Serde(org.apache.kafka.common.serialization.Serde) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) IntegrationTestUtils.mkEntry(io.confluent.examples.streams.IntegrationTestUtils.mkEntry) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KTable(org.apache.kafka.streams.kstream.KTable) TestOutputTopic(org.apache.kafka.streams.TestOutputTopic) Properties(java.util.Properties) TestUtils(org.apache.kafka.test.TestUtils) Consumed(org.apache.kafka.streams.kstream.Consumed) KeyValue(org.apache.kafka.streams.KeyValue) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Test(org.junit.Test) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) Grouped(org.apache.kafka.streams.kstream.Grouped) List(java.util.List) TestInputTopic(org.apache.kafka.streams.TestInputTopic)

Aggregations

LongDeserializer (org.apache.kafka.common.serialization.LongDeserializer): 37
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 33
KeyValue (org.apache.kafka.streams.KeyValue): 22
Test (org.junit.Test): 22
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 20
List (java.util.List): 19
Properties (java.util.Properties): 18
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 16
HashMap (java.util.HashMap): 14
Arrays (java.util.Arrays): 13
Map (java.util.Map): 13
Serdes (org.apache.kafka.common.serialization.Serdes): 13
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 13
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 12
KStream (org.apache.kafka.streams.kstream.KStream): 12
ArrayList (java.util.ArrayList): 11
IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer): 11
Consumed (org.apache.kafka.streams.kstream.Consumed): 10
Produced (org.apache.kafka.streams.kstream.Produced): 10
Deserializer (org.apache.kafka.common.serialization.Deserializer): 9