
Example 6 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the apache/kafka project.

From the class KStreamRepartitionIntegrationTest, method shouldDeduceNumberOfPartitionsFromRepartitionOperation:

@Test
public void shouldDeduceNumberOfPartitionsFromRepartitionOperation() throws Exception {
    final String topicBMapperName = "topic-b-mapper";
    final int topicBNumberOfPartitions = 6;
    final String inputTopicRepartitionName = "join-repartition-test";
    final int inputTopicRepartitionedNumOfPartitions = 3;
    final long timestamp = System.currentTimeMillis();
    CLUSTER.createTopic(topicB, topicBNumberOfPartitions, 1);
    final List<KeyValue<Integer, String>> expectedRecords = Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(2, "B"));
    sendEvents(timestamp, expectedRecords);
    sendEvents(topicB, timestamp, expectedRecords);
    final StreamsBuilder builder = new StreamsBuilder();
    final Repartitioned<Integer, String> inputTopicRepartitioned = Repartitioned
        .<Integer, String>as(inputTopicRepartitionName)
        .withNumberOfPartitions(inputTopicRepartitionedNumOfPartitions);
    final KStream<Integer, String> topicBStream = builder
        .stream(topicB, Consumed.with(Serdes.Integer(), Serdes.String()))
        .map(KeyValue::new, Named.as(topicBMapperName));
    builder.stream(inputTopic, Consumed.with(Serdes.Integer(), Serdes.String()))
        .repartition(inputTopicRepartitioned)
        .join(topicBStream, (value1, value2) -> value2, JoinWindows.of(Duration.ofSeconds(10)))
        .to(outputTopic);
    builder.build(streamsConfiguration);
    startStreams(builder);
    assertEquals(inputTopicRepartitionedNumOfPartitions, getNumberOfPartitionsForTopic(toRepartitionTopicName(inputTopicRepartitionName)));
    assertEquals(inputTopicRepartitionedNumOfPartitions, getNumberOfPartitionsForTopic(toRepartitionTopicName(topicBMapperName)));
    validateReceivedMessages(new IntegerDeserializer(), new StringDeserializer(), expectedRecords);
}
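
The helpers sendEvents, startStreams, getNumberOfPartitionsForTopic, toRepartitionTopicName, and validateReceivedMessages are private members of KStreamRepartitionIntegrationTest and are not reproduced on this page. As a rough sketch of the final verification step, a plain KafkaConsumer configured with the same deserializers could perform an equivalent check; everything below (method signature, group id, timeout, bootstrapServers parameter) is illustrative, not copied from the source:

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.streams.KeyValue;
import static org.junit.Assert.assertEquals;

// Hypothetical stand-in for the test's private validateReceivedMessages helper.
static <K, V> void validateReceivedMessages(final String topic,
                                            final Deserializer<K> keyDeserializer,
                                            final Deserializer<V> valueDeserializer,
                                            final List<KeyValue<K, V>> expected,
                                            final String bootstrapServers) {
    final Properties config = new Properties();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    config.put(ConsumerConfig.GROUP_ID_CONFIG, "validation-consumer");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final List<KeyValue<K, V>> received = new ArrayList<>();
    try (KafkaConsumer<K, V> consumer = new KafkaConsumer<>(config, keyDeserializer, valueDeserializer)) {
        consumer.subscribe(Collections.singleton(topic));
        final long deadline = System.currentTimeMillis() + 30_000L;
        // Poll until we have collected as many records as we expect, or time out.
        while (received.size() < expected.size() && System.currentTimeMillis() < deadline) {
            for (final ConsumerRecord<K, V> record : consumer.poll(Duration.ofMillis(500))) {
                received.add(new KeyValue<>(record.key(), record.value()));
            }
        }
    }
    assertEquals(expected, received);
}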

Example 7 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the apache/kafka project.

From the class RepartitionOptimizingTest, method runTest:

private void runTest(final String optimizationConfig, final int expectedNumberRepartitionTopics) {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> sourceStream = builder.stream(INPUT_TOPIC, Consumed.with(Serdes.String(), Serdes.String()).withName("sourceStream"));
    final KStream<String, String> mappedStream = sourceStream.map((k, v) -> KeyValue.pair(k.toUpperCase(Locale.getDefault()), v), Named.as("source-map"));
    mappedStream
        .filter((k, v) -> k.equals("B"), Named.as("process-filter"))
        .mapValues(v -> v.toUpperCase(Locale.getDefault()), Named.as("process-mapValues"))
        .process(() -> new SimpleProcessor(processorValueCollector), Named.as("process"));
    final KStream<String, Long> countStream = mappedStream
        .groupByKey(Grouped.as("count-groupByKey"))
        .count(Named.as("count"), Materialized.<String, Long>as(Stores.inMemoryKeyValueStore("count-store")).withKeySerde(Serdes.String()).withValueSerde(Serdes.Long()))
        .toStream(Named.as("count-toStream"));
    countStream.to(COUNT_TOPIC, Produced.with(Serdes.String(), Serdes.Long()).withName("count-to"));
    mappedStream
        .groupByKey(Grouped.as("aggregate-groupByKey"))
        .aggregate(initializer, aggregator, Named.as("aggregate"), Materialized.<String, Integer>as(Stores.inMemoryKeyValueStore("aggregate-store")).withKeySerde(Serdes.String()).withValueSerde(Serdes.Integer()))
        .toStream(Named.as("aggregate-toStream"))
        .to(AGGREGATION_TOPIC, Produced.with(Serdes.String(), Serdes.Integer()).withName("reduce-to"));
    // adding operators for the case where the repartition node is further downstream
    mappedStream
        .filter((k, v) -> true, Named.as("reduce-filter"))
        .peek((k, v) -> System.out.println(k + ":" + v), Named.as("reduce-peek"))
        .groupByKey(Grouped.as("reduce-groupByKey"))
        .reduce(reducer, Named.as("reducer"), Materialized.as(Stores.inMemoryKeyValueStore("reduce-store")))
        .toStream(Named.as("reduce-toStream"))
        .to(REDUCE_TOPIC, Produced.with(Serdes.String(), Serdes.String()));
    mappedStream
        .filter((k, v) -> k.equals("A"), Named.as("join-filter"))
        .join(countStream, (v1, v2) -> v1 + ":" + v2.toString(), JoinWindows.of(ofMillis(5000)),
            StreamJoined.<String, String, Long>with(
                    Stores.inMemoryWindowStore("join-store", ofDays(1), ofMillis(10000), true),
                    Stores.inMemoryWindowStore("other-join-store", ofDays(1), ofMillis(10000), true))
                .withName("join").withKeySerde(Serdes.String()).withValueSerde(Serdes.String()).withOtherValueSerde(Serdes.Long()))
        .to(JOINED_TOPIC, Produced.as("join-to"));
    streamsConfiguration.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizationConfig);
    final Topology topology = builder.build(streamsConfiguration);
    topologyTestDriver = new TopologyTestDriver(topology, streamsConfiguration);
    final TestInputTopic<String, String> inputTopicA = topologyTestDriver.createInputTopic(INPUT_TOPIC, stringSerializer, stringSerializer);
    final TestOutputTopic<String, Long> countOutputTopic = topologyTestDriver.createOutputTopic(COUNT_TOPIC, stringDeserializer, new LongDeserializer());
    final TestOutputTopic<String, Integer> aggregationOutputTopic = topologyTestDriver.createOutputTopic(AGGREGATION_TOPIC, stringDeserializer, new IntegerDeserializer());
    final TestOutputTopic<String, String> reduceOutputTopic = topologyTestDriver.createOutputTopic(REDUCE_TOPIC, stringDeserializer, stringDeserializer);
    final TestOutputTopic<String, String> joinedOutputTopic = topologyTestDriver.createOutputTopic(JOINED_TOPIC, stringDeserializer, stringDeserializer);
    inputTopicA.pipeKeyValueList(getKeyValues());
    // Verify the topology
    final String topologyString = topology.describe().toString();
    if (optimizationConfig.equals(StreamsConfig.OPTIMIZE)) {
        assertEquals(EXPECTED_OPTIMIZED_TOPOLOGY, topologyString);
    } else {
        assertEquals(EXPECTED_UNOPTIMIZED_TOPOLOGY, topologyString);
    }
    // Verify the number of repartition topics
    assertEquals(expectedNumberRepartitionTopics, getCountOfRepartitionTopicsFound(topologyString));
    // Verify the values collected by the processor
    assertThat(processorValueCollector.size(), equalTo(3));
    assertThat(processorValueCollector, equalTo(expectedCollectedProcessorValues));
    // Verify the expected output
    assertThat(countOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedCountKeyValues)));
    assertThat(aggregationOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedAggKeyValues)));
    assertThat(reduceOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedReduceKeyValues)));
    assertThat(joinedOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedJoinKeyValues)));
}
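
The map-based assertions above rely on a keyValueListToMap helper that is private to RepartitionOptimizingTest and not shown in this excerpt. A minimal sketch of what such a helper presumably looks like (last value per key wins, matching the semantics of readKeyValuesToMap):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.streams.KeyValue;

static <K, V> Map<K, V> keyValueListToMap(final List<KeyValue<K, V>> keyValuePairs) {
    final Map<K, V> map = new HashMap<>();
    for (final KeyValue<K, V> pair : keyValuePairs) {
        // KeyValue exposes its key and value as public fields.
        map.put(pair.key, pair.value);
    }
    return map;
}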

Example 8 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the apache/kafka project.

From the class StreamTableJoinTopologyOptimizationIntegrationTest, method shouldDoStreamTableJoinWithDifferentNumberOfPartitions:

@Test
public void shouldDoStreamTableJoinWithDifferentNumberOfPartitions() throws Exception {
    final String storeName = "store";
    final String selectKeyName = "selectKey";
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final KStream<Integer, String> stream = streamsBuilder.stream(inputTopic);
    final KTable<Integer, String> table = streamsBuilder.table(tableTopic, Materialized.as(storeName));
    stream
        .selectKey((key, value) -> key, Named.as(selectKeyName))
        .join(table, (value1, value2) -> value2)
        .to(outputTopic);
    kafkaStreams = startStreams(streamsBuilder);
    final long timestamp = System.currentTimeMillis();
    final List<KeyValue<Integer, String>> expectedRecords = Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(2, "B"));
    sendEvents(inputTopic, timestamp, expectedRecords);
    sendEvents(tableTopic, timestamp, expectedRecords);
    validateReceivedMessages(outputTopic, new IntegerDeserializer(), new StringDeserializer(), expectedRecords);
    final Set<String> allTopicsInCluster = CLUSTER.getAllTopicsInCluster();
    final String repartitionTopicName = applicationId + "-" + selectKeyName + "-repartition";
    final String tableChangelogStoreName = applicationId + "-" + storeName + "-changelog";
    assertTrue(topicExists(repartitionTopicName));
    assertEquals(2, getNumberOfPartitionsForTopic(repartitionTopicName));
    if (StreamsConfig.OPTIMIZE.equals(topologyOptimization)) {
        assertFalse(allTopicsInCluster.contains(tableChangelogStoreName));
    } else if (StreamsConfig.NO_OPTIMIZATION.equals(topologyOptimization)) {
        assertTrue(allTopicsInCluster.contains(tableChangelogStoreName));
    }
}
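
topicExists and getNumberOfPartitionsForTopic are again private helpers of the test class. A plausible sketch of both in terms of the AdminClient API; the adminClient parameter is an assumption here (in the real test it is presumably created from the embedded cluster's bootstrap servers):

import java.util.Collections;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.TopicDescription;

// Does the topic exist in the cluster?
static boolean topicExists(final AdminClient adminClient, final String topic)
        throws InterruptedException, ExecutionException {
    return adminClient.listTopics().names().get().contains(topic);
}

// How many partitions does the topic have?
static int getNumberOfPartitionsForTopic(final AdminClient adminClient, final String topic)
        throws InterruptedException, ExecutionException {
    final TopicDescription description =
        adminClient.describeTopics(Collections.singleton(topic)).values().get(topic).get();
    return description.partitions().size();
}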

Example 9 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the apache/kafka project.

From the class EosTestDriver, method verifyReceivedAllRecords:

private static void verifyReceivedAllRecords(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> expectedRecords, final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> receivedRecords) {
    if (expectedRecords.size() != receivedRecords.size()) {
        throw new RuntimeException("Result verification failed. Received " + receivedRecords.size() + " records but expected " + expectedRecords.size());
    }
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final IntegerDeserializer integerDeserializer = new IntegerDeserializer();
    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : receivedRecords.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("data", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> receivedRecordsForPartition = partitionRecords.getValue();
        final List<ConsumerRecord<byte[], byte[]>> expectedRecordsForPartition = expectedRecords.get(inputTopicPartition);
        System.out.println(partitionRecords.getKey() + " with " + receivedRecordsForPartition.size() + ", " + inputTopicPartition + " with " + expectedRecordsForPartition.size());
        final Iterator<ConsumerRecord<byte[], byte[]>> expectedRecord = expectedRecordsForPartition.iterator();
        RuntimeException exception = null;
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : receivedRecordsForPartition) {
            if (!expectedRecord.hasNext()) {
                exception = new RuntimeException("Result verification failed for " + receivedRecord + " since there's no more expected record");
                // Stop here: calling expectedRecord.next() with no remaining element
                // would throw NoSuchElementException and mask this error.
                break;
            }
            final ConsumerRecord<byte[], byte[]> expected = expectedRecord.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final int receivedValue = integerDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String expectedKey = stringDeserializer.deserialize(expected.topic(), expected.key());
            final int expectedValue = integerDeserializer.deserialize(expected.topic(), expected.value());
            if (!receivedKey.equals(expectedKey) || receivedValue != expectedValue) {
                exception = new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + expectedKey + "," + expectedValue + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
        if (exception != null) {
            throw exception;
        }
    }
}
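
Note that this verification path calls the deserializers directly on raw byte[] payloads instead of configuring them on a consumer; Deserializer.deserialize(topic, data) is a plain method call, and IntegerDeserializer ignores the topic argument. A self-contained round trip illustrating that usage:

import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;

final IntegerSerializer serializer = new IntegerSerializer();
final IntegerDeserializer deserializer = new IntegerDeserializer();

// IntegerSerializer writes the value as 4 big-endian bytes.
final byte[] payload = serializer.serialize("any-topic", 42);
final Integer roundTripped = deserializer.deserialize("any-topic", payload);
// roundTripped == 42; a null payload deserializes to null, and a payload whose
// length is not exactly 4 bytes raises a SerializationException.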

Example 10 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the apache/kafka project.

From the class EosTestDriver, method verifyMax:

private static void verifyMax(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition, final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> maxPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final IntegerDeserializer integerDeserializer = new IntegerDeserializer();
    // Tracks the running maximum seen so far for each key (named currentMinPerKey
    // in some copies of this code, which is misleading: it holds maxima).
    final HashMap<String, Integer> currentMaxPerKey = new HashMap<>();
    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : maxPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("repartition", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionMax = partitionRecords.getValue();
        if (partitionInput.size() != partitionMax.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size() + " records for " + partitionRecords.getKey() + " but received " + partitionMax.size());
        }
        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionMax) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final int receivedValue = integerDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());
            final int value = integerDeserializer.deserialize(input.topic(), input.value());
            Integer max = currentMaxPerKey.get(key);
            if (max == null) {
                max = Integer.MIN_VALUE;
            }
            max = Math.max(max, value);
            currentMaxPerKey.put(key, max);
            if (!receivedKey.equals(key) || receivedValue != max) {
                throw new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + key + "," + max + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
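
The null check followed by Math.max is the running-max bookkeeping this method re-implements for each key; with the Java 8 Map API the same logic collapses into Map.merge. A tiny sketch with made-up values:

import java.util.HashMap;
import java.util.Map;

final Map<String, Integer> currentMaxPerKey = new HashMap<>();
currentMaxPerKey.merge("A", 5, Math::max);  // A -> 5 (first value for the key)
currentMaxPerKey.merge("A", 3, Math::max);  // A -> 5 (3 is not a new maximum)
currentMaxPerKey.merge("A", 9, Math::max);  // A -> 9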

Aggregations

IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer): 31
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 27
Test (org.junit.Test): 22
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 15
IntegrationTest (org.apache.kafka.test.IntegrationTest): 13
List (java.util.List): 12
Map (java.util.Map): 12
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 12
ArrayList (java.util.ArrayList): 11
IntegerSerializer (org.apache.kafka.common.serialization.IntegerSerializer): 11
KeyValue (org.apache.kafka.streams.KeyValue): 11
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 10
HashMap (java.util.HashMap): 9
Properties (java.util.Properties): 8
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 8
TopicPartition (org.apache.kafka.common.TopicPartition): 8
Arrays (java.util.Arrays): 7
LongDeserializer (org.apache.kafka.common.serialization.LongDeserializer): 7
Serdes (org.apache.kafka.common.serialization.Serdes): 7
KStream (org.apache.kafka.streams.kstream.KStream): 7