
Example 16 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka-streams-examples by confluentinc.

The class WikipediaFeedAvroExampleTest, method shouldRunTheWikipediaFeedExample:

@Test
public void shouldRunTheWikipediaFeedExample() {
    // Produce some WikiFeed records (Avro values) to the input topic.
    final Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, io.confluent.kafka.serializers.KafkaAvroSerializer.class);
    props.put(AbstractKafkaSchemaSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, CLUSTER.schemaRegistryUrl());
    final KafkaProducer<String, WikiFeed> producer = new KafkaProducer<>(props);
    producer.send(new ProducerRecord<>(WikipediaFeedAvroExample.WIKIPEDIA_FEED, new WikiFeed("donna", true, "first post")));
    producer.send(new ProducerRecord<>(WikipediaFeedAvroExample.WIKIPEDIA_FEED, new WikiFeed("donna", true, "second post")));
    producer.send(new ProducerRecord<>(WikipediaFeedAvroExample.WIKIPEDIA_FEED, new WikiFeed("donna", true, "third post")));
    producer.send(new ProducerRecord<>(WikipediaFeedAvroExample.WIKIPEDIA_FEED, new WikiFeed("becca", true, "first post")));
    producer.send(new ProducerRecord<>(WikipediaFeedAvroExample.WIKIPEDIA_FEED, new WikiFeed("becca", true, "second post")));
    producer.send(new ProducerRecord<>(WikipediaFeedAvroExample.WIKIPEDIA_FEED, new WikiFeed("john", true, "first post")));
    producer.flush();
    // Start the Streams topology, then read back the per-user counts it produces.
    streams.start();
    final Properties consumerProperties = new Properties();
    consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "wikipedia-feed-consumer");
    consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    final KafkaConsumer<String, Long> consumer = new KafkaConsumer<>(consumerProperties, new StringDeserializer(), new LongDeserializer());
    final Map<String, Long> expected = new HashMap<>();
    expected.put("donna", 3L);
    expected.put("becca", 2L);
    expected.put("john", 1L);
    final Map<String, Long> actual = new HashMap<>();
    consumer.subscribe(Collections.singleton(WikipediaFeedAvroExample.WIKIPEDIA_STATS));
    final long timeout = System.currentTimeMillis() + 30000L;
    while (!actual.equals(expected) && System.currentTimeMillis() < timeout) {
        final ConsumerRecords<String, Long> records = consumer.poll(Duration.ofSeconds(1));
        records.forEach(record -> actual.put(record.key(), record.value()));
    }
    consumer.close();
    // Hamcrest takes the actual value first, then the matcher.
    assertThat(actual, equalTo(expected));
}
Also used: KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) HashMap(java.util.HashMap) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Properties(java.util.Properties) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) WikiFeed(io.confluent.examples.streams.avro.WikiFeed) Test(org.junit.Test)
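
The test above passes StringDeserializer and LongDeserializer instances directly to the KafkaConsumer constructor. A minimal sketch of the equivalent property-based wiring follows; the localhost:9092 bootstrap address and group id are placeholders, not part of the original test, which uses CLUSTER.bootstrapServers().

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LongValueConsumerSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        // Placeholder broker address; the test uses an embedded cluster instead.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "long-value-consumer");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Deserializers supplied via config keys rather than constructor arguments.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        try (final KafkaConsumer<String, Long> consumer = new KafkaConsumer<>(props)) {
            // subscribe(...) and poll(...) as in the test above.
        }
    }
}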

Example 17 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka-streams-examples by confluentinc.

The class WordCountLambdaIntegrationTest, method shouldCountWords:

@Test
public void shouldCountWords() {
    final List<String> inputValues = Arrays.asList("Hello Kafka Streams", "All streams lead to Kafka", "Join Kafka Summit", "И теперь пошли русские слова");
    final Map<String, Long> expectedWordCounts = mkMap(mkEntry("hello", 1L), mkEntry("all", 1L), mkEntry("streams", 2L), mkEntry("lead", 1L), mkEntry("to", 1L), mkEntry("join", 1L), mkEntry("kafka", 3L), mkEntry("summit", 1L), mkEntry("и", 1L), mkEntry("теперь", 1L), mkEntry("пошли", 1L), mkEntry("русские", 1L), mkEntry("слова", 1L));
    // 
    // Step 1: Configure and start the processor topology.
    // 
    final Serde<String> stringSerde = Serdes.String();
    final Serde<Long> longSerde = Serdes.Long();
    final Properties streamsConfiguration = new Properties();
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-lambda-integration-test");
    // TopologyTestDriver never connects to a broker, so any placeholder value works here.
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy config");
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    // Use a temporary directory for storing state, which will be automatically removed after the test.
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> textLines = builder.stream(inputTopic);
    final Pattern pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS);
    final KTable<String, Long> wordCounts = textLines.flatMapValues(value -> Arrays.asList(pattern.split(value.toLowerCase()))).groupBy((key, word) -> word).count();
    wordCounts.toStream().to(outputTopic, Produced.with(stringSerde, longSerde));
    try (final TopologyTestDriver topologyTestDriver = new TopologyTestDriver(builder.build(), streamsConfiguration)) {
        // 
        // Step 2: Setup input and output topics.
        // 
        final TestInputTopic<Void, String> input = topologyTestDriver.createInputTopic(inputTopic, new IntegrationTestUtils.NothingSerde<>(), new StringSerializer());
        final TestOutputTopic<String, Long> output = topologyTestDriver.createOutputTopic(outputTopic, new StringDeserializer(), new LongDeserializer());
        // 
        // Step 3: Produce some input data to the input topic.
        // 
        input.pipeValueList(inputValues);
        // 
        // Step 4: Verify the application's output data.
        // 
        assertThat(output.readKeyValuesToMap()).isEqualTo(expectedWordCounts);
    }
}
Also used: TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StreamsConfig(org.apache.kafka.streams.StreamsConfig) KTable(org.apache.kafka.streams.kstream.KTable) Arrays(java.util.Arrays) TestOutputTopic(org.apache.kafka.streams.TestOutputTopic) Properties(java.util.Properties) Produced(org.apache.kafka.streams.kstream.Produced) TestUtils(org.apache.kafka.test.TestUtils) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Test(org.junit.Test) KStream(org.apache.kafka.streams.kstream.KStream) List(java.util.List) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) IntegrationTestUtils.mkMap(io.confluent.examples.streams.IntegrationTestUtils.mkMap) Serde(org.apache.kafka.common.serialization.Serde) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) TestInputTopic(org.apache.kafka.streams.TestInputTopic) Pattern(java.util.regex.Pattern) IntegrationTestUtils.mkEntry(io.confluent.examples.streams.IntegrationTestUtils.mkEntry)
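
Serdes.Long(), used above for the output topic, bundles a LongSerializer and a LongDeserializer. A self-contained round-trip sketch showing the two ways to reach the same deserializer; the topic name is arbitrary, since the long serde ignores it.

import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;

public class LongSerdeRoundTrip {
    public static void main(final String[] args) {
        // The Serde bundles a serializer and a deserializer.
        final Serde<Long> longSerde = Serdes.Long();
        final byte[] bytes = longSerde.serializer().serialize("any-topic", 42L);
        System.out.println(longSerde.deserializer().deserialize("any-topic", bytes)); // 42
        // The same round trip with the concrete classes used in the tests above.
        final byte[] raw = new LongSerializer().serialize("any-topic", 42L);
        System.out.println(new LongDeserializer().deserialize("any-topic", raw)); // 42
    }
}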

Example 18 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka-tutorials by confluentinc.

The class RunningAverageTest, method validateAverageRating:

@Test
public void validateAverageRating() {
    TestInputTopic<Long, Rating> inputTopic = testDriver.createInputTopic(RATINGS_TOPIC_NAME, new LongSerializer(), ratingSpecificAvroSerde.serializer());
    inputTopic.pipeKeyValueList(asList(new KeyValue<>(LETHAL_WEAPON_RATING_8.getMovieId(), LETHAL_WEAPON_RATING_8), new KeyValue<>(LETHAL_WEAPON_RATING_10.getMovieId(), LETHAL_WEAPON_RATING_10)));
    final TestOutputTopic<Long, Double> outputTopic = testDriver.createOutputTopic(AVERAGE_RATINGS_TOPIC_NAME, new LongDeserializer(), new DoubleDeserializer());
    final List<KeyValue<Long, Double>> keyValues = outputTopic.readKeyValuesToList();
    // Two records were piped to the input topic; the second output record
    // should carry the final running average for the movie.
    final KeyValue<Long, Double> longDoubleKeyValue = keyValues.get(1);
    System.out.println("longDoubleKeyValue = " + longDoubleKeyValue);
    MatcherAssert.assertThat(longDoubleKeyValue, equalTo(new KeyValue<>(362L, 9.0)));
    final KeyValueStore<Long, Double> keyValueStore = testDriver.getKeyValueStore("average-ratings");
    final Double averageFromStore = keyValueStore.get(362L);
    Assert.assertEquals("stored average rating", 9.0, averageFromStore, 0.0);
}
Also used: LongSerializer(org.apache.kafka.common.serialization.LongSerializer) KeyValue(org.apache.kafka.streams.KeyValue) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Rating(io.confluent.demo.Rating) DoubleDeserializer(org.apache.kafka.common.serialization.DoubleDeserializer) Test(org.junit.Test)
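
The output topic above carries <Long, Double> pairs, so the driver decodes each record with a LongDeserializer for the key and a DoubleDeserializer for the value. A minimal standalone sketch of that decode step; the topic name and values are illustrative, not taken from the test.

import org.apache.kafka.common.serialization.DoubleDeserializer;
import org.apache.kafka.common.serialization.DoubleSerializer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;

public class KeyValueDecodeSketch {
    public static void main(final String[] args) {
        // Encode a <movieId, averageRating> pair the way a producer would.
        final byte[] keyBytes = new LongSerializer().serialize("ratings", 362L);
        final byte[] valueBytes = new DoubleSerializer().serialize("ratings", 9.0);
        // Decode it the way the test's output topic does.
        final Long movieId = new LongDeserializer().deserialize("ratings", keyBytes);
        final Double average = new DoubleDeserializer().deserialize("ratings", valueBytes);
        System.out.println(movieId + " -> " + average); // 362 -> 9.0
    }
}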

Example 19 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project apache-kafka-on-k8s by banzaicloud.

The class EosTestDriver, method verifyCnt:

// Verifies that each output record carries a running per-key count of the corresponding input records.
private static void verifyCnt(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition, final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> cntPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final LongDeserializer longDeserializer = new LongDeserializer();
    final HashMap<String, Long> currentCountPerKey = new HashMap<>();
    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : cntPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("repartition", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionCnt = partitionRecords.getValue();
        if (partitionInput.size() != partitionCnt.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size() + " records for " + partitionRecords.getKey() + " but received " + partitionCnt.size());
        }
        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionCnt) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final long receivedValue = longDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());
            Long cnt = currentCountPerKey.get(key);
            if (cnt == null) {
                cnt = 0L;
            }
            currentCountPerKey.put(key, ++cnt);
            if (!receivedKey.equals(key) || receivedValue != cnt.longValue()) {
                throw new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + key + "," + cnt + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
Also used: HashMap(java.util.HashMap) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map)
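
verifyCnt deserializes raw byte[] payloads by hand. LongDeserializer expects exactly eight big-endian bytes and throws a SerializationException for any other length; a small sketch of that contract, with illustrative values.

import java.nio.ByteBuffer;
import org.apache.kafka.common.serialization.LongDeserializer;

public class LongWireFormatSketch {
    public static void main(final String[] args) {
        final LongDeserializer longDeserializer = new LongDeserializer();
        // Eight big-endian bytes decode back to the original long.
        final byte[] eightBytes = ByteBuffer.allocate(8).putLong(123456789L).array();
        System.out.println(longDeserializer.deserialize("any-topic", eightBytes)); // 123456789
        // Any other length fails; uncommenting the next line throws SerializationException.
        // longDeserializer.deserialize("any-topic", new byte[] {1, 2, 3});
    }
}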

Example 20 with LongDeserializer

Use of org.apache.kafka.common.serialization.LongDeserializer in project apache-kafka-on-k8s by banzaicloud.

The class EosTestDriver, method verifySum:

// Verifies that each output record carries a running per-key sum of the corresponding input values.
private static void verifySum(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition, final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> sumPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final IntegerDeserializer integerDeserializer = new IntegerDeserializer();
    final LongDeserializer longDeserializer = new LongDeserializer();
    final HashMap<String, Long> currentSumPerKey = new HashMap<>();
    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : sumPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("data", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionSum = partitionRecords.getValue();
        if (partitionInput.size() != partitionSum.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size() + " records for " + partitionRecords.getKey() + " but received " + partitionSum.size());
        }
        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionSum) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final long receivedValue = longDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());
            final int value = integerDeserializer.deserialize(input.topic(), input.value());
            Long sum = currentSumPerKey.get(key);
            if (sum == null) {
                sum = (long) value;
            } else {
                sum += value;
            }
            currentSumPerKey.put(key, sum);
            if (!receivedKey.equals(key) || receivedValue != sum) {
                throw new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + key + "," + sum + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
Also used: IntegerDeserializer(org.apache.kafka.common.serialization.IntegerDeserializer) HashMap(java.util.HashMap) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map)
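
The null check plus add in verifySum (and the analogous count bookkeeping in verifyCnt) collapses to Map.merge; a minimal sketch of the same running-sum logic with illustrative values.

import java.util.HashMap;
import java.util.Map;

public class RunningSumSketch {
    public static void main(final String[] args) {
        final Map<String, Long> sumPerKey = new HashMap<>();
        for (final int value : new int[] {3, 4, 5}) {
            // merge() initializes the key to the value on first sight, then sums.
            sumPerKey.merge("key", (long) value, Long::sum);
        }
        System.out.println(sumPerKey.get("key")); // 12
    }
}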

Aggregations

LongDeserializer (org.apache.kafka.common.serialization.LongDeserializer): 37 uses
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 33 uses
KeyValue (org.apache.kafka.streams.KeyValue): 22 uses
Test (org.junit.Test): 22 uses
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 20 uses
List (java.util.List): 19 uses
Properties (java.util.Properties): 18 uses
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 16 uses
HashMap (java.util.HashMap): 14 uses
Arrays (java.util.Arrays): 13 uses
Map (java.util.Map): 13 uses
Serdes (org.apache.kafka.common.serialization.Serdes): 13 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 13 uses
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 12 uses
KStream (org.apache.kafka.streams.kstream.KStream): 12 uses
ArrayList (java.util.ArrayList): 11 uses
IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer): 11 uses
Consumed (org.apache.kafka.streams.kstream.Consumed): 10 uses
Produced (org.apache.kafka.streams.kstream.Produced): 10 uses
Deserializer (org.apache.kafka.common.serialization.Deserializer): 9 uses