Search in sources:

Example 1 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the project apache-kafka-on-k8s by banzaicloud.

From the class KStreamAggregationIntegrationTest, method shouldAggregateWindowed.

@Test
public void shouldAggregateWindowed() throws Exception {
    final long firstTimestamp = mockTime.milliseconds();
    mockTime.sleep(1000);
    produceMessages(firstTimestamp);
    final long secondTimestamp = mockTime.milliseconds();
    produceMessages(secondTimestamp);
    produceMessages(secondTimestamp);
    groupedStream
        .windowedBy(TimeWindows.of(500L))
        .aggregate(
            initializer,
            aggregator,
            Materialized.<String, Integer, WindowStore<Bytes, byte[]>>with(null, Serdes.Integer()))
        .toStream(new KeyValueMapper<Windowed<String>, Integer, String>() {

            @Override
            public String apply(final Windowed<String> windowedKey, final Integer value) {
                return windowedKey.key() + "@" + windowedKey.window().start();
            }
        })
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Integer()));
    startStreams();
    final List<KeyValue<String, Integer>> windowedMessages = receiveMessages(new StringDeserializer(), new IntegerDeserializer(), 15);
    final Comparator<KeyValue<String, Integer>> comparator = new Comparator<KeyValue<String, Integer>>() {

        @Override
        public int compare(final KeyValue<String, Integer> o1, final KeyValue<String, Integer> o2) {
            return KStreamAggregationIntegrationTest.compare(o1, o2);
        }
    };
    Collections.sort(windowedMessages, comparator);
    final long firstWindow = firstTimestamp / 500 * 500;
    final long secondWindow = secondTimestamp / 500 * 500;
    assertThat(windowedMessages, is(Arrays.asList(
        new KeyValue<>("A@" + firstWindow, 1),
        new KeyValue<>("A@" + secondWindow, 1),
        new KeyValue<>("A@" + secondWindow, 2),
        new KeyValue<>("B@" + firstWindow, 1),
        new KeyValue<>("B@" + secondWindow, 1),
        new KeyValue<>("B@" + secondWindow, 2),
        new KeyValue<>("C@" + firstWindow, 1),
        new KeyValue<>("C@" + secondWindow, 1),
        new KeyValue<>("C@" + secondWindow, 2),
        new KeyValue<>("D@" + firstWindow, 1),
        new KeyValue<>("D@" + secondWindow, 1),
        new KeyValue<>("D@" + secondWindow, 2),
        new KeyValue<>("E@" + firstWindow, 1),
        new KeyValue<>("E@" + secondWindow, 1),
        new KeyValue<>("E@" + secondWindow, 2))));
}
Also used: IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer), KeyValue (org.apache.kafka.streams.KeyValue), StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), KeyValueMapper (org.apache.kafka.streams.kstream.KeyValueMapper), Comparator (java.util.Comparator), Windowed (org.apache.kafka.streams.kstream.Windowed), Bytes (org.apache.kafka.common.utils.Bytes), IntegrationTest (org.apache.kafka.test.IntegrationTest), Test (org.junit.Test)
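
The expected keys in the assertion are built from the window start time, which the test derives by flooring each timestamp to a multiple of the 500 ms window size. A minimal sketch of that alignment (the helper name is hypothetical, not part of the test class):

// Hypothetical helper showing how the expected window starts are computed:
// tumbling windows of size windowSizeMs begin at multiples of the size, so
// integer division followed by multiplication floors the timestamp.
static long windowStart(final long timestampMs, final long windowSizeMs) {
    return timestampMs / windowSizeMs * windowSizeMs;
}

// e.g. windowStart(1234L, 500L) == 1000L, matching firstTimestamp / 500 * 500 above.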

Example 2 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the project apache-kafka-on-k8s by banzaicloud.

From the class EosTestDriver, method verifyMin.

private static void verifyMin(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition,
                              final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> minPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final IntegerDeserializer integerDeserializer = new IntegerDeserializer();
    final HashMap<String, Integer> currentMinPerKey = new HashMap<>();
    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : minPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("data", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionMin = partitionRecords.getValue();
        if (partitionInput.size() != partitionMin.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size() + " records for " + partitionRecords.getKey() + " but received " + partitionMin.size());
        }
        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionMin) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final int receivedValue = integerDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());
            final int value = integerDeserializer.deserialize(input.topic(), input.value());
            Integer min = currentMinPerKey.get(key);
            if (min == null) {
                min = value;
            } else {
                min = Math.min(min, value);
            }
            currentMinPerKey.put(key, min);
            if (!receivedKey.equals(key) || receivedValue != min) {
                throw new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + key + "," + min + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
Also used: IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer), HashMap (java.util.HashMap), StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), TopicPartition (org.apache.kafka.common.TopicPartition), ArrayList (java.util.ArrayList), List (java.util.List), Map (java.util.Map)
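
The null check plus Math.min bookkeeping in verifyMin can be written more compactly with Map.merge; a small self-contained sketch of the same running-minimum-per-key pattern:

import java.util.HashMap;
import java.util.Map;

static Map<String, Integer> minPerKeySketch() {
    // Same "minimum seen so far per key" logic as verifyMin, using Map.merge:
    // an absent key stores the value, a present key keeps the smaller one.
    final Map<String, Integer> currentMinPerKey = new HashMap<>();
    currentMinPerKey.merge("k1", 5, Math::min); // k1 -> 5
    currentMinPerKey.merge("k1", 3, Math::min); // k1 -> 3
    currentMinPerKey.merge("k1", 7, Math::min); // k1 -> 3 (7 is not smaller)
    return currentMinPerKey;
}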

Example 3 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the project kafka by apache.

From the class KStreamRepartitionTest, method shouldInvokePartitionerWhenSet.

@Test
public void shouldInvokePartitionerWhenSet() {
    final int[] expectedKeys = new int[] { 0, 1 };
    final StreamPartitioner<Integer, String> streamPartitionerMock = EasyMock.mock(StreamPartitioner.class);
    expect(streamPartitionerMock.partition(anyString(), eq(0), eq("X0"), anyInt())).andReturn(1).times(1);
    expect(streamPartitionerMock.partition(anyString(), eq(1), eq("X1"), anyInt())).andReturn(1).times(1);
    replay(streamPartitionerMock);
    final String repartitionOperationName = "test";
    final Repartitioned<Integer, String> repartitioned = Repartitioned.streamPartitioner(streamPartitionerMock).withName(repartitionOperationName);
    builder.<Integer, String>stream(inputTopic).repartition(repartitioned);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<Integer, String> testInputTopic = driver.createInputTopic(inputTopic, new IntegerSerializer(), new StringSerializer());
        final String topicName = repartitionOutputTopic(props, repartitionOperationName);
        final TestOutputTopic<Integer, String> testOutputTopic = driver.createOutputTopic(topicName, new IntegerDeserializer(), new StringDeserializer());
        for (int i = 0; i < 2; i++) {
            testInputTopic.pipeInput(expectedKeys[i], "X" + expectedKeys[i], i + 10);
        }
        assertThat(testOutputTopic.readRecord(), equalTo(new TestRecord<>(0, "X0", Instant.ofEpochMilli(10))));
        assertThat(testOutputTopic.readRecord(), equalTo(new TestRecord<>(1, "X1", Instant.ofEpochMilli(11))));
        assertTrue(testOutputTopic.readRecordsToList().isEmpty());
    }
    verify(streamPartitionerMock);
}
Also used: IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer), StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver), EasyMock.anyString (org.easymock.EasyMock.anyString), IntegerSerializer (org.apache.kafka.common.serialization.IntegerSerializer), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), TestRecord (org.apache.kafka.streams.test.TestRecord), Test (org.junit.Test)
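
Outside of a mocking context, StreamPartitioner is a single-method interface and is usually supplied as a lambda. A minimal sketch that behaves like the mock configured above (the variable name is chosen for this example):

import org.apache.kafka.streams.processor.StreamPartitioner;

// Equivalent to what the EasyMock expectations return: a partitioner that
// routes every record to partition 1, regardless of key, value, or count.
final StreamPartitioner<Integer, String> toPartitionOne =
    (topic, key, value, numPartitions) -> 1;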

Example 4 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the project kafka by apache.

From the class CogroupedKStreamImplTest, method shouldAllowDifferentOutputTypeInCoGroup.

@Test
public void shouldAllowDifferentOutputTypeInCoGroup() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
    final KStream<String, String> stream2 = builder.stream("two", stringConsumed);
    final KGroupedStream<String, String> grouped1 = stream1.groupByKey();
    final KGroupedStream<String, String> grouped2 = stream2.groupByKey();
    final KTable<String, Integer> customers = grouped1
        .cogroup(STRING_SUM_AGGREGATOR)
        .cogroup(grouped2, STRING_SUM_AGGREGATOR)
        .aggregate(
            SUM_INITIALIZER,
            Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("store1").withValueSerde(Serdes.Integer()));
    customers.toStream().to(OUTPUT);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> testInputTopic = driver.createInputTopic("one", new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> testInputTopic2 = driver.createInputTopic("two", new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, Integer> testOutputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new IntegerDeserializer());
        testInputTopic.pipeInput("k1", "1", 0L);
        testInputTopic.pipeInput("k2", "1", 1L);
        testInputTopic.pipeInput("k1", "1", 10L);
        testInputTopic.pipeInput("k2", "1", 100L);
        testInputTopic2.pipeInput("k2", "2", 100L);
        testInputTopic2.pipeInput("k2", "2", 200L);
        testInputTopic2.pipeInput("k1", "2", 1L);
        testInputTopic2.pipeInput("k2", "2", 500L);
        testInputTopic2.pipeInput("k1", "2", 500L);
        testInputTopic2.pipeInput("k2", "3", 500L);
        testInputTopic2.pipeInput("k3", "2", 500L);
        testInputTopic2.pipeInput("k2", "2", 100L);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 1, 0);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 1, 1);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 2, 10);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 2, 100);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 4, 100);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 6, 200);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 4, 10);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 8, 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 6, 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 11, 500);
    }
}
Also used: IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer), StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer), TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver), KeyValueStore (org.apache.kafka.streams.state.KeyValueStore), StreamsBuilder (org.apache.kafka.streams.StreamsBuilder), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), Test (org.junit.Test)
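
STRING_SUM_AGGREGATOR and SUM_INITIALIZER are defined elsewhere in CogroupedKStreamImplTest; a plausible minimal sketch consistent with the asserted sums (each String value parsed as an int and added to a running Integer total) would be:

import org.apache.kafka.streams.kstream.Aggregator;
import org.apache.kafka.streams.kstream.Initializer;

// Assumed shapes, inferred from the assertions above (e.g. "k2" reaching 11):
// the aggregate starts at 0 and each String value is parsed and added.
final Initializer<Integer> SUM_INITIALIZER = () -> 0;
final Aggregator<String, String, Integer> STRING_SUM_AGGREGATOR =
    (key, value, aggregate) -> aggregate + Integer.parseInt(value);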

Example 5 with IntegerDeserializer

Use of org.apache.kafka.common.serialization.IntegerDeserializer in the project kafka by apache.

From the class KStreamRepartitionIntegrationTest, method shouldCreateRepartitionTopicWithSpecifiedNumberOfPartitions.

@Test
public void shouldCreateRepartitionTopicWithSpecifiedNumberOfPartitions() throws Exception {
    final String repartitionName = "new-partitions";
    final long timestamp = System.currentTimeMillis();
    sendEvents(timestamp, Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(2, "B")));
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.Integer(), Serdes.String()))
           .repartition(Repartitioned.<Integer, String>as(repartitionName).withNumberOfPartitions(2))
           .groupByKey()
           .count()
           .toStream()
           .to(outputTopic);
    startStreams(builder);
    validateReceivedMessages(new IntegerDeserializer(), new LongDeserializer(), Arrays.asList(new KeyValue<>(1, 1L), new KeyValue<>(2, 1L)));
    final String repartitionTopicName = toRepartitionTopicName(repartitionName);
    assertTrue(topicExists(repartitionTopicName));
    assertEquals(2, getNumberOfPartitionsForTopic(repartitionTopicName));
}
Also used: StreamsBuilder (org.apache.kafka.streams.StreamsBuilder), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer), KeyValue (org.apache.kafka.streams.KeyValue), LongDeserializer (org.apache.kafka.common.serialization.LongDeserializer), IntegrationTest (org.apache.kafka.test.IntegrationTest), Test (org.junit.Test)
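
toRepartitionTopicName is a helper from the test class. Kafka Streams conventionally names repartition topics by combining the application id, the name given to Repartitioned, and a -repartition suffix, so a hypothetical stand-in consistent with the assertion might look like:

// Hypothetical stand-in for the test's toRepartitionTopicName helper, based on
// the usual "<application.id>-<name>-repartition" naming convention.
static String toRepartitionTopicName(final String applicationId, final String repartitionName) {
    return applicationId + "-" + repartitionName + "-repartition";
}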

Aggregations

IntegerDeserializer (org.apache.kafka.common.serialization.IntegerDeserializer): 31 uses
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 27 uses
Test (org.junit.Test): 22 uses
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 15 uses
IntegrationTest (org.apache.kafka.test.IntegrationTest): 13 uses
List (java.util.List): 12 uses
Map (java.util.Map): 12 uses
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 12 uses
ArrayList (java.util.ArrayList): 11 uses
IntegerSerializer (org.apache.kafka.common.serialization.IntegerSerializer): 11 uses
KeyValue (org.apache.kafka.streams.KeyValue): 11 uses
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 10 uses
HashMap (java.util.HashMap): 9 uses
Properties (java.util.Properties): 8 uses
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 8 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 8 uses
Arrays (java.util.Arrays): 7 uses
LongDeserializer (org.apache.kafka.common.serialization.LongDeserializer): 7 uses
Serdes (org.apache.kafka.common.serialization.Serdes): 7 uses
KStream (org.apache.kafka.streams.kstream.KStream): 7 uses