Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka by apache.
The class RepartitionWithMergeOptimizingTest, method runTest.
private void runTest(final String optimizationConfig, final int expectedNumberRepartitionTopics) {
    streamsConfiguration.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizationConfig);

    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> sourceAStream =
        builder.stream(INPUT_A_TOPIC, Consumed.with(Serdes.String(), Serdes.String()).withName("sourceAStream"));
    final KStream<String, String> sourceBStream =
        builder.stream(INPUT_B_TOPIC, Consumed.with(Serdes.String(), Serdes.String()).withName("sourceBStream"));

    final KStream<String, String> mappedAStream =
        sourceAStream.map((k, v) -> KeyValue.pair(v.split(":")[0], v), Named.as("mappedAStream"));
    final KStream<String, String> mappedBStream =
        sourceBStream.map((k, v) -> KeyValue.pair(v.split(":")[0], v), Named.as("mappedBStream"));

    final KStream<String, String> mergedStream = mappedAStream.merge(mappedBStream, Named.as("mergedStream"));

    mergedStream.groupByKey(Grouped.as("long-groupByKey"))
        .count(Named.as("long-count"), Materialized.as(Stores.inMemoryKeyValueStore("long-store")))
        .toStream(Named.as("long-toStream"))
        .to(COUNT_TOPIC, Produced.with(Serdes.String(), Serdes.Long()).withName("long-to"));

    mergedStream.groupByKey(Grouped.as("string-groupByKey"))
        .count(Named.as("string-count"), Materialized.as(Stores.inMemoryKeyValueStore("string-store")))
        .toStream(Named.as("string-toStream"))
        .mapValues(v -> v.toString(), Named.as("string-mapValues"))
        .to(STRING_COUNT_TOPIC, Produced.with(Serdes.String(), Serdes.String()).withName("string-to"));

    final Topology topology = builder.build(streamsConfiguration);
    topologyTestDriver = new TopologyTestDriver(topology, streamsConfiguration);

    final TestInputTopic<String, String> inputTopicA =
        topologyTestDriver.createInputTopic(INPUT_A_TOPIC, stringSerializer, stringSerializer);
    final TestInputTopic<String, String> inputTopicB =
        topologyTestDriver.createInputTopic(INPUT_B_TOPIC, stringSerializer, stringSerializer);
    final TestOutputTopic<String, Long> countOutputTopic =
        topologyTestDriver.createOutputTopic(COUNT_TOPIC, stringDeserializer, new LongDeserializer());
    final TestOutputTopic<String, String> stringCountOutputTopic =
        topologyTestDriver.createOutputTopic(STRING_COUNT_TOPIC, stringDeserializer, stringDeserializer);

    inputTopicA.pipeKeyValueList(getKeyValues());
    inputTopicB.pipeKeyValueList(getKeyValues());

    final String topologyString = topology.describe().toString();

    // Verify the topology
    if (optimizationConfig.equals(StreamsConfig.OPTIMIZE)) {
        assertEquals(EXPECTED_OPTIMIZED_TOPOLOGY, topologyString);
    } else {
        assertEquals(EXPECTED_UNOPTIMIZED_TOPOLOGY, topologyString);
    }

    // Verify the number of repartition topics
    assertEquals(expectedNumberRepartitionTopics, getCountOfRepartitionTopicsFound(topologyString));

    // Verify the expected output
    assertThat(countOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedCountKeyValues)));
    assertThat(stringCountOutputTopic.readKeyValuesToMap(), equalTo(keyValueListToMap(expectedStringCountKeyValues)));
}
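For context, the two test methods that drive runTest would look roughly like the sketch below. This is a hedged reconstruction, not copied from the test class: the method names and the expected repartition-topic counts (1 when optimized, 2 when not) are assumptions about this particular merge-then-group topology.

@Test
public void shouldSendCorrectResultsWithOptimization() {
    // assumed: the optimized plan reuses a single repartition topic for both aggregations
    runTest(StreamsConfig.OPTIMIZE, 1);
}

@Test
public void shouldSendCorrectResultsWithoutOptimization() {
    // assumed: without optimization each groupByKey gets its own repartition topic
    runTest(StreamsConfig.NO_OPTIMIZATION, 2);
}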
Use of org.apache.kafka.common.serialization.LongDeserializer in project kafka by apache.
The class EosTestDriver, method verifyCnt.
private static void verifyCnt(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition,
                              final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> cntPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final LongDeserializer longDeserializer = new LongDeserializer();
    // running count observed so far for each key
    final HashMap<String, Long> currentSumPerKey = new HashMap<>();

    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : cntPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("repartition", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionCnt = partitionRecords.getValue();

        if (partitionInput.size() != partitionCnt.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size()
                + " records for " + partitionRecords.getKey() + " but received " + partitionCnt.size());
        }

        // input and count records are compared pairwise, in offset order
        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionCnt) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final long receivedValue = longDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());

            Long cnt = currentSumPerKey.get(key);
            if (cnt == null) {
                cnt = 0L;
            }
            currentSumPerKey.put(key, ++cnt);

            if (!receivedKey.equals(key) || receivedValue != cnt) {
                throw new RuntimeException("Result verification failed for " + receivedRecord
                    + " expected <" + key + "," + cnt + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
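verifyCnt expects both maps to hold the consumed records grouped by partition and in offset order. A minimal sketch of how such a map could be assembled is shown below; it is an assumption for illustration (the helper name is made up and imports are elided, as in the snippets above), not the actual EosTestDriver code.

// Hypothetical helper: drain a consumer until the expected number of records
// has been seen, grouping them by their TopicPartition in arrival (offset) order.
private static Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> groupByPartition(
        final KafkaConsumer<byte[], byte[]> consumer,
        final long expectedRecords) {
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsPerPartition = new HashMap<>();
    long received = 0;
    while (received < expectedRecords) {
        for (final ConsumerRecord<byte[], byte[]> record : consumer.poll(Duration.ofMillis(100))) {
            recordsPerPartition
                .computeIfAbsent(new TopicPartition(record.topic(), record.partition()), tp -> new ArrayList<>())
                .add(record);
            received++;
        }
    }
    return recordsPerPartition;
}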
Use of org.apache.kafka.common.serialization.LongDeserializer in project apache-kafka-on-k8s by banzaicloud.
The class KStreamAggregationDedupIntegrationTest, method shouldGroupByKey.
@Test
public void shouldGroupByKey() throws Exception {
    final long timestamp = mockTime.milliseconds();
    produceMessages(timestamp);
    produceMessages(timestamp);

    stream.groupByKey(Serialized.with(Serdes.Integer(), Serdes.String()))
        .count(TimeWindows.of(500L), "count-windows")
        .toStream(new KeyValueMapper<Windowed<Integer>, Long, String>() {
            @Override
            public String apply(final Windowed<Integer> windowedKey, final Long value) {
                return windowedKey.key() + "@" + windowedKey.window().start();
            }
        })
        .to(Serdes.String(), Serdes.Long(), outputTopic);

    startStreams();

    final List<KeyValue<String, Long>> results = receiveMessages(new StringDeserializer(), new LongDeserializer(), 5);
    Collections.sort(results, new Comparator<KeyValue<String, Long>>() {
        @Override
        public int compare(final KeyValue<String, Long> o1, final KeyValue<String, Long> o2) {
            return KStreamAggregationDedupIntegrationTest.compare(o1, o2);
        }
    });

    final long window = timestamp / 500 * 500;
    assertThat(results, is(Arrays.asList(
        KeyValue.pair("1@" + window, 2L),
        KeyValue.pair("2@" + window, 2L),
        KeyValue.pair("3@" + window, 2L),
        KeyValue.pair("4@" + window, 2L),
        KeyValue.pair("5@" + window, 2L))));
}
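The output keys embed the start of the 500 ms window each record falls into, and timestamp / 500 * 500 is simply that window start: integer division truncates, rounding the timestamp down to the nearest multiple of 500. A quick worked example (the concrete timestamp is made up):

// Integer division truncates, so this rounds the timestamp down to the window start.
final long timestamp = 1234L;
final long window = timestamp / 500 * 500; // 1000
// Every record produced with this timestamp lands in the window [1000, 1500),
// so the aggregated keys read "1@1000", "2@1000", and so on.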
Use of org.apache.kafka.common.serialization.LongDeserializer in project apache-kafka-on-k8s by banzaicloud.
The class KStreamAggregationIntegrationTest, method shouldGroupByKey.
@Test
public void shouldGroupByKey() throws Exception {
    final long timestamp = mockTime.milliseconds();
    produceMessages(timestamp);
    produceMessages(timestamp);

    stream.groupByKey(Serialized.with(Serdes.Integer(), Serdes.String()))
        .windowedBy(TimeWindows.of(500L))
        .count()
        .toStream(new KeyValueMapper<Windowed<Integer>, Long, String>() {
            @Override
            public String apply(final Windowed<Integer> windowedKey, final Long value) {
                return windowedKey.key() + "@" + windowedKey.window().start();
            }
        })
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));

    startStreams();

    final List<KeyValue<String, Long>> results = receiveMessages(new StringDeserializer(), new LongDeserializer(), 10);
    Collections.sort(results, new Comparator<KeyValue<String, Long>>() {
        @Override
        public int compare(final KeyValue<String, Long> o1, final KeyValue<String, Long> o2) {
            return KStreamAggregationIntegrationTest.compare(o1, o2);
        }
    });

    final long window = timestamp / 500 * 500;
    assertThat(results, is(Arrays.asList(
        KeyValue.pair("1@" + window, 1L), KeyValue.pair("1@" + window, 2L),
        KeyValue.pair("2@" + window, 1L), KeyValue.pair("2@" + window, 2L),
        KeyValue.pair("3@" + window, 1L), KeyValue.pair("3@" + window, 2L),
        KeyValue.pair("4@" + window, 1L), KeyValue.pair("4@" + window, 2L),
        KeyValue.pair("5@" + window, 1L), KeyValue.pair("5@" + window, 2L))));
}
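Unlike the deduplicating variant above, this test expects 10 records rather than 5: without the dedup setup, each key emits its intermediate count (1L) as well as its final count (2L). Both tests read results back through a receiveMessages helper parameterized with a StringDeserializer and a LongDeserializer; a minimal sketch of such a helper follows. It is an assumption for illustration written against a recent client API (the CLUSTER.bootstrapServers() call and the consumer settings are hypothetical), not the project's actual implementation.

private <K, V> List<KeyValue<K, V>> receiveMessages(final Deserializer<K> keyDeserializer,
                                                    final Deserializer<V> valueDeserializer,
                                                    final int numMessages) {
    final Properties props = new Properties();
    props.put("bootstrap.servers", CLUSTER.bootstrapServers()); // assumed embedded-cluster helper
    props.put("group.id", "test-consumer-" + UUID.randomUUID());
    props.put("auto.offset.reset", "earliest");
    final List<KeyValue<K, V>> received = new ArrayList<>();
    try (KafkaConsumer<K, V> consumer = new KafkaConsumer<>(props, keyDeserializer, valueDeserializer)) {
        consumer.subscribe(Collections.singletonList(outputTopic));
        // keep polling until the expected number of windowed counts has arrived
        while (received.size() < numMessages) {
            for (final ConsumerRecord<K, V> record : consumer.poll(Duration.ofMillis(100))) {
                received.add(KeyValue.pair(record.key(), record.value()));
            }
        }
    }
    return received;
}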
Use of org.apache.kafka.common.serialization.LongDeserializer in project sda-dropwizard-commons by SDA-SE.
The class KafkaBundleWithConfigIT, method multiTest.
@Test
public void multiTest() {
    final String TOPIC_CREATE = "create";
    final String TOPIC_DELETE = "delete";
    KAFKA.getKafkaTestUtils().createTopic(TOPIC_CREATE, 1, (short) 1);
    KAFKA.getKafkaTestUtils().createTopic(TOPIC_DELETE, 1, (short) 1);

    kafkaBundle.createMessageListener(MessageListenerRegistration.builder()
        .withDefaultListenerConfig().forTopic(TOPIC_CREATE).withDefaultConsumer()
        .withKeyDeserializer(new LongDeserializer())
        .withListenerStrategy(new SyncCommitMLS<Long, String>(
            record -> resultsString.add(record.value()), new IgnoreAndProceedErrorHandler<>()))
        .build());
    kafkaBundle.createMessageListener(MessageListenerRegistration.builder()
        .withDefaultListenerConfig().forTopic(TOPIC_DELETE).withDefaultConsumer()
        .withListenerStrategy(new SyncCommitMLS<String, String>(
            record -> resultsString.add(record.value()), new IgnoreAndProceedErrorHandler<>()))
        .build());

    MessageProducer<Long, String> createProducer = kafkaBundle.registerProducer(
        ProducerRegistration.<Long, String>builder().forTopic(TOPIC_CREATE)
            .withProducerConfig(new ProducerConfig()).withKeySerializer(new LongSerializer()).build());
    MessageProducer<String, String> deleteProducer = kafkaBundle.registerProducer(
        ProducerRegistration.<String, String>builder().forTopic(TOPIC_DELETE).withDefaultProducer().build());

    createProducer.send(1L, "test1");
    deleteProducer.send("key", "test2");

    await().atMost(KafkaBundleConsts.N_MAX_WAIT_MS, MILLISECONDS).until(() -> resultsString.size() == 2);
    assertThat(resultsString).containsExactlyInAnyOrder("test1", "test2");
}
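Here the LongDeserializer is wired into the bundle's listener for the "create" topic because that topic's producer serializes keys with a LongSerializer, while the "delete" topic keeps the default String (de)serializers. For comparison, a plain Kafka consumer reading the same "create" topic would declare the deserializers through configuration; the sketch below is hypothetical (broker address and group id are made up) and is not part of the sda-dropwizard-commons API.

// Equivalent plain-consumer configuration for the "create" topic, where keys are Longs.
final Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
props.put("group.id", "create-consumer");         // assumed group id
props.put("key.deserializer", LongDeserializer.class.getName());
props.put("value.deserializer", StringDeserializer.class.getName());
try (KafkaConsumer<Long, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(Collections.singletonList("create"));
    // poll loop omitted
}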