Use of org.apache.kafka.common.serialization.IntegerDeserializer in project kafka by apache.
The class KStreamAggregationIntegrationTest, method shouldAggregate.
@Test
public void shouldAggregate() throws Exception {
    produceMessages(mockTime.milliseconds());
    groupedStream
        .aggregate(initializer, aggregator, Materialized.as("aggregate-by-selected-key"))
        .toStream()
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Integer()));
    startStreams();
    produceMessages(mockTime.milliseconds());
    final List<KeyValueTimestamp<String, Integer>> results = receiveMessages(new StringDeserializer(), new IntegerDeserializer(), 10);
    results.sort(KStreamAggregationIntegrationTest::compare);
    assertThat(results, is(Arrays.asList(
        new KeyValueTimestamp<>("A", 1, mockTime.milliseconds()),
        new KeyValueTimestamp<>("A", 2, mockTime.milliseconds()),
        new KeyValueTimestamp<>("B", 1, mockTime.milliseconds()),
        new KeyValueTimestamp<>("B", 2, mockTime.milliseconds()),
        new KeyValueTimestamp<>("C", 1, mockTime.milliseconds()),
        new KeyValueTimestamp<>("C", 2, mockTime.milliseconds()),
        new KeyValueTimestamp<>("D", 1, mockTime.milliseconds()),
        new KeyValueTimestamp<>("D", 2, mockTime.milliseconds()),
        new KeyValueTimestamp<>("E", 1, mockTime.milliseconds()),
        new KeyValueTimestamp<>("E", 2, mockTime.milliseconds()))));
}
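The sort uses a compare helper defined elsewhere in KStreamAggregationIntegrationTest. A minimal sketch of such a comparator, assuming it orders KeyValueTimestamp results by key, then value, then timestamp (the exact ordering in the Kafka source may differ):

    private static <K extends Comparable<K>, V extends Comparable<V>> int compare(
            final KeyValueTimestamp<K, V> left,
            final KeyValueTimestamp<K, V> right) {
        // Order by key first, then value, then timestamp, so the
        // assertion in the test can list the expected records deterministically.
        final int byKey = left.key().compareTo(right.key());
        if (byKey != 0) {
            return byKey;
        }
        final int byValue = left.value().compareTo(right.value());
        if (byValue != 0) {
            return byValue;
        }
        return Long.compare(left.timestamp(), right.timestamp());
    }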
Use of org.apache.kafka.common.serialization.IntegerDeserializer in project kafka by apache.
The class KStreamSplitTest, method testBranchingWithNoTerminalOperation.
@Test
public void testBranchingWithNoTerminalOperation() {
    final String outputTopicName = "output";
    source.split()
        .branch(isEven, Branched.withConsumer(ks -> ks.to(outputTopicName)))
        .branch(isMultipleOfFive, Branched.withConsumer(ks -> ks.to(outputTopicName)));
    builder.build();
    withDriver(driver -> {
        final TestOutputTopic<Integer, String> outputTopic = driver.createOutputTopic(outputTopicName, new IntegerDeserializer(), new StringDeserializer());
        assertEquals(Arrays.asList("V0", "V2", "V4", "V5", "V6"), outputTopic.readValuesToList());
    });
}
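The isEven and isMultipleOfFive predicates are fields of the test class and are not shown in the snippet. A sketch, assuming the source stream is keyed by Integer (the expected values V0, V2, V4, V5, V6 correspond to keys 0 through 6 that are even or divisible by five):

    // Hypothetical definitions consistent with the expected output above.
    private final Predicate<Integer, String> isEven = (key, value) -> key % 2 == 0;
    private final Predicate<Integer, String> isMultipleOfFive = (key, value) -> key % 5 == 0;

Note that split() routes each record only to the first branch whose predicate matches, so key 0, which satisfies both predicates, is emitted once.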
Use of org.apache.kafka.common.serialization.IntegerDeserializer in project kafka by apache.
The class CogroupedKStreamImplTest, method shouldCoGroupStreamsWithDifferentInputTypes.
@Test
public void shouldCoGroupStreamsWithDifferentInputTypes() {
    final StreamsBuilder builder = new StreamsBuilder();
    final Consumed<String, Integer> integerConsumed = Consumed.with(Serdes.String(), Serdes.Integer());
    final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
    final KStream<String, Integer> stream2 = builder.stream("two", integerConsumed);
    final KGroupedStream<String, String> grouped1 = stream1.groupByKey();
    final KGroupedStream<String, Integer> grouped2 = stream2.groupByKey();
    final KTable<String, Integer> customers = grouped1
        .cogroup(STRING_SUM_AGGREGATOR)
        .cogroup(grouped2, SUM_AGGREGATOR)
        .aggregate(SUM_INITIALIZER, Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("store1").withValueSerde(Serdes.Integer()));
    customers.toStream().to(OUTPUT);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> testInputTopic = driver.createInputTopic("one", new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, Integer> testInputTopic2 = driver.createInputTopic("two", new StringSerializer(), new IntegerSerializer());
        final TestOutputTopic<String, Integer> testOutputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new IntegerDeserializer());
        testInputTopic.pipeInput("k1", "1", 0L);
        testInputTopic.pipeInput("k2", "1", 1L);
        testInputTopic.pipeInput("k1", "1", 10L);
        testInputTopic.pipeInput("k2", "1", 100L);
        testInputTopic2.pipeInput("k2", 2, 100L);
        testInputTopic2.pipeInput("k2", 2, 200L);
        testInputTopic2.pipeInput("k1", 2, 1L);
        testInputTopic2.pipeInput("k2", 2, 500L);
        testInputTopic2.pipeInput("k1", 2, 500L);
        testInputTopic2.pipeInput("k2", 3, 500L);
        testInputTopic2.pipeInput("k3", 2, 500L);
        testInputTopic2.pipeInput("k2", 2, 100L);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 1, 0);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 1, 1);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 2, 10);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 2, 100);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 4, 100);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 6, 200);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 4, 10);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 8, 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", 6, 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", 11, 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k3", 2, 500);
    }
}
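assertOutputKeyValueTimestamp is a private helper of the test class; a minimal sketch, assuming it reads the next record from the output topic and checks key, value, and timestamp together:

    private void assertOutputKeyValueTimestamp(final TestOutputTopic<String, Integer> outputTopic,
                                               final String expectedKey,
                                               final Integer expectedValue,
                                               final long expectedTimestamp) {
        // Compare the whole record at once via TestRecord equality.
        final TestRecord<String, Integer> record = outputTopic.readRecord();
        assertThat(record, equalTo(new TestRecord<>(expectedKey, expectedValue, null, expectedTimestamp)));
    }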
Use of org.apache.kafka.common.serialization.IntegerDeserializer in project kafka by apache.
The class EosTestDriver, method verifyMin.
private static void verifyMin(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition,
                              final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> minPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final IntegerDeserializer integerDeserializer = new IntegerDeserializer();
    final HashMap<String, Integer> currentMinPerKey = new HashMap<>();
    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : minPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("data", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionMin = partitionRecords.getValue();
        if (partitionInput.size() != partitionMin.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size() + " records for " + partitionRecords.getKey() + " but received " + partitionMin.size());
        }
        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionMin) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final int receivedValue = integerDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());
            final int value = integerDeserializer.deserialize(input.topic(), input.value());
            Integer min = currentMinPerKey.get(key);
            if (min == null) {
                min = value;
            } else {
                min = Math.min(min, value);
            }
            currentMinPerKey.put(key, min);
            if (!receivedKey.equals(key) || receivedValue != min) {
                throw new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + key + "," + min + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
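Both verification methods drive IntegerDeserializer directly rather than through a consumer. As a standalone illustration of that call (the topic name is arbitrary here; the built-in deserializer ignores it):

    // Round trip: IntegerSerializer writes four big-endian bytes,
    // IntegerDeserializer reads them back as an Integer.
    final IntegerSerializer serializer = new IntegerSerializer();
    final IntegerDeserializer deserializer = new IntegerDeserializer();
    final byte[] bytes = serializer.serialize("any-topic", 42);
    final int roundTripped = deserializer.deserialize("any-topic", bytes); // 42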
Use of org.apache.kafka.common.serialization.IntegerDeserializer in project kafka by apache.
The class EosTestDriver, method verifySum.
private static void verifySum(final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> inputPerTopicPerPartition,
                              final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> sumPerTopicPerPartition) {
    final StringDeserializer stringDeserializer = new StringDeserializer();
    final IntegerDeserializer integerDeserializer = new IntegerDeserializer();
    final LongDeserializer longDeserializer = new LongDeserializer();
    final HashMap<String, Long> currentSumPerKey = new HashMap<>();
    for (final Map.Entry<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords : sumPerTopicPerPartition.entrySet()) {
        final TopicPartition inputTopicPartition = new TopicPartition("data", partitionRecords.getKey().partition());
        final List<ConsumerRecord<byte[], byte[]>> partitionInput = inputPerTopicPerPartition.get(inputTopicPartition);
        final List<ConsumerRecord<byte[], byte[]>> partitionSum = partitionRecords.getValue();
        if (partitionInput.size() != partitionSum.size()) {
            throw new RuntimeException("Result verification failed: expected " + partitionInput.size() + " records for " + partitionRecords.getKey() + " but received " + partitionSum.size());
        }
        final Iterator<ConsumerRecord<byte[], byte[]>> inputRecords = partitionInput.iterator();
        for (final ConsumerRecord<byte[], byte[]> receivedRecord : partitionSum) {
            final ConsumerRecord<byte[], byte[]> input = inputRecords.next();
            final String receivedKey = stringDeserializer.deserialize(receivedRecord.topic(), receivedRecord.key());
            final long receivedValue = longDeserializer.deserialize(receivedRecord.topic(), receivedRecord.value());
            final String key = stringDeserializer.deserialize(input.topic(), input.key());
            final int value = integerDeserializer.deserialize(input.topic(), input.value());
            Long sum = currentSumPerKey.get(key);
            if (sum == null) {
                sum = (long) value;
            } else {
                sum += value;
            }
            currentSumPerKey.put(key, sum);
            if (!receivedKey.equals(key) || receivedValue != sum) {
                throw new RuntimeException("Result verification failed for " + receivedRecord + " expected <" + key + "," + sum + "> but was <" + receivedKey + "," + receivedValue + ">");
            }
        }
    }
}
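verifyMin and verifySum differ only in the accumulator they keep per key; the get/null-check/put sequence in both loops could be collapsed with Map.merge. A hypothetical simplification, not taken from the Kafka source:

    // Replaces the per-key bookkeeping in both verification loops.
    private static void updateAccumulators(final Map<String, Integer> currentMinPerKey,
                                           final Map<String, Long> currentSumPerKey,
                                           final String key,
                                           final int value) {
        currentMinPerKey.merge(key, value, Math::min);          // running minimum per key
        currentSumPerKey.merge(key, (long) value, Long::sum);   // running sum per key
    }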