Use of org.apache.kafka.common.serialization.StringDeserializer in project kafka by apache.
The class CogroupedKStreamImplTest, method testCogroupWithThreeGroupedStreams.
@Test
public void testCogroupWithThreeGroupedStreams() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
    final KStream<String, String> stream2 = builder.stream("two", stringConsumed);
    final KStream<String, String> stream3 = builder.stream("three", stringConsumed);
    final KGroupedStream<String, String> grouped1 = stream1.groupByKey();
    final KGroupedStream<String, String> grouped2 = stream2.groupByKey();
    final KGroupedStream<String, String> grouped3 = stream3.groupByKey();
    final KTable<String, String> customers = grouped1.cogroup(STRING_AGGREGATOR)
        .cogroup(grouped2, STRING_AGGREGATOR)
        .cogroup(grouped3, STRING_AGGREGATOR)
        .aggregate(STRING_INITIALIZER);
    customers.toStream().to(OUTPUT);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> testInputTopic =
            driver.createInputTopic("one", new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> testInputTopic2 =
            driver.createInputTopic("two", new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> testInputTopic3 =
            driver.createInputTopic("three", new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> testOutputTopic =
            driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
        testInputTopic.pipeInput("k1", "A", 0L);
        testInputTopic.pipeInput("k2", "A", 1L);
        testInputTopic.pipeInput("k1", "A", 10L);
        testInputTopic.pipeInput("k2", "A", 100L);
        testInputTopic2.pipeInput("k2", "B", 100L);
        testInputTopic2.pipeInput("k2", "B", 200L);
        testInputTopic2.pipeInput("k1", "B", 1L);
        testInputTopic2.pipeInput("k2", "B", 500L);
        testInputTopic3.pipeInput("k1", "B", 500L);
        testInputTopic3.pipeInput("k2", "B", 500L);
        testInputTopic3.pipeInput("k3", "B", 500L);
        testInputTopic3.pipeInput("k2", "B", 100L);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "A", 0);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "A", 1);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "AA", 10);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "AA", 100);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "AAB", 100);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "AABB", 200);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "AAB", 10);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "AABBB", 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k1", "AABB", 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k2", "AABBBB", 500);
        assertOutputKeyValueTimestamp(testOutputTopic, "k3", "B", 500);
    }
}
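The test leans on several helpers defined elsewhere in CogroupedKStreamImplTest (stringConsumed, OUTPUT, props, STRING_INITIALIZER, STRING_AGGREGATOR, assertOutputKeyValueTimestamp). Below is a minimal sketch of plausible definitions, inferred from the concatenation semantics the assertions imply; treat these as reconstructions, not the project's verbatim code.

// Assumed helpers: the names match the snippet, the bodies are inferred from the expected output.
private static final String OUTPUT = "output";
private static final Consumed<String, String> stringConsumed =
    Consumed.with(Serdes.String(), Serdes.String());
// Start each aggregate as the empty string, then append each incoming value;
// this yields the "A" -> "AA" -> "AAB" progression asserted above.
private static final Initializer<String> STRING_INITIALIZER = () -> "";
private static final Aggregator<String, String, String> STRING_AGGREGATOR =
    (key, value, aggregate) -> aggregate + value;

private final Properties props = new Properties();
{
    // TopologyTestDriver requires an application id and bootstrap servers but never connects anywhere.
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "cogroup-impl-test");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9091");
}

private void assertOutputKeyValueTimestamp(final TestOutputTopic<String, String> topic,
                                           final String expectedKey,
                                           final String expectedValue,
                                           final long expectedTimestamp) {
    // Read the next record from the test driver's output and compare field by field.
    final TestRecord<String, String> record = topic.readRecord();
    assertThat(record.key(), equalTo(expectedKey));
    assertThat(record.value(), equalTo(expectedValue));
    assertThat(record.timestamp(), equalTo(expectedTimestamp));
}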
Use of org.apache.kafka.common.serialization.StringDeserializer in project kafka by apache.
The class VerifiableConsumer, method createFromArgs.
public static VerifiableConsumer createFromArgs(ArgumentParser parser, String[] args) throws ArgumentParserException {
    Namespace res = parser.parseArgs(args);
    boolean useAutoCommit = res.getBoolean("useAutoCommit");
    String configFile = res.getString("consumer.config");
    String brokerHostandPort = null;
    Properties consumerProps = new Properties();
    if (configFile != null) {
        try {
            consumerProps.putAll(Utils.loadProps(configFile));
        } catch (IOException e) {
            throw new ArgumentParserException(e.getMessage(), parser);
        }
    }
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, res.getString("groupId"));
    String groupInstanceId = res.getString("groupInstanceId");
    if (groupInstanceId != null) {
        consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId);
    }
    if (res.get("bootstrapServer") != null) {
        brokerHostandPort = res.getString("bootstrapServer");
    } else if (res.getString("brokerList") != null) {
        brokerHostandPort = res.getString("brokerList");
    } else {
        parser.printHelp();
        // Can't use `Exit.exit` here because it didn't exist until 0.11.0.0.
        System.exit(0);
    }
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerHostandPort);
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, useAutoCommit);
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, res.getString("resetPolicy"));
    consumerProps.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, Integer.toString(res.getInt("sessionTimeout")));
    consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, res.getString("assignmentStrategy"));
    StringDeserializer deserializer = new StringDeserializer();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps, deserializer, deserializer);
    String topic = res.getString("topic");
    int maxMessages = res.getInt("maxMessages");
    boolean verbose = res.getBoolean("verbose");
    return new VerifiableConsumer(consumer, System.out, topic, maxMessages, useAutoCommit, false, verbose);
}
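For orientation, here is a hedged sketch of how this factory is typically driven from the tool's entry point. The argParser() helper that builds the argparse4j parser is not shown on this page, and the close()/run() wiring is an assumption about the class rather than its confirmed API:

public static void main(String[] args) {
    // argParser() is assumed: it would register options such as --group-id, --topic,
    // --bootstrap-server, and --broker-list with argparse4j.
    ArgumentParser parser = argParser();
    try {
        final VerifiableConsumer consumer = createFromArgs(parser, args);
        // Assuming the class exposes close() for cleanup, shut down cleanly on SIGTERM.
        Runtime.getRuntime().addShutdownHook(new Thread(consumer::close));
        consumer.run();
    } catch (ArgumentParserException e) {
        parser.handleError(e);
        System.exit(1);
    }
}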
Use of org.apache.kafka.common.serialization.StringDeserializer in project kafka by apache.
The class KStreamAggregationDedupIntegrationTest, method shouldGroupByKey.
@Test
public void shouldGroupByKey() throws Exception {
    final long timestamp = mockTime.milliseconds();
    produceMessages(timestamp);
    produceMessages(timestamp);
    stream.groupByKey(Grouped.with(Serdes.Integer(), Serdes.String()))
        .windowedBy(TimeWindows.of(ofMillis(500L)))
        .count(Materialized.as("count-windows"))
        .toStream((windowedKey, value) -> windowedKey.key() + "@" + windowedKey.window().start())
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));
    startStreams();
    final long window = timestamp / 500 * 500;
    validateReceivedMessages(new StringDeserializer(), new LongDeserializer(), Arrays.asList(
        new KeyValueTimestamp<>("1@" + window, 2L, timestamp),
        new KeyValueTimestamp<>("2@" + window, 2L, timestamp),
        new KeyValueTimestamp<>("3@" + window, 2L, timestamp),
        new KeyValueTimestamp<>("4@" + window, 2L, timestamp),
        new KeyValueTimestamp<>("5@" + window, 2L, timestamp)));
}
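validateReceivedMessages is a private helper of this test class and is not shown here. One plausible shape, assuming the embedded-cluster style that Kafka Streams integration tests use; the CLUSTER field and the waitUntilFinalKeyValueTimestampRecordsReceived helper name are assumptions based on that style, not confirmed by this page:

private <K, V> void validateReceivedMessages(final Deserializer<K> keyDeserializer,
                                             final Deserializer<V> valueDeserializer,
                                             final List<KeyValueTimestamp<K, V>> expectedRecords) throws Exception {
    final Properties consumerProperties = new Properties();
    // CLUSTER is assumed to be the test's embedded Kafka cluster.
    consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "validating-consumer");
    consumerProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass().getName());
    consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass().getName());
    // Block until the output topic holds exactly the expected records (with timestamps), or time out.
    IntegrationTestUtils.waitUntilFinalKeyValueTimestampRecordsReceived(consumerProperties, outputTopic, expectedRecords);
}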
Use of org.apache.kafka.common.serialization.StringDeserializer in project kafka by apache.
The class KStreamAggregationDedupIntegrationTest, method shouldReduce.
@Test
public void shouldReduce() throws Exception {
    produceMessages(System.currentTimeMillis());
    groupedStream.reduce(reducer, Materialized.as("reduce-by-key"))
        .toStream()
        .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
    startStreams();
    final long timestamp = System.currentTimeMillis();
    produceMessages(timestamp);
    validateReceivedMessages(new StringDeserializer(), new StringDeserializer(), Arrays.asList(
        new KeyValueTimestamp<>("A", "A:A", timestamp),
        new KeyValueTimestamp<>("B", "B:B", timestamp),
        new KeyValueTimestamp<>("C", "C:C", timestamp),
        new KeyValueTimestamp<>("D", "D:D", timestamp),
        new KeyValueTimestamp<>("E", "E:E", timestamp)));
}
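The reducer field is defined elsewhere in the test class. Given the expected outputs ("A:A", "B:B", and so on), it evidently joins the accumulated and incoming values with a colon; a one-line reconstruction, not the project's verbatim code:

// Combines two values for the same key; with duplicated inputs this produces "A:A", "B:B", etc.
private final Reducer<String> reducer = (value1, value2) -> value1 + ":" + value2;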
Use of org.apache.kafka.common.serialization.StringDeserializer in project kafka by apache.
The class KStreamRepartitionIntegrationTest, method shouldCreateOnlyOneRepartitionTopicWhenRepartitionIsFollowedByGroupByKey.
@Test
public void shouldCreateOnlyOneRepartitionTopicWhenRepartitionIsFollowedByGroupByKey() throws Exception {
    final String repartitionName = "new-partitions";
    final long timestamp = System.currentTimeMillis();
    sendEvents(timestamp, Arrays.asList(new KeyValue<>(1, "A"), new KeyValue<>(2, "B")));
    final StreamsBuilder builder = new StreamsBuilder();
    final Repartitioned<String, String> repartitioned = Repartitioned.<String, String>as(repartitionName)
        .withKeySerde(Serdes.String())
        .withValueSerde(Serdes.String())
        .withNumberOfPartitions(1);
    builder.stream(inputTopic, Consumed.with(Serdes.Integer(), Serdes.String()))
        .selectKey((key, value) -> key.toString())
        .repartition(repartitioned)
        .groupByKey()
        .count()
        .toStream()
        .to(outputTopic);
    startStreams(builder);
    final String topology = builder.build().describe().toString();
    validateReceivedMessages(new StringDeserializer(), new LongDeserializer(),
        Arrays.asList(new KeyValue<>("1", 1L), new KeyValue<>("2", 1L)));
    assertTrue(topicExists(toRepartitionTopicName(repartitionName)));
    assertEquals(1, countOccurrencesInTopology(topology, "Sink: .*-repartition"));
}
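Two small helpers carry the final assertions: Kafka Streams names internal repartition topics <application.id>-<name>-repartition, and the test counts repartition sinks in the printed topology description. A hedged sketch of both; the applicationId field and the exact bodies are assumptions (uses java.util.regex):

private String toRepartitionTopicName(final String name) {
    // Streams prefixes internal topic names with the application id.
    return applicationId + "-" + name + "-repartition";
}

private int countOccurrencesInTopology(final String topologyDescription, final String regex) {
    // Count non-overlapping matches of the pattern, e.g. one per repartition sink line.
    final java.util.regex.Matcher matcher =
        java.util.regex.Pattern.compile(regex).matcher(topologyDescription);
    int count = 0;
    while (matcher.find()) {
        count++;
    }
    return count;
}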