Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
From the class CogroupedKStreamImplTest, method shouldInsertRepartitionsTopicForUpstreamKeyModification.
@Test
public void shouldInsertRepartitionsTopicForUpstreamKeyModification() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
    final KStream<String, String> stream2 = builder.stream("two", stringConsumed);
    final KGroupedStream<String, String> groupedOne = stream1.map((k, v) -> new KeyValue<>(v, k)).groupByKey();
    final KGroupedStream<String, String> groupedTwo = stream2.groupByKey();
    final KTable<String, String> customers = groupedOne
        .cogroup(STRING_AGGREGATOR)
        .cogroup(groupedTwo, STRING_AGGREGATOR)
        .aggregate(STRING_INITIALIZER, Named.as("test"), Materialized.as("store"));
    customers.toStream().to(OUTPUT);
    final String topologyDescription = builder.build().describe().toString();
    assertThat(topologyDescription, equalTo(
        "Topologies:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [one])\n" +
            "      --> KSTREAM-MAP-0000000002\n" +
            "    Processor: KSTREAM-MAP-0000000002 (stores: [])\n" +
            "      --> store-repartition-filter\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n" +
            "    Processor: store-repartition-filter (stores: [])\n" +
            "      --> store-repartition-sink\n" +
            "      <-- KSTREAM-MAP-0000000002\n" +
            "    Sink: store-repartition-sink (topic: store-repartition)\n" +
            "      <-- store-repartition-filter\n" +
            "\n" +
            "  Sub-topology: 1\n" +
            "    Source: KSTREAM-SOURCE-0000000001 (topics: [two])\n" +
            "      --> test-cogroup-agg-1\n" +
            "    Source: store-repartition-source (topics: [store-repartition])\n" +
            "      --> test-cogroup-agg-0\n" +
            "    Processor: test-cogroup-agg-0 (stores: [store])\n" +
            "      --> test-cogroup-merge\n" +
            "      <-- store-repartition-source\n" +
            "    Processor: test-cogroup-agg-1 (stores: [store])\n" +
            "      --> test-cogroup-merge\n" +
            "      <-- KSTREAM-SOURCE-0000000001\n" +
            "    Processor: test-cogroup-merge (stores: [])\n" +
            "      --> KTABLE-TOSTREAM-0000000009\n" +
            "      <-- test-cogroup-agg-0, test-cogroup-agg-1\n" +
            "    Processor: KTABLE-TOSTREAM-0000000009 (stores: [])\n" +
            "      --> KSTREAM-SINK-0000000010\n" +
            "      <-- test-cogroup-merge\n" +
            "    Sink: KSTREAM-SINK-0000000010 (topic: output)\n" +
            "      <-- KTABLE-TOSTREAM-0000000009\n\n"));
}
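The point of the assertion: because map() may change the key, Kafka Streams must repartition before the cogrouped aggregation, which is why a store-repartition topic and a second sub-topology appear in the description. For contrast, a minimal sketch (illustrative, not part of the original test; it reuses the test's stringConsumed, STRING_AGGREGATOR, and STRING_INITIALIZER fixtures): when no upstream operation can modify the key, the cogroup reads its source directly and no repartition topic is inserted.

// Sketch: without a key-changing map(), describe() shows a single
// sub-topology and no "store-repartition" topic.
final StreamsBuilder singleSubtopology = new StreamsBuilder();
final KStream<String, String> unmodified = singleSubtopology.stream("one", stringConsumed);
unmodified.groupByKey()
    .cogroup(STRING_AGGREGATOR)
    .aggregate(STRING_INITIALIZER, Named.as("test"), Materialized.as("store"));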
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
From the class KStreamForeachTest, method testForeach.
@Test
public void testForeach() {
    // Given
    final List<KeyValue<Integer, String>> inputRecords = Arrays.asList(
        new KeyValue<>(0, "zero"),
        new KeyValue<>(1, "one"),
        new KeyValue<>(2, "two"),
        new KeyValue<>(3, "three"));
    final List<KeyValue<Integer, String>> expectedRecords = Arrays.asList(
        new KeyValue<>(0, "ZERO"),
        new KeyValue<>(2, "ONE"),
        new KeyValue<>(4, "TWO"),
        new KeyValue<>(6, "THREE"));
    final List<KeyValue<Integer, String>> actualRecords = new ArrayList<>();
    final ForeachAction<Integer, String> action =
        (key, value) -> actualRecords.add(new KeyValue<>(key * 2, value.toUpperCase(Locale.ROOT)));
    // When
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Integer, String> stream = builder.stream(topicName, Consumed.with(Serdes.Integer(), Serdes.String()));
    stream.foreach(action);
    // Then
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<Integer, String> inputTopic =
            driver.createInputTopic(topicName, new IntegerSerializer(), new StringSerializer());
        for (final KeyValue<Integer, String> record : inputRecords) {
            inputTopic.pipeInput(record.key, record.value);
        }
    }
    assertEquals(expectedRecords.size(), actualRecords.size());
    for (int i = 0; i < expectedRecords.size(); i++) {
        final KeyValue<Integer, String> expectedRecord = expectedRecords.get(i);
        final KeyValue<Integer, String> actualRecord = actualRecords.get(i);
        assertEquals(expectedRecord, actualRecord);
    }
}
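Note that foreach() is a terminal operation: it returns void, so nothing can be chained after it. If the same side effect must be observed mid-pipeline, peek() accepts the identical ForeachAction but returns the stream. A short sketch (the "peeked-output" topic name is hypothetical, not from the original test):

// Sketch: peek() runs the same action as a pass-through, so the
// stream can continue to downstream operators afterwards.
stream.peek(action)
    .mapValues(v -> v.toLowerCase(Locale.ROOT))
    .to("peeked-output");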
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
From the class StreamsGraphTest, method getTopologyWithChangingValuesAfterChangingKey.
private Topology getTopologyWithChangingValuesAfterChangingKey(final String optimizeConfig) {
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizeConfig);
    final KStream<String, String> inputStream = builder.stream("input");
    final KStream<String, String> mappedKeyStream = inputStream.selectKey((k, v) -> k + v);
    mappedKeyStream.mapValues(v -> v.toUpperCase(Locale.getDefault()))
        .groupByKey()
        .count()
        .toStream()
        .to("output");
    mappedKeyStream.flatMapValues(v -> Arrays.asList(v.split("\\s")))
        .groupByKey()
        .windowedBy(TimeWindows.of(ofMillis(5000)))
        .count()
        .toStream()
        .to("windowed-output");
    return builder.build(properties);
}
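Passing the optimization flag as a parameter lets the same logical topology be built both ways. Both groupByKey() calls sit downstream of a single key-changing selectKey(), so each would normally trigger its own repartition topic; with optimization enabled, Kafka Streams merges them into one. A sketch of how the helper might be exercised (illustrative, not from the original test):

// Sketch: compare the described topologies with and without optimization;
// the optimized build shares one repartition topic between both branches.
final Topology optimized = getTopologyWithChangingValuesAfterChangingKey(StreamsConfig.OPTIMIZE);
final Topology unoptimized = getTopologyWithChangingValuesAfterChangingKey(StreamsConfig.NO_OPTIMIZATION);
System.out.println(optimized.describe());
System.out.println(unoptimized.describe());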
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
From the class StreamsGraphTest, method shouldBeAbleToBuildTopologyIncrementally.
// Builds the topology in successive steps; on each build, only graph nodes not yet processed are written to the topology
@Test
public void shouldBeAbleToBuildTopologyIncrementally() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream("topic");
    final KStream<String, String> streamII = builder.stream("other-topic");
    final ValueJoiner<String, String, String> valueJoiner = (v, v2) -> v + v2;
    final KStream<String, String> joinedStream = stream.join(streamII, valueJoiner, JoinWindows.of(ofMillis(5000)));
    // build step one
    assertEquals(expectedJoinedTopology, builder.build().describe().toString());
    final KStream<String, String> filteredJoinStream = joinedStream.filter((k, v) -> v.equals("foo"));
    // build step two
    assertEquals(expectedJoinedFilteredTopology, builder.build().describe().toString());
    filteredJoinStream.mapValues(v -> v + "some value").to("output-topic");
    // build step three
    assertEquals(expectedFullTopology, builder.build().describe().toString());
}
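The design point being verified: build() is non-destructive, so the builder can keep accumulating operators and be built again at any step, with each description reflecting everything wired so far. A small sketch of the same property (illustrative, not part of the original test):

// Sketch: two consecutive builds at the same step describe identically,
// since building does not consume or mutate the fluent chain.
assertEquals(builder.build().describe().toString(),
    builder.build().describe().toString());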
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
From the class StreamsGraphTest, method getTopologyWithThroughOperation.
// specifically testing the deprecated variant
@Deprecated
private Topology getTopologyWithThroughOperation(final String optimizeConfig) {
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizeConfig);
    final KStream<String, String> inputStream = builder.stream("input");
    final KStream<String, String> mappedKeyStream = inputStream.selectKey((k, v) -> k + v).through("through-topic");
    mappedKeyStream.groupByKey().count().toStream().to("output");
    mappedKeyStream.groupByKey().windowedBy(TimeWindows.of(ofMillis(5000))).count().toStream().to("windowed-output");
    return builder.build(properties);
}
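through() was deprecated in favor of repartition() (KIP-221). A sketch of the non-deprecated equivalent, with the caveat that repartition() manages its own internal topic, so the given name becomes part of an "<application-id>-through-topic-repartition" topic rather than reading a pre-created "through-topic":

// Sketch: the modern replacement for through(); Repartitioned.as() names
// the internal repartition topic instead of requiring a user-managed one.
final KStream<String, String> repartitioned = inputStream
    .selectKey((k, v) -> k + v)
    .repartition(Repartitioned.as("through-topic"));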