Use of org.apache.kafka.streams.kstream.KTable in project kafka by apache.
The class KGroupedTableImplTest, method shouldReduceAndMaterializeResults.
@Test
public void shouldReduceAndMaterializeResults() {
    final KeyValueMapper<String, Number, KeyValue<String, Integer>> intProjection =
        (key, value) -> KeyValue.pair(key, value.intValue());
    final KTable<String, Integer> reduced = builder
        .table(topic, Consumed.with(Serdes.String(), Serdes.Double()))
        .groupBy(intProjection)
        .reduce(MockReducer.INTEGER_ADDER,
                MockReducer.INTEGER_SUBTRACTOR,
                Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("reduce")
                    .withKeySerde(Serdes.String())
                    .withValueSerde(Serdes.Integer()));
    final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = getReducedResults(reduced);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        assertReduced(supplier.theCapturedProcessor().lastValueAndTimestampPerKey(), topic, driver);
        {
            final KeyValueStore<String, Integer> reduce = driver.getKeyValueStore("reduce");
            assertThat(reduce.get("A"), equalTo(5));
            assertThat(reduce.get("B"), equalTo(6));
        }
        {
            final KeyValueStore<String, ValueAndTimestamp<Integer>> reduce = driver.getTimestampedKeyValueStore("reduce");
            assertThat(reduce.get("A"), equalTo(ValueAndTimestamp.make(5, 50L)));
            assertThat(reduce.get("B"), equalTo(ValueAndTimestamp.make(6, 30L)));
        }
    }
}
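The assertions above read the materialized "reduce" store directly from the TopologyTestDriver. Outside a test, the same named store can be reached through interactive queries; the sketch below is illustrative only and assumes an already-started KafkaStreams instance built from this topology.

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StoreQueryParameters;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

// Hypothetical helper, not part of the test: look up the current reduced value for a key
// from the named "reduce" store of a running KafkaStreams instance.
static Integer currentReducedValue(final KafkaStreams streams, final String key) {
    final ReadOnlyKeyValueStore<String, Integer> store = streams.store(
        StoreQueryParameters.fromNameAndType("reduce", QueryableStoreTypes.keyValueStore()));
    return store.get(key);
}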
Use of org.apache.kafka.streams.kstream.KTable in project kafka by apache.
The class KGroupedTableImplTest, method shouldReduceWithInternalStoreName.
@Test
public void shouldReduceWithInternalStoreName() {
    final KeyValueMapper<String, Number, KeyValue<String, Integer>> intProjection =
        (key, value) -> KeyValue.pair(key, value.intValue());
    final KTable<String, Integer> reduced = builder
        .table(topic,
               Consumed.with(Serdes.String(), Serdes.Double()),
               Materialized.<String, Double, KeyValueStore<Bytes, byte[]>>as("store")
                   .withKeySerde(Serdes.String())
                   .withValueSerde(Serdes.Double()))
        .groupBy(intProjection)
        .reduce(MockReducer.INTEGER_ADDER, MockReducer.INTEGER_SUBTRACTOR);
    final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = getReducedResults(reduced);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        assertReduced(supplier.theCapturedProcessor().lastValueAndTimestampPerKey(), topic, driver);
        assertNull(reduced.queryableStoreName());
    }
}
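Because no Materialized is passed to reduce() here, Streams backs the result with an internal store and queryableStoreName() returns null. For contrast, a minimal sketch of the same pipeline with a named store; it reuses builder, topic, and the mock reducers from the tests above, and the store name "queryable-reduce" is illustrative only.

// Sketch: naming the reduce store makes the resulting KTable queryable.
final KeyValueMapper<String, Number, KeyValue<String, Integer>> intProjection =
    (key, value) -> KeyValue.pair(key, value.intValue());
final KTable<String, Integer> queryable = builder
    .table(topic, Consumed.with(Serdes.String(), Serdes.Double()))
    .groupBy(intProjection)
    .reduce(MockReducer.INTEGER_ADDER,
            MockReducer.INTEGER_SUBTRACTOR,
            Materialized.as("queryable-reduce"));
// queryable.queryableStoreName() now returns "queryable-reduce" rather than null.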
Use of org.apache.kafka.streams.kstream.KTable in project kafka by apache.
The class CogroupedKStreamImplTest, method shouldInsertRepartitionsTopicForUpstreamKeyModification.
@Test
public void shouldInsertRepartitionsTopicForUpstreamKeyModification() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream1 = builder.stream("one", stringConsumed);
    final KStream<String, String> test2 = builder.stream("two", stringConsumed);
    final KGroupedStream<String, String> groupedOne = stream1.map((k, v) -> new KeyValue<>(v, k)).groupByKey();
    final KGroupedStream<String, String> groupedTwo = test2.groupByKey();
    final KTable<String, String> customers = groupedOne
        .cogroup(STRING_AGGREGATOR)
        .cogroup(groupedTwo, STRING_AGGREGATOR)
        .aggregate(STRING_INITIALIZER, Named.as("test"), Materialized.as("store"));
    customers.toStream().to(OUTPUT);
    final String topologyDescription = builder.build().describe().toString();
    assertThat(topologyDescription, equalTo(
        "Topologies:\n"
            + " Sub-topology: 0\n"
            + " Source: KSTREAM-SOURCE-0000000000 (topics: [one])\n"
            + " --> KSTREAM-MAP-0000000002\n"
            + " Processor: KSTREAM-MAP-0000000002 (stores: [])\n"
            + " --> store-repartition-filter\n"
            + " <-- KSTREAM-SOURCE-0000000000\n"
            + " Processor: store-repartition-filter (stores: [])\n"
            + " --> store-repartition-sink\n"
            + " <-- KSTREAM-MAP-0000000002\n"
            + " Sink: store-repartition-sink (topic: store-repartition)\n"
            + " <-- store-repartition-filter\n\n"
            + " Sub-topology: 1\n"
            + " Source: KSTREAM-SOURCE-0000000001 (topics: [two])\n"
            + " --> test-cogroup-agg-1\n"
            + " Source: store-repartition-source (topics: [store-repartition])\n"
            + " --> test-cogroup-agg-0\n"
            + " Processor: test-cogroup-agg-0 (stores: [store])\n"
            + " --> test-cogroup-merge\n"
            + " <-- store-repartition-source\n"
            + " Processor: test-cogroup-agg-1 (stores: [store])\n"
            + " --> test-cogroup-merge\n"
            + " <-- KSTREAM-SOURCE-0000000001\n"
            + " Processor: test-cogroup-merge (stores: [])\n"
            + " --> KTABLE-TOSTREAM-0000000009\n"
            + " <-- test-cogroup-agg-0, test-cogroup-agg-1\n"
            + " Processor: KTABLE-TOSTREAM-0000000009 (stores: [])\n"
            + " --> KSTREAM-SINK-0000000010\n"
            + " <-- test-cogroup-merge\n"
            + " Sink: KSTREAM-SINK-0000000010 (topic: output)\n"
            + " <-- KTABLE-TOSTREAM-0000000009\n\n"));
}
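The map() call on stream1 changes the record key, which is why Streams inserts the store-repartition topic in front of the cogroup aggregation. As a contrast, a brief sketch (illustrative only, reusing builder and stringConsumed from the test): a value-only transformation keeps the key intact and does not force a repartition.

// Sketch: mapValues() does not modify the key, so the downstream groupByKey()
// can reuse the source partitioning and no repartition topic is created.
final KStream<String, String> upstream = builder.stream("one", stringConsumed);
final KGroupedStream<String, String> noRepartition = upstream
    .mapValues(value -> value + "-transformed")
    .groupByKey();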
Use of org.apache.kafka.streams.kstream.KTable in project kafka by apache.
The class SuppressScenarioTest, method shouldImmediatelyEmitEventsWithZeroEmitAfter.
@Test
public void shouldImmediatelyEmitEventsWithZeroEmitAfter() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, Long> valueCounts = builder
        .table("input", Consumed.with(STRING_SERDE, STRING_SERDE),
               Materialized.<String, String, KeyValueStore<Bytes, byte[]>>with(STRING_SERDE, STRING_SERDE)
                   .withCachingDisabled()
                   .withLoggingDisabled())
        .groupBy((k, v) -> new KeyValue<>(v, k), Grouped.with(STRING_SERDE, STRING_SERDE))
        .count();
    valueCounts.suppress(untilTimeLimit(ZERO, unbounded())).toStream()
        .to("output-suppressed", Produced.with(STRING_SERDE, Serdes.Long()));
    valueCounts.toStream().to("output-raw", Produced.with(STRING_SERDE, Serdes.Long()));
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, config)) {
        final TestInputTopic<String, String> inputTopic =
            driver.createInputTopic("input", STRING_SERIALIZER, STRING_SERIALIZER);
        inputTopic.pipeInput("k1", "v1", 0L);
        inputTopic.pipeInput("k1", "v2", 1L);
        inputTopic.pipeInput("k2", "v1", 2L);
        verify(drainProducerRecords(driver, "output-raw", STRING_DESERIALIZER, LONG_DESERIALIZER),
               asList(new KeyValueTimestamp<>("v1", 1L, 0L),
                      new KeyValueTimestamp<>("v1", 0L, 1L),
                      new KeyValueTimestamp<>("v2", 1L, 1L),
                      new KeyValueTimestamp<>("v1", 1L, 2L)));
        verify(drainProducerRecords(driver, "output-suppressed", STRING_DESERIALIZER, LONG_DESERIALIZER),
               asList(new KeyValueTimestamp<>("v1", 1L, 0L),
                      new KeyValueTimestamp<>("v1", 0L, 1L),
                      new KeyValueTimestamp<>("v2", 1L, 1L),
                      new KeyValueTimestamp<>("v1", 1L, 2L)));
        inputTopic.pipeInput("x", "x", 3L);
        verify(drainProducerRecords(driver, "output-raw", STRING_DESERIALIZER, LONG_DESERIALIZER),
               singletonList(new KeyValueTimestamp<>("x", 1L, 3L)));
        verify(drainProducerRecords(driver, "output-suppressed", STRING_DESERIALIZER, LONG_DESERIALIZER),
               singletonList(new KeyValueTimestamp<>("x", 1L, 3L)));
        inputTopic.pipeInput("x", "y", 4L);
        verify(drainProducerRecords(driver, "output-raw", STRING_DESERIALIZER, LONG_DESERIALIZER),
               asList(new KeyValueTimestamp<>("x", 0L, 4L), new KeyValueTimestamp<>("y", 1L, 4L)));
        verify(drainProducerRecords(driver, "output-suppressed", STRING_DESERIALIZER, LONG_DESERIALIZER),
               asList(new KeyValueTimestamp<>("x", 0L, 4L), new KeyValueTimestamp<>("y", 1L, 4L)));
    }
}
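With a zero time limit, the suppression buffer forwards every update immediately, which is why the raw and suppressed outputs above are identical. The sketch below shows the same operator with a non-zero limit and a bounded buffer; the 1000 ms limit, the record bound, and the "output-suppressed-bounded" topic are assumptions for illustration, not part of the test.

// Sketch: hold back updates per key for up to 1000 ms of stream time, emitting a key
// early only if the buffer grows beyond 1000 records.
valueCounts
    .suppress(untilTimeLimit(ofMillis(1000L), BufferConfig.maxRecords(1000).emitEarlyWhenFull()))
    .toStream()
    .to("output-suppressed-bounded", Produced.with(STRING_SERDE, Serdes.Long()));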
Use of org.apache.kafka.streams.kstream.KTable in project kafka by apache.
The class SuppressScenarioTest, method shouldSuppressIntermediateEventsWithTimeLimit.
@Test
public void shouldSuppressIntermediateEventsWithTimeLimit() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, Long> valueCounts = builder
        .table("input", Consumed.with(STRING_SERDE, STRING_SERDE),
               Materialized.<String, String, KeyValueStore<Bytes, byte[]>>with(STRING_SERDE, STRING_SERDE)
                   .withCachingDisabled()
                   .withLoggingDisabled())
        .groupBy((k, v) -> new KeyValue<>(v, k), Grouped.with(STRING_SERDE, STRING_SERDE))
        .count();
    valueCounts.suppress(untilTimeLimit(ofMillis(2L), unbounded())).toStream()
        .to("output-suppressed", Produced.with(STRING_SERDE, Serdes.Long()));
    valueCounts.toStream().to("output-raw", Produced.with(STRING_SERDE, Serdes.Long()));
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, config)) {
        final TestInputTopic<String, String> inputTopic =
            driver.createInputTopic("input", STRING_SERIALIZER, STRING_SERIALIZER);
        inputTopic.pipeInput("k1", "v1", 0L);
        inputTopic.pipeInput("k1", "v2", 1L);
        inputTopic.pipeInput("k2", "v1", 2L);
        verify(drainProducerRecords(driver, "output-raw", STRING_DESERIALIZER, LONG_DESERIALIZER),
               asList(new KeyValueTimestamp<>("v1", 1L, 0L),
                      new KeyValueTimestamp<>("v1", 0L, 1L),
                      new KeyValueTimestamp<>("v2", 1L, 1L),
                      new KeyValueTimestamp<>("v1", 1L, 2L)));
        verify(drainProducerRecords(driver, "output-suppressed", STRING_DESERIALIZER, LONG_DESERIALIZER),
               singletonList(new KeyValueTimestamp<>("v1", 1L, 2L)));
        // inserting a dummy "tick" record just to advance stream time
        inputTopic.pipeInput("tick", "tick", 3L);
        verify(drainProducerRecords(driver, "output-raw", STRING_DESERIALIZER, LONG_DESERIALIZER),
               singletonList(new KeyValueTimestamp<>("tick", 1L, 3L)));
        // the stream time is now 3, so it's time to emit this record
        verify(drainProducerRecords(driver, "output-suppressed", STRING_DESERIALIZER, LONG_DESERIALIZER),
               singletonList(new KeyValueTimestamp<>("v2", 1L, 1L)));
        inputTopic.pipeInput("tick", "tock", 4L);
        verify(drainProducerRecords(driver, "output-raw", STRING_DESERIALIZER, LONG_DESERIALIZER),
               asList(new KeyValueTimestamp<>("tick", 0L, 4L), new KeyValueTimestamp<>("tock", 1L, 4L)));
        // "tick" is still buffered, since it was first inserted at time 3, and it is only time 4 right now
        verify(drainProducerRecords(driver, "output-suppressed", STRING_DESERIALIZER, LONG_DESERIALIZER),
               emptyList());
    }
}
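untilTimeLimit() buffers only the latest update per key until stream time advances past the limit, as the tick records above demonstrate. For windowed aggregations there is a stricter variant that emits a single final result per window. The sketch below is illustrative only; the window size, grace period, and "output-final" topic are assumptions, and it reuses builder, the serdes, and the static imports from this test class.

// Sketch: emit exactly one final count per 100 ms window, once its 10 ms grace period has passed.
final KTable<Windowed<String>, Long> windowedCounts = builder
    .stream("input", Consumed.with(STRING_SERDE, STRING_SERDE))
    .groupByKey(Grouped.with(STRING_SERDE, STRING_SERDE))
    .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(100L), ofMillis(10L)))
    .count();
windowedCounts
    .suppress(Suppressed.untilWindowCloses(unbounded()))
    .toStream((windowedKey, value) -> windowedKey.key())
    .to("output-final", Produced.with(STRING_SERDE, Serdes.Long()));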