Use of org.apache.kafka.streams.test.TestRecord in project kafka by apache.
From the class KStreamSlidingWindowAggregateTest, the method shouldLogAndMeterWhenSkippingExpiredWindowByGrace:
@Test
public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() {
final String builtInMetricsVersion = StreamsConfig.METRICS_LATEST;
final StreamsBuilder builder = new StreamsBuilder();
final String topic = "topic";
final WindowBytesStoreSupplier storeSupplier = inOrderIterator
    ? new InOrderMemoryWindowStoreSupplier("InOrder", 50000L, 10L, false)
    : Stores.inMemoryWindowStore("Reverse", Duration.ofMillis(50000), Duration.ofMillis(10), false);
final KStream<String, String> stream1 = builder.stream(topic, Consumed.with(Serdes.String(), Serdes.String()));
stream1.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
    .windowedBy(SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(10), ofMillis(90)))
    .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.as(storeSupplier))
    .toStream()
    .to("output");
props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, builtInMetricsVersion);
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSlidingWindowAggregate.class);
final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
final TestInputTopic<String, String> inputTopic = driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
inputTopic.pipeInput("k", "100", 200L);
inputTopic.pipeInput("k", "0", 100L);
inputTopic.pipeInput("k", "1", 101L);
inputTopic.pipeInput("k", "2", 102L);
inputTopic.pipeInput("k", "3", 103L);
inputTopic.pipeInput("k", "4", 104L);
inputTopic.pipeInput("k", "5", 105L);
inputTopic.pipeInput("k", "6", 15L);
assertLatenessMetrics(driver, is(7.0), is(185.0), is(96.25));
assertThat(appender.getMessages(), hasItems(
    // left window for k@100
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[100] window=[90,100] expiration=[110] streamTime=[200]",
    // left window for k@101
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[101] window=[91,101] expiration=[110] streamTime=[200]",
    // left window for k@102
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[102] window=[92,102] expiration=[110] streamTime=[200]",
    // left window for k@103
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[103] window=[93,103] expiration=[110] streamTime=[200]",
    // left window for k@104
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[104] window=[94,104] expiration=[110] streamTime=[200]",
    // left window for k@105
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[105] window=[95,105] expiration=[110] streamTime=[200]",
    // left window for k@15
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[15] window=[5,15] expiration=[110] streamTime=[200]"));
final TestOutputTopic<Windowed<String>, String> outputTopic = driver.createOutputTopic("output", new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), new StringDeserializer());
assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>(new Windowed<>("k", new TimeWindow(190, 200)), "0+100", null, 200L)));
assertTrue(outputTopic.isEmpty());
}
}
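The props field used above is initialized elsewhere in the test class and is not part of this excerpt. A minimal sketch of properties that would satisfy TopologyTestDriver; the helper name mkTestProperties and the bootstrap placeholder are illustrative, and the driver never contacts a broker:

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;

private static Properties mkTestProperties() {
    final Properties props = new Properties();
    // Required by StreamsConfig even though TopologyTestDriver runs in-process.
    props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");
    props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
    // Default serdes matching the String keys and values piped in the test.
    props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    return props;
}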
Use of org.apache.kafka.streams.test.TestRecord in project kafka by apache.
From the class KStreamWindowAggregateTest, the method shouldLogAndMeterWhenSkippingExpiredWindowByGrace:
@Test
public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() {
final StreamsBuilder builder = new StreamsBuilder();
final String topic = "topic";
final KStream<String, String> stream1 = builder.stream(topic, Consumed.with(Serdes.String(), Serdes.String()));
stream1.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
    .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(90L)).advanceBy(ofMillis(10)))
    .aggregate(
        () -> "",
        MockAggregator.toStringInstance("+"),
        Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic1-Canonicalized")
            .withValueSerde(Serdes.String())
            .withCachingDisabled()
            .withLoggingDisabled())
    .toStream()
    .map((key, value) -> new KeyValue<>(key.toString(), value))
    .to("output");
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class);
final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
final TestInputTopic<String, String> inputTopic = driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
inputTopic.pipeInput("k", "100", 200L);
inputTopic.pipeInput("k", "0", 100L);
inputTopic.pipeInput("k", "1", 101L);
inputTopic.pipeInput("k", "2", 102L);
inputTopic.pipeInput("k", "3", 103L);
inputTopic.pipeInput("k", "4", 104L);
inputTopic.pipeInput("k", "5", 105L);
inputTopic.pipeInput("k", "6", 6L);
assertLatenessMetrics(driver, is(7.0), is(194.0), is(97.375));
assertThat(appender.getMessages(), hasItems(
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[100] window=[100,110) expiration=[110] streamTime=[200]",
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[101] window=[100,110) expiration=[110] streamTime=[200]",
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[102] window=[100,110) expiration=[110] streamTime=[200]",
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[103] window=[100,110) expiration=[110] streamTime=[200]",
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[104] window=[100,110) expiration=[110] streamTime=[200]",
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[105] window=[100,110) expiration=[110] streamTime=[200]",
    "Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[6] window=[0,10) expiration=[110] streamTime=[200]"));
final TestOutputTopic<String, String> outputTopic = driver.createOutputTopic("output", new StringDeserializer(), new StringDeserializer());
assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("[k@200/210]", "+100", null, 200L)));
assertTrue(outputTopic.isEmpty());
}
}
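The expiration value in the log messages is simply stream time minus grace. An illustrative calculation (plain Java, not Kafka internals) for the first skipped record above, under the tumbling windows configured in the test:

// streamTime was advanced to 200 by the first record; grace is 90.
final long streamTime = 200L;
final long grace = 90L;
final long expiration = streamTime - grace; // 110, printed in every log line
// Tumbling windows of size 10: the record at t=100 falls into [100,110).
final long windowSize = 10L;
final long windowStart = (100L / windowSize) * windowSize; // 100
final long windowEnd = windowStart + windowSize; // 110 (end is exclusive)
// The window is expired because its end does not exceed the expiration point,
// so the record is skipped, logged, and counted in the dropped-records metrics.
final boolean skipped = windowEnd <= expiration; // true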
Use of org.apache.kafka.streams.test.TestRecord in project kafka by apache.
From the class ProcessorTopologyTest, the method testDrivingInternalRepartitioningForwardingTimestampTopology:
@Test
public void testDrivingInternalRepartitioningForwardingTimestampTopology() {
driver = new TopologyTestDriver(createInternalRepartitioningWithValueTimestampTopology(), props);
final TestInputTopic<String, String> inputTopic = driver.createInputTopic(INPUT_TOPIC_1, STRING_SERIALIZER, STRING_SERIALIZER);
inputTopic.pipeInput("key1", "value1@1000");
inputTopic.pipeInput("key2", "value2@2000");
inputTopic.pipeInput("key3", "value3@3000");
final TestOutputTopic<String, String> outputTopic = driver.createOutputTopic(OUTPUT_TOPIC_1, STRING_DESERIALIZER, STRING_DESERIALIZER);
assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("key1", "value1", null, 1000L)));
assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("key2", "value2", null, 2000L)));
assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("key3", "value3", null, 3000L)));
}
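createInternalRepartitioningWithValueTimestampTopology() is a helper in the test class and is not shown in this excerpt. Judging by the inputs ("value1@1000") and the expected outputs ("value1" at timestamp 1000), it extracts a timestamp suffix from the value and forwards the record through a repartition topic with that timestamp. A plausible sketch of the extraction step using the Processor API (the class name here is hypothetical):

import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class ValueTimestampExtractingProcessor implements Processor<String, String, String, String> {
    private ProcessorContext<String, String> context;

    @Override
    public void init(final ProcessorContext<String, String> context) {
        this.context = context;
    }

    @Override
    public void process(final Record<String, String> record) {
        // Split "value1@1000" into the payload and the embedded timestamp.
        final int at = record.value().lastIndexOf('@');
        final String value = record.value().substring(0, at);
        final long timestamp = Long.parseLong(record.value().substring(at + 1));
        // Forward with the extracted value and the timestamp taken from the payload.
        context.forward(record.withValue(value).withTimestamp(timestamp));
    }
}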
Use of org.apache.kafka.streams.test.TestRecord in project kafka by apache.
From the class KStreamImplTest, the method shouldSupportTriggerMaterializedWithKTableFromKStream:
@Test
public void shouldSupportTriggerMaterializedWithKTableFromKStream() {
final Consumed<String, String> consumed = Consumed.with(Serdes.String(), Serdes.String());
final StreamsBuilder builder = new StreamsBuilder();
final String input = "input";
final String output = "output";
final String storeName = "store";
builder.stream(input, consumed)
    .toTable()
    .mapValues(
        value -> value.charAt(0) - (int) 'a',
        Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as(storeName)
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.Integer()))
    .toStream()
    .to(output);
final Topology topology = builder.build(props);
final String topologyDescription = topology.describe().toString();
assertThat(topologyDescription, equalTo("Topologies:\n"
    + "   Sub-topology: 0\n"
    + "    Source: KSTREAM-SOURCE-0000000000 (topics: [input])\n"
    + "      --> KSTREAM-TOTABLE-0000000001\n"
    + "    Processor: KSTREAM-TOTABLE-0000000001 (stores: [])\n"
    + "      --> KTABLE-MAPVALUES-0000000003\n"
    + "      <-- KSTREAM-SOURCE-0000000000\n"
    + "    Processor: KTABLE-MAPVALUES-0000000003 (stores: [store])\n"
    + "      --> KTABLE-TOSTREAM-0000000004\n"
    + "      <-- KSTREAM-TOTABLE-0000000001\n"
    + "    Processor: KTABLE-TOSTREAM-0000000004 (stores: [])\n"
    + "      --> KSTREAM-SINK-0000000005\n"
    + "      <-- KTABLE-MAPVALUES-0000000003\n"
    + "    Sink: KSTREAM-SINK-0000000005 (topic: output)\n"
    + "      <-- KTABLE-TOSTREAM-0000000004\n\n"));
try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
final TestInputTopic<String, String> inputTopic = driver.createInputTopic(input, new StringSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
final TestOutputTopic<String, Integer> outputTopic = driver.createOutputTopic(output, Serdes.String().deserializer(), Serdes.Integer().deserializer());
final KeyValueStore<String, Integer> store = driver.getKeyValueStore(storeName);
inputTopic.pipeInput("A", "green", 10L);
inputTopic.pipeInput("B", "green", 9L);
inputTopic.pipeInput("A", "blue", 12L);
inputTopic.pipeInput("C", "yellow", 15L);
inputTopic.pipeInput("D", "green", 11L);
final Map<String, Integer> expectedStore = new HashMap<>();
expectedStore.putIfAbsent("A", 1);
expectedStore.putIfAbsent("B", 6);
expectedStore.putIfAbsent("C", 24);
expectedStore.putIfAbsent("D", 6);
assertEquals(expectedStore, asMap(store));
assertEquals(
    asList(
        new TestRecord<>("A", 6, Instant.ofEpochMilli(10)),
        new TestRecord<>("B", 6, Instant.ofEpochMilli(9)),
        new TestRecord<>("A", 1, Instant.ofEpochMilli(12)),
        new TestRecord<>("C", 24, Instant.ofEpochMilli(15)),
        new TestRecord<>("D", 6, Instant.ofEpochMilli(11))),
    outputTopic.readRecordsToList());
}
}
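asMap is a helper in the test class, not part of the excerpt. A minimal sketch that drains the driver's KeyValueStore into a plain map for the assertion:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.KeyValueStore;

private static Map<String, Integer> asMap(final KeyValueStore<String, Integer> store) {
    final Map<String, Integer> result = new HashMap<>();
    // KeyValueIterator must be closed; try-with-resources handles it.
    try (final KeyValueIterator<String, Integer> it = store.all()) {
        it.forEachRemaining(kv -> result.put(kv.key, kv.value));
    }
    return result;
}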
Use of org.apache.kafka.streams.test.TestRecord in project kafka by apache.
From the class SuppressScenarioTest, the method shouldSupportFinalResultsForSlidingWindows:
@Test
public void shouldSupportFinalResultsForSlidingWindows() {
final StreamsBuilder builder = new StreamsBuilder();
final KTable<Windowed<String>, Long> valueCounts = builder
    .stream("input", Consumed.with(STRING_SERDE, STRING_SERDE))
    .groupBy((String k, String v) -> k, Grouped.with(STRING_SERDE, STRING_SERDE))
    .windowedBy(SlidingWindows.withTimeDifferenceAndGrace(ofMillis(5L), ofMillis(15L)))
    .count(Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("counts")
        .withCachingDisabled()
        .withKeySerde(STRING_SERDE));
valueCounts.suppress(untilWindowCloses(unbounded()))
    .toStream()
    .map((final Windowed<String> k, final Long v) -> new KeyValue<>(k.toString(), v))
    .to("output-suppressed", Produced.with(STRING_SERDE, Serdes.Long()));
valueCounts.toStream()
    .map((final Windowed<String> k, final Long v) -> new KeyValue<>(k.toString(), v))
    .to("output-raw", Produced.with(STRING_SERDE, Serdes.Long()));
final Topology topology = builder.build();
System.out.println(topology.describe());
try (final TopologyTestDriver driver = new TopologyTestDriver(topology, config)) {
final TestInputTopic<String, String> inputTopic = driver.createInputTopic("input", STRING_SERIALIZER, STRING_SERIALIZER);
inputTopic.pipeInput("k1", "v1", 10L);
inputTopic.pipeInput("k1", "v1", 11L);
inputTopic.pipeInput("k1", "v1", 10L);
inputTopic.pipeInput("k1", "v1", 13L);
inputTopic.pipeInput("k1", "v1", 10L);
inputTopic.pipeInput("k1", "v1", 24L);
// this update should get dropped, since the previous event advanced the stream time and closed the window.
inputTopic.pipeInput("k1", "v1", 5L);
inputTopic.pipeInput("k1", "v1", 7L);
// final record to advance stream time and flush windows
inputTopic.pipeInput("k1", "v1", 90L);
final Comparator<TestRecord<String, Long>> comparator =
    Comparator.comparing((TestRecord<String, Long> o) -> o.getKey())
        .thenComparing((TestRecord<String, Long> o) -> o.timestamp());
final List<TestRecord<String, Long>> actual = drainProducerRecords(driver, "output-raw", STRING_DESERIALIZER, LONG_DESERIALIZER);
actual.sort(comparator);
verify(actual, asList(
    // right window for k1@10 created when k1@11 is processed
    new KeyValueTimestamp<>("[k1@11/16]", 1L, 11L),
    // right window for k1@10 updated when k1@13 is processed
    new KeyValueTimestamp<>("[k1@11/16]", 2L, 13L),
    // right window for k1@11 created when k1@13 is processed
    new KeyValueTimestamp<>("[k1@12/17]", 1L, 13L),
    // left window for k1@24 created when k1@24 is processed
    new KeyValueTimestamp<>("[k1@19/24]", 1L, 24L),
    // left window for k1@10 created when k1@10 is processed
    new KeyValueTimestamp<>("[k1@5/10]", 1L, 10L),
    // left window for k1@10 updated when k1@10 is processed
    new KeyValueTimestamp<>("[k1@5/10]", 2L, 10L),
    // left window for k1@10 updated when k1@10 is processed
    new KeyValueTimestamp<>("[k1@5/10]", 3L, 10L),
    // left window for k1@10 updated when k1@5 is processed
    new KeyValueTimestamp<>("[k1@5/10]", 4L, 10L),
    // left window for k1@10 updated when k1@7 is processed
    new KeyValueTimestamp<>("[k1@5/10]", 5L, 10L),
    // left window for k1@11 created when k1@11 is processed
    new KeyValueTimestamp<>("[k1@6/11]", 2L, 11L),
    // left window for k1@11 updated when k1@10 is processed
    new KeyValueTimestamp<>("[k1@6/11]", 3L, 11L),
    // left window for k1@11 updated when k1@10 is processed
    new KeyValueTimestamp<>("[k1@6/11]", 4L, 11L),
    // left window for k1@11 updated when k1@7 is processed
    new KeyValueTimestamp<>("[k1@6/11]", 5L, 11L),
    // left window for k1@13 created when k1@13 is processed
    new KeyValueTimestamp<>("[k1@8/13]", 4L, 13L),
    // left window for k1@13 updated when k1@10 is processed
    new KeyValueTimestamp<>("[k1@8/13]", 5L, 13L),
    // left window for k1@90 created when k1@90 is processed
    new KeyValueTimestamp<>("[k1@85/90]", 1L, 90L)));
verify(
    drainProducerRecords(driver, "output-suppressed", STRING_DESERIALIZER, LONG_DESERIALIZER),
    asList(
        new KeyValueTimestamp<>("[k1@5/10]", 5L, 10L),
        new KeyValueTimestamp<>("[k1@6/11]", 5L, 11L),
        new KeyValueTimestamp<>("[k1@8/13]", 5L, 13L),
        new KeyValueTimestamp<>("[k1@11/16]", 2L, 13L),
        new KeyValueTimestamp<>("[k1@12/17]", 1L, 13L),
        new KeyValueTimestamp<>("[k1@19/24]", 1L, 24L)));
}
}
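drainProducerRecords is likewise a local helper of SuppressScenarioTest. A minimal sketch built only on the public TopologyTestDriver API, assuming the helper just returns the topic's records in produced order:

import java.util.List;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.test.TestRecord;

private static <K, V> List<TestRecord<K, V>> drainProducerRecords(
        final TopologyTestDriver driver,
        final String topic,
        final Deserializer<K> keyDeserializer,
        final Deserializer<V> valueDeserializer) {
    return driver
        .createOutputTopic(topic, keyDeserializer, valueDeserializer)
        .readRecordsToList();
}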