Use of org.apache.kafka.streams.TopologyTestDriver in the Apache Kafka project: class AbstractStreamTest, method testShouldBeExtensible.
@Test
public void testShouldBeExtensible() {
    final StreamsBuilder builder = new StreamsBuilder();
    final int[] expectedKeys = new int[] {1, 2, 3, 4, 5, 6, 7};
    final MockApiProcessorSupplier<Integer, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
    final String topicName = "topic";
    final ExtendedKStream<Integer, String> stream =
        new ExtendedKStream<>(builder.stream(topicName, Consumed.with(Serdes.Integer(), Serdes.String())));
    stream.randomFilter().process(supplier);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build())) {
        final TestInputTopic<Integer, String> inputTopic =
            driver.createInputTopic(topicName, new IntegerSerializer(), new StringSerializer());
        for (final int expectedKey : expectedKeys) {
            inputTopic.pipeInput(expectedKey, "V" + expectedKey);
        }
        // randomFilter() drops records non-deterministically, so only an upper bound can be asserted
        assertTrue(supplier.theCapturedProcessor().processed().size() <= expectedKeys.length);
    }
}
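Because randomFilter() is non-deterministic, the test above can only assert an upper bound. For comparison, here is a minimal sketch of the same driver pattern with a deterministic filter and a public TestOutputTopic instead of Kafka's internal mock processor; the topic names "in" and "out" are illustrative, and the usual kafka-streams-test-utils imports are assumed.

@Test
public void exampleDeterministicFilter() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("in", Consumed.with(Serdes.Integer(), Serdes.String()))
        .filter((key, value) -> key % 2 == 0) // deterministic stand-in for randomFilter()
        .to("out", Produced.with(Serdes.Integer(), Serdes.String()));
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build())) {
        final TestInputTopic<Integer, String> in =
            driver.createInputTopic("in", new IntegerSerializer(), new StringSerializer());
        final TestOutputTopic<Integer, String> out =
            driver.createOutputTopic("out", new IntegerDeserializer(), new StringDeserializer());
        in.pipeInput(1, "V1");
        in.pipeInput(2, "V2");
        // only the even key passes the filter
        assertEquals(Collections.singletonList(KeyValue.pair(2, "V2")), out.readKeyValuesToList());
    }
}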
Use of org.apache.kafka.streams.TopologyTestDriver in the Apache Kafka project: class KStreamKStreamOuterJoinTest, method testWindowing.
@Test
public void testWindowing() {
    final StreamsBuilder builder = new StreamsBuilder();
    final int[] expectedKeys = new int[] {0, 1, 2, 3};
    final KStream<Integer, String> stream1;
    final KStream<Integer, String> stream2;
    final KStream<Integer, String> joined;
    final MockApiProcessorSupplier<Integer, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
    stream1 = builder.stream(topic1, consumed);
    stream2 = builder.stream(topic2, consumed);
    joined = stream1.outerJoin(
        stream2,
        MockValueJoiner.TOSTRING_JOINER,
        JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)),
        StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String()));
    joined.process(supplier);
    final Collection<Set<String>> copartitionGroups =
        TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups();
    assertEquals(1, copartitionGroups.size());
    assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next());
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<Integer, String> inputTopic1 =
            driver.createInputTopic(topic1, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final TestInputTopic<Integer, String> inputTopic2 =
            driver.createInputTopic(topic2, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final MockApiProcessor<Integer, String, Void, Void> processor = supplier.theCapturedProcessor();
        final long time = 0L;
        // push the first two items to the primary stream; the other window is empty, so no output is produced
        // w1 = {}
        // w2 = {}
        // --> w1 = { 0:A0 (ts: 0), 1:A1 (ts: 0) }
        // --> w2 = {}
        for (int i = 0; i < 2; i++) {
            inputTopic1.pipeInput(expectedKeys[i], "A" + expectedKeys[i], time);
        }
        processor.checkAndClearProcessResult();
        // push all four items to the other stream; only keys 0 and 1 find join partners
        // w1 = { 0:A0 (ts: 0), 1:A1 (ts: 0) }
        // w2 = {}
        // --> w1 = { 0:A0 (ts: 0), 1:A1 (ts: 0) }
        // --> w2 = { 0:a0 (ts: 0), 1:a1 (ts: 0), 2:a2 (ts: 0), 3:a3 (ts: 0) }
        for (final int expectedKey : expectedKeys) {
            inputTopic2.pipeInput(expectedKey, "a" + expectedKey, time);
        }
        processor.checkAndClearProcessResult(
            new KeyValueTimestamp<>(0, "A0+a0", 0L),
            new KeyValueTimestamp<>(1, "A1+a1", 0L));
        testUpperWindowBound(expectedKeys, driver, processor);
        testLowerWindowBound(expectedKeys, driver, processor);
    }
}
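The two helper calls at the end probe the window bounds. With JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100)), two records join exactly when their timestamps differ by at most 100 ms, and the join result carries the later of the two timestamps. A short sketch of the boundary behavior, assuming a fresh driver with the same setup as above (this is not the project's actual helper code):

inputTopic1.pipeInput(0, "A0", 0L);
inputTopic2.pipeInput(0, "a0", 100L);  // |100 - 0| <= 100: joins at the upper bound
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "A0+a0", 100L));
inputTopic2.pipeInput(0, "a1", 101L);  // |101 - 0| > 100: no join with A0
processor.checkAndClearProcessResult(); // A0 already joined, so no null-join is emitted either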
Use of org.apache.kafka.streams.TopologyTestDriver in the Apache Kafka project: class KStreamKStreamOuterJoinTest, method testLeftExpiredNonJoinedRecordsAreEmittedByTheRightProcessor.
@Test
public void testLeftExpiredNonJoinedRecordsAreEmittedByTheRightProcessor() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Integer, String> stream1;
    final KStream<Integer, String> stream2;
    final KStream<Integer, String> joined;
    final MockApiProcessorSupplier<Integer, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
    stream1 = builder.stream(topic1, consumed);
    stream2 = builder.stream(topic2, consumed);
    joined = stream1.outerJoin(
        stream2,
        MockValueJoiner.TOSTRING_JOINER,
        JoinWindows.ofTimeDifferenceAndGrace(ofMillis(100L), ofMillis(0L)),
        StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String()));
    joined.process(supplier);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<Integer, String> inputTopic1 =
            driver.createInputTopic(topic1, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final TestInputTopic<Integer, String> inputTopic2 =
            driver.createInputTopic(topic2, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final MockApiProcessor<Integer, String, Void, Void> processor = supplier.theCapturedProcessor();
        final long windowStart = 0L;
        // no joins detected; no null-joins emitted
        inputTopic1.pipeInput(0, "A0", windowStart + 1L);
        inputTopic1.pipeInput(1, "A1", windowStart + 2L);
        inputTopic1.pipeInput(0, "A0-0", windowStart + 3L);
        processor.checkAndClearProcessResult();
        // join detected; no null-joins emitted
        inputTopic2.pipeInput(1, "a1", windowStart + 3L);
        processor.checkAndClearProcessResult(new KeyValueTimestamp<>(1, "A1+a1", windowStart + 3L));
        // a dummy record in the right topic emits the expired non-joined records from the left topic
        inputTopic2.pipeInput(2, "dummy", windowStart + 401L);
        processor.checkAndClearProcessResult(
            new KeyValueTimestamp<>(0, "A0+null", windowStart + 1L),
            new KeyValueTimestamp<>(0, "A0-0+null", windowStart + 3L));
        // flush the internal non-joined state store by joining the dummy record
        inputTopic1.pipeInput(2, "dummy", windowStart + 402L);
        processor.checkAndClearProcessResult(new KeyValueTimestamp<>(2, "dummy+dummy", windowStart + 402L));
    }
}
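The arithmetic behind the dummy record: with a 100 ms window and zero grace, a left record at timestamp T can no longer join once stream time passes T + 100, at which point it becomes eligible for emission as a null-join. The record at windowStart + 401 is comfortably past every pending close time. A smaller advance would expire only some of the records; a sketch, assuming the same setup up to the join at windowStart + 3 (the key 9 "tick" record is hypothetical and joins nothing):

inputTopic2.pipeInput(9, "tick", windowStart + 102L);
// stream time 102 is past A0's close (1 + 100) but not past A0-0's (3 + 100),
// so only A0 is emitted as a null-join
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(0, "A0+null", windowStart + 1L));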
Use of org.apache.kafka.streams.TopologyTestDriver in the Apache Kafka project: class KStreamKStreamOuterJoinTest, method testGracePeriod.
@Test
public void testGracePeriod() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<Integer, String> stream1;
    final KStream<Integer, String> stream2;
    final KStream<Integer, String> joined;
    final MockApiProcessorSupplier<Integer, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
    stream1 = builder.stream(topic1, consumed);
    stream2 = builder.stream(topic2, consumed);
    joined = stream1.outerJoin(
        stream2,
        MockValueJoiner.TOSTRING_JOINER,
        JoinWindows.ofTimeDifferenceAndGrace(ofMillis(100), ofMillis(10)),
        StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String()));
    joined.process(supplier);
    final Collection<Set<String>> copartitionGroups =
        TopologyWrapper.getInternalTopologyBuilder(builder.build()).copartitionGroups();
    assertEquals(1, copartitionGroups.size());
    assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), copartitionGroups.iterator().next());
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<Integer, String> inputTopic1 =
            driver.createInputTopic(topic1, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final TestInputTopic<Integer, String> inputTopic2 =
            driver.createInputTopic(topic2, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        final MockApiProcessor<Integer, String, Void, Void> processor = supplier.theCapturedProcessor();
        // push one item to the primary stream and one to the other stream; this produces no output
        // because there is no join and the window has not ended
        // w1 = {}
        // w2 = {}
        // --> w1 = { 0:A0 (ts: 0) }
        // --> w2 = { 1:a1 (ts: 0) }
        inputTopic1.pipeInput(0, "A0", 0L);
        inputTopic2.pipeInput(1, "a1", 0L);
        processor.checkAndClearProcessResult();
        // push one item to each stream with a timestamp after the previous window ended (but not closed);
        // this produces no joined records because the window has ended, and no non-joined records
        // because the grace period keeps the window from closing
        // w1 = { 0:A0 (ts: 0) }
        // w2 = { 1:a1 (ts: 0) }
        // --> w1 = { 0:A0 (ts: 0), 1:A1 (ts: 101) }
        // --> w2 = { 1:a1 (ts: 0), 0:a0 (ts: 101) }
        inputTopic2.pipeInput(0, "a0", 101L);
        inputTopic1.pipeInput(1, "A1", 101L);
        processor.checkAndClearProcessResult();
        // push a dummy item to either stream after the window has closed; this produces all expired
        // non-joined records, because stream time 211 is past the ts-0 window close (0 + 100 + 10)
        // w1 = { 0:A0 (ts: 0), 1:A1 (ts: 101) }
        // w2 = { 1:a1 (ts: 0), 0:a0 (ts: 101) }
        // --> w1 = { 0:A0 (ts: 0), 1:A1 (ts: 101) }
        // --> w2 = { 1:a1 (ts: 0), 0:a0 (ts: 101), 0:dummy (ts: 211) }
        inputTopic2.pipeInput(0, "dummy", 211);
        processor.checkAndClearProcessResult(
            new KeyValueTimestamp<>(1, "null+a1", 0L),
            new KeyValueTimestamp<>(0, "A0+null", 0L));
    }
}
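Grace arithmetic for this test: a record at timestamp T stays joinable until stream time exceeds T + 100 (window) + 10 (grace). The dummy at 211 is past 0 + 110, so the ts-0 records expire, but the ts-101 records (close time 211) stay open. The grace period also bounds how out-of-order a record may be before it is dropped; a sketch assuming a fresh driver with the same join (the key 5 and key 6 records are hypothetical):

inputTopic1.pipeInput(5, "A5", 0L);
inputTopic2.pipeInput(6, "later", 150L); // advances stream time past A5's close (0 + 100 + 10)
processor.checkAndClearProcessResult(new KeyValueTimestamp<>(5, "A5+null", 0L));
inputTopic2.pipeInput(5, "a5", 30L);     // |30 - 0| <= 100 would have joined, but 30 + 110 < 150:
processor.checkAndClearProcessResult();  // the record is too late and is dropped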
Use of org.apache.kafka.streams.TopologyTestDriver in the Apache Kafka project: class KGroupedTableImplTest, method shouldAggregateAndMaterializeResults.
@Test
public void shouldAggregateAndMaterializeResults() {
    builder.table(topic, Consumed.with(Serdes.String(), Serdes.String()))
        .groupBy(MockMapper.selectValueKeyValueMapper(), Grouped.with(Serdes.String(), Serdes.String()))
        .aggregate(
            MockInitializer.STRING_INIT,
            MockAggregator.TOSTRING_ADDER,
            MockAggregator.TOSTRING_REMOVER,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("aggregate")
                .withValueSerde(Serdes.String())
                .withKeySerde(Serdes.String()));
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(topic, driver);
        {
            // plain view of the materialized store
            final KeyValueStore<String, String> aggregate = driver.getKeyValueStore("aggregate");
            assertThat(aggregate.get("1"), equalTo("0+1+1+1"));
            assertThat(aggregate.get("2"), equalTo("0+2+2"));
        }
        {
            // timestamped view of the same store, exposing the timestamp of the latest update per key
            final KeyValueStore<String, ValueAndTimestamp<String>> aggregate = driver.getTimestampedKeyValueStore("aggregate");
            assertThat(aggregate.get("1"), equalTo(ValueAndTimestamp.make("0+1+1+1", 50L)));
            assertThat(aggregate.get("2"), equalTo(ValueAndTimestamp.make("0+2+2", 60L)));
        }
    }
}
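KGroupedTable aggregation differs from stream aggregation: when a key in the upstream table is updated, the old value is first routed to the subtractor and the new value to the adder, which is why the materialized values read like running traces ("0+1+1+1" is the initializer "0" plus three added values grouped under key "1"). A hypothetical equivalent of the Mock* helpers used above:

final Initializer<String> init = () -> "0";
final Aggregator<String, String, String> adder = (key, value, aggregate) -> aggregate + "+" + value;
final Aggregator<String, String, String> subtractor = (key, value, aggregate) -> aggregate + "-" + value;
// for an upstream update (k: "1" -> "2"), the subtractor first sees the old value "1",
// then the adder sees the new value "2"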