Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
Class KStreamKTableJoinTest, method shouldCreateRepartitionTopicsWithUserProvidedName:
@Test
public void shouldCreateRepartitionTopicsWithUserProvidedName() {
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties props = new Properties();
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.NO_OPTIMIZATION);
    final KStream<String, String> streamA = builder.stream("topic", Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> tableB = builder.table("topic2", Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> tableC = builder.table("topic3", Consumed.with(Serdes.String(), Serdes.String()));
    // map() changes the key, so each downstream join needs a repartition step
    final KStream<String, String> rekeyedStream = streamA.map((k, v) -> new KeyValue<>(v, k));
    rekeyedStream.join(tableB, (value1, value2) -> value1 + value2,
        Joined.with(Serdes.String(), Serdes.String(), Serdes.String(), "first-join")).to("out-one");
    rekeyedStream.join(tableC, (value1, value2) -> value1 + value2,
        Joined.with(Serdes.String(), Serdes.String(), Serdes.String(), "second-join")).to("out-two");
    final Topology topology = builder.build(props);
    assertEquals(expectedTopologyWithUserProvidedRepartitionTopicNames, topology.describe().toString());
}
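The name passed to Joined.with (here "first-join" and "second-join") becomes the prefix of the repartition topic Kafka Streams creates before each stream-table join, so the two joins above get two distinct repartition topics. A minimal sketch of how to observe this outside the test, assuming hypothetical input topics "words" and "users"; the described topology should list an internal topic named "my-join-repartition":

StreamsBuilder builder = new StreamsBuilder();
KStream<String, String> stream = builder.stream("words", Consumed.with(Serdes.String(), Serdes.String()));
KTable<String, String> table = builder.table("users", Consumed.with(Serdes.String(), Serdes.String()));
stream.selectKey((k, v) -> v)  // key change marks the stream for repartitioning
    .join(table, (v1, v2) -> v1 + v2,
        Joined.with(Serdes.String(), Serdes.String(), Serdes.String(), "my-join"))
    .to("out");
// print the topology; it should contain a topic named "my-join-repartition"
System.out.println(builder.build().describe());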
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
Class KStreamKTableJoinTest, method shouldReuseRepartitionTopicWithGeneratedName:
@Test
public void shouldReuseRepartitionTopicWithGeneratedName() {
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties props = new Properties();
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.NO_OPTIMIZATION);
    final KStream<String, String> streamA = builder.stream("topic", Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> tableB = builder.table("topic2", Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> tableC = builder.table("topic3", Consumed.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> rekeyedStream = streamA.map((k, v) -> new KeyValue<>(v, k));
    // no Joined names: both joins fall back to a generated repartition topic name
    rekeyedStream.join(tableB, (value1, value2) -> value1 + value2).to("out-one");
    rekeyedStream.join(tableC, (value1, value2) -> value1 + value2).to("out-two");
    final Topology topology = builder.build(props);
    assertEquals(expectedTopologyWithGeneratedRepartitionTopicNames, topology.describe().toString());
}
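Without explicit Joined names, the repartition topic name is generated from the upstream processor name (something like KSTREAM-MAP-0000000001-repartition), and, as the test name indicates, that single generated topic is reused by both joins rather than one topic being created per join. A rough sketch for counting the repartition topics in a described topology, using simple string matching rather than any official API:

final String described = builder.build(props).describe().toString();
final long repartitionTopicCount = Arrays.stream(described.split("[\\s(),\\[\\]]+"))
    .filter(token -> token.endsWith("-repartition"))  // generated and user-provided names both end with "-repartition"
    .distinct()
    .count();
// expected: 1 for this generated-name test, 2 for the user-named variant above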
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
Class KStreamWindowAggregateTest, method shouldLogAndMeterWhenSkippingExpiredWindowByGrace:
@Test
public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic = "topic";
    final KStream<String, String> stream1 = builder.stream(topic, Consumed.with(Serdes.String(), Serdes.String()));
    stream1.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(90L)).advanceBy(ofMillis(10)))
        .aggregate(
            () -> "",
            MockAggregator.toStringInstance("+"),
            Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic1-Canonicalized")
                .withValueSerde(Serdes.String())
                .withCachingDisabled()
                .withLoggingDisabled())
        .toStream()
        .map((key, value) -> new KeyValue<>(key.toString(), value))
        .to("output");
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class);
         final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> inputTopic =
            driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
        // the first record advances stream time to 200, closing every window that ends at or before 110 (200 - 90)
        inputTopic.pipeInput("k", "100", 200L);
        inputTopic.pipeInput("k", "0", 100L);
        inputTopic.pipeInput("k", "1", 101L);
        inputTopic.pipeInput("k", "2", 102L);
        inputTopic.pipeInput("k", "3", 103L);
        inputTopic.pipeInput("k", "4", 104L);
        inputTopic.pipeInput("k", "5", 105L);
        inputTopic.pipeInput("k", "6", 6L);
        assertLatenessMetrics(driver, is(7.0), is(194.0), is(97.375));
        assertThat(appender.getMessages(), hasItems(
            "Skipping record for expired window. topic=[topic] partition=[0] offset=[1] timestamp=[100] window=[100,110) expiration=[110] streamTime=[200]",
            "Skipping record for expired window. topic=[topic] partition=[0] offset=[2] timestamp=[101] window=[100,110) expiration=[110] streamTime=[200]",
            "Skipping record for expired window. topic=[topic] partition=[0] offset=[3] timestamp=[102] window=[100,110) expiration=[110] streamTime=[200]",
            "Skipping record for expired window. topic=[topic] partition=[0] offset=[4] timestamp=[103] window=[100,110) expiration=[110] streamTime=[200]",
            "Skipping record for expired window. topic=[topic] partition=[0] offset=[5] timestamp=[104] window=[100,110) expiration=[110] streamTime=[200]",
            "Skipping record for expired window. topic=[topic] partition=[0] offset=[6] timestamp=[105] window=[100,110) expiration=[110] streamTime=[200]",
            "Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[6] window=[0,10) expiration=[110] streamTime=[200]"));
        final TestOutputTopic<String, String> outputTopic =
            driver.createOutputTopic("output", new StringDeserializer(), new StringDeserializer());
        assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("[k@200/210]", "+100", null, 200L)));
        assertTrue(outputTopic.isEmpty());
    }
}
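The arithmetic behind the skipped records: with a window size of 10 ms and a grace period of 90 ms, a window [start, end) stops accepting records once stream time reaches end + grace. The first record advances stream time to 200, so window [100,110) closes at 110 + 90 = 200 and every later record that falls into it (timestamps 100 through 105) is dropped, as is the far-late record at timestamp 6 for window [0,10). The assertLatenessMetrics call above is a private helper of the test class; as a hedged sketch, the underlying task-level dropped-records counter can be read from the driver like this (metric name and group per current Kafka Streams naming):

final double droppedTotal = driver.metrics().entrySet().stream()
    .filter(entry -> entry.getKey().name().equals("dropped-records-total")
        && entry.getKey().group().equals("stream-task-metrics"))
    .mapToDouble(entry -> (Double) entry.getValue().metricValue())
    .sum();
// seven expired records were skipped above, so this should be 7.0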
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
Class KStreamRepartitionTest, method shouldThrowAnExceptionWhenNumberOfPartitionsOfRepartitionOperationsDoNotMatchWhenJoining:
@Test
public void shouldThrowAnExceptionWhenNumberOfPartitionsOfRepartitionOperationsDoNotMatchWhenJoining() {
    final String topicB = "topic-b";
    final String outputTopic = "topic-output";
    final String topicBRepartitionedName = "topic-b-scale-up";
    final String inputTopicRepartitionedName = "input-topic-scale-up";
    final int topicBNumberOfPartitions = 2;
    final int inputTopicNumberOfPartitions = 4;
    final StreamsBuilder builder = new StreamsBuilder();
    final Repartitioned<Integer, String> inputTopicRepartitioned = Repartitioned
        .<Integer, String>as(inputTopicRepartitionedName)
        .withNumberOfPartitions(inputTopicNumberOfPartitions);
    final Repartitioned<Integer, String> topicBRepartitioned = Repartitioned
        .<Integer, String>as(topicBRepartitionedName)
        .withNumberOfPartitions(topicBNumberOfPartitions);
    final KStream<Integer, String> topicBStream = builder
        .stream(topicB, Consumed.with(Serdes.Integer(), Serdes.String()))
        .repartition(topicBRepartitioned);
    // joining a 4-partition stream with a 2-partition stream violates the co-partitioning requirement
    builder.stream(inputTopic, Consumed.with(Serdes.Integer(), Serdes.String()))
        .repartition(inputTopicRepartitioned)
        .join(topicBStream, (value1, value2) -> value2, JoinWindows.of(Duration.ofSeconds(10)))
        .to(outputTopic);
    final Map<String, Integer> repartitionTopicsWithNumOfPartitions = Utils.mkMap(
        Utils.mkEntry(toRepartitionTopicName(topicBRepartitionedName), topicBNumberOfPartitions),
        Utils.mkEntry(toRepartitionTopicName(inputTopicRepartitionedName), inputTopicNumberOfPartitions));
    final TopologyException expected = assertThrows(TopologyException.class, () -> builder.build(props));
    final String expectedErrorMessage = String.format("Following topics do not have the same "
        + "number of partitions: [%s]", new TreeMap<>(repartitionTopicsWithNumOfPartitions));
    assertNotNull(expected);
    assertTrue(expected.getMessage().contains(expectedErrorMessage));
}
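Stream-stream joins require co-partitioned inputs: both sides must have the same number of partitions so that records with the same key land in the same partition on each side. Because the two repartition operations pin different counts (4 vs. 2), builder.build(props) fails with a TopologyException before anything runs. A sketch of the fix, giving both sides the same count (topic and operation names here are illustrative, and JoinWindows.ofTimeDifferenceWithNoGrace is used as the non-deprecated replacement for JoinWindows.of):

final KStream<Integer, String> left = builder.stream("input-topic", Consumed.with(Serdes.Integer(), Serdes.String()))
    .repartition(Repartitioned.<Integer, String>as("left-scaled").withNumberOfPartitions(4));
final KStream<Integer, String> right = builder.stream("topic-b", Consumed.with(Serdes.Integer(), Serdes.String()))
    .repartition(Repartitioned.<Integer, String>as("right-scaled").withNumberOfPartitions(4));  // same count as the left side
left.join(right, (v1, v2) -> v2, JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofSeconds(10)))
    .to("topic-output");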
Use of org.apache.kafka.streams.kstream.KStream in project kafka by apache.
Class KStreamSelectKeyTest, method testSelectKey:
@Test
public void testSelectKey() {
    final StreamsBuilder builder = new StreamsBuilder();
    final Map<Number, String> keyMap = new HashMap<>();
    keyMap.put(1, "ONE");
    keyMap.put(2, "TWO");
    keyMap.put(3, "THREE");
    final KeyValueTimestamp[] expected = new KeyValueTimestamp[] {
        new KeyValueTimestamp<>("ONE", 1, 0),
        new KeyValueTimestamp<>("TWO", 2, 0),
        new KeyValueTimestamp<>("THREE", 3, 0) };
    final int[] expectedValues = new int[] { 1, 2, 3 };
    final KStream<String, Integer> stream = builder.stream(topicName, Consumed.with(Serdes.String(), Serdes.Integer()));
    final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = new MockApiProcessorSupplier<>();
    // derive the new key from the value; the old (null) key is discarded
    stream.selectKey((key, value) -> keyMap.get(value)).process(supplier);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, Integer> inputTopic = driver.createInputTopic(
            topicName, new StringSerializer(), new IntegerSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
        for (final int expectedValue : expectedValues) {
            inputTopic.pipeInput(expectedValue);
        }
    }
    assertEquals(3, supplier.theCapturedProcessor().processed().size());
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], supplier.theCapturedProcessor().processed().get(i));
    }
}
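selectKey replaces the key with the mapper's result and leaves the value and timestamp untouched, which is why every expected record keeps timestamp 0. Because the key changes, the resulting stream is flagged for repartitioning before any downstream key-based operation. A minimal sketch of that downstream effect, as a hypothetical continuation rather than part of the test:

final KStream<String, Integer> rekeyed = stream.selectKey((key, value) -> keyMap.get(value));
// equivalent transformation via map(), which likewise flags a repartition:
// stream.map((key, value) -> KeyValue.pair(keyMap.get(value), value));
rekeyed.groupByKey().count();  // inserts an automatic repartition topic because the key changed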