Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class ProcessorNodeTest, method testTopologyLevelConfigException:
@Test
public void testTopologyLevelConfigException() {
    // Serdes configuration is missing and no default is set, which will trigger an exception
    final StreamsBuilder builder = new StreamsBuilder();
    builder.<String, String>stream("streams-plaintext-input")
        .flatMapValues(value -> Collections.singletonList(""));
    final Topology topology = builder.build();
    final ConfigException se = assertThrows(ConfigException.class, () -> new TopologyTestDriver(topology));
    final String msg = se.getMessage();
    assertTrue("Error about class cast with serdes", msg.contains("StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG"));
    assertTrue("Error about class cast with serdes", msg.contains("specify a key serde"));
}
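For contrast, a minimal sketch of how the exception checked above can be avoided: supply default serdes in the properties passed to TopologyTestDriver. The property keys are real StreamsConfig constants; the application id and bootstrap value are illustrative placeholders.

// Assumes "topology" is a topology whose serdes are not set inline, e.g. the one built above
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "serde-config-demo");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
// With both default serdes set, constructing the driver no longer throws ConfigException
try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
    // exercise the topology ...
}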
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class RegexSourceIntegrationTest, method testRegexRecordsAreProcessedAfterNewTopicCreatedWithMultipleSubtopologies:
@Test
public void testRegexRecordsAreProcessedAfterNewTopicCreatedWithMultipleSubtopologies() throws Exception {
    final String topic1 = "TEST-TOPIC-1";
    final String topic2 = "TEST-TOPIC-2";
    try {
        CLUSTER.createTopic(topic1);
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> pattern1Stream = builder.stream(Pattern.compile("TEST-TOPIC-\\d"));
        final KStream<String, String> otherStream = builder.stream(Pattern.compile("not-a-match"));
        // the key change plus the stateful aggregate forces a repartition, giving the topology a second subtopology
        pattern1Stream.selectKey((k, v) -> k)
            .groupByKey()
            .aggregate(() -> "", (k, v, a) -> v)
            .toStream()
            .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
        final Topology topology = builder.build();
        assertThat(topology.describe().subtopologies().size(), greaterThan(1));
        streams = new KafkaStreams(topology, streamsConfiguration);
        startApplicationAndWaitUntilRunning(Collections.singletonList(streams), Duration.ofSeconds(30));
        // create the second matching topic only after the application is already running
        CLUSTER.createTopic(topic2);
        final KeyValue<String, String> record1 = new KeyValue<>("1", "1");
        final KeyValue<String, String> record2 = new KeyValue<>("2", "2");
        IntegrationTestUtils.produceKeyValuesSynchronously(topic1, Collections.singletonList(record1), TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class), CLUSTER.time);
        IntegrationTestUtils.produceKeyValuesSynchronously(topic2, Collections.singletonList(record2), TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class), CLUSTER.time);
        IntegrationTestUtils.waitUntilFinalKeyValueRecordsReceived(TestUtils.consumerConfig(CLUSTER.bootstrapServers(), StringDeserializer.class, StringDeserializer.class), outputTopic, Arrays.asList(record1, record2));
        streams.close();
    } finally {
        CLUSTER.deleteTopicsAndWait(topic1, topic2);
    }
}
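The selectKey(...).groupByKey().aggregate(...) chain is what guarantees more than one subtopology: a key-changing operation followed by a stateful one inserts an internal repartition topic, which splits the topology. A standalone sketch of the same effect (topic names are placeholders):

final StreamsBuilder builder = new StreamsBuilder();
builder.<String, String>stream("input")
    .selectKey((k, v) -> v)   // marks the stream as needing repartitioning
    .groupByKey()
    .count()                  // stateful operation after a key change inserts a repartition topic
    .toStream()
    .to("output", Produced.with(Serdes.String(), Serdes.Long()));
// describe() reports two subtopologies connected by the internal repartition topic
System.out.println(builder.build().describe());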
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class KTableKTableForeignKeyJoinIntegrationTest, method doJoinFromLeftThenDeleteLeftEntity:
@Test
public void doJoinFromLeftThenDeleteLeftEntity() {
    final Topology topology = getTopology(streamsConfig, materialized ? "store" : null, leftJoin, rejoin);
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) {
        final TestInputTopic<String, String> right = driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
        final TestOutputTopic<String, String> rejoinOutputTopic = rejoin ? driver.createOutputTopic(REJOIN_OUTPUT, new StringDeserializer(), new StringDeserializer()) : null;
        final KeyValueStore<String, String> store = driver.getKeyValueStore("store");
        // Pre-populate the RHS records. This test is all about what happens when we add/remove LHS records.
        right.pipeInput("rhs1", "rhsValue1");
        right.pipeInput("rhs2", "rhsValue2");
        // this unreferenced FK won't show up in any results
        right.pipeInput("rhs3", "rhsValue3");
        assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
        if (rejoin) {
            assertThat(rejoinOutputTopic.readKeyValuesToMap(), is(emptyMap()));
        }
        if (materialized) {
            assertThat(asMap(store), is(emptyMap()));
        }
        left.pipeInput("lhs1", "lhsValue1|rhs1");
        left.pipeInput("lhs2", "lhsValue2|rhs2");
        {
            final Map<String, String> expected = mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"), mkEntry("lhs2", "(lhsValue2|rhs2,rhsValue2)"));
            assertThat(outputTopic.readKeyValuesToMap(), is(expected));
            if (rejoin) {
                assertThat(rejoinOutputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", "rejoin((lhsValue1|rhs1,rhsValue1),lhsValue1|rhs1)"), mkEntry("lhs2", "rejoin((lhsValue2|rhs2,rhsValue2),lhsValue2|rhs2)"))));
            }
            if (materialized) {
                assertThat(asMap(store), is(expected));
            }
        }
        // Add another reference to an existing FK
        left.pipeInput("lhs3", "lhsValue3|rhs1");
        {
            assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs3", "(lhsValue3|rhs1,rhsValue1)"))));
            if (rejoin) {
                assertThat(rejoinOutputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs3", "rejoin((lhsValue3|rhs1,rhsValue1),lhsValue3|rhs1)"))));
            }
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"), mkEntry("lhs2", "(lhsValue2|rhs2,rhsValue2)"), mkEntry("lhs3", "(lhsValue3|rhs1,rhsValue1)"))));
            }
        }
        // Now delete one LHS entity such that one delete is propagated down to the output.
        left.pipeInput("lhs1", (String) null);
        assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", null))));
        if (rejoin) {
            assertThat(rejoinOutputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", null))));
        }
        if (materialized) {
            assertThat(asMap(store), is(mkMap(mkEntry("lhs2", "(lhsValue2|rhs2,rhsValue2)"), mkEntry("lhs3", "(lhsValue3|rhs1,rhsValue1)"))));
        }
    }
}
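The asMap helper used in the assertions is a private utility of the test class that is not shown in this snippet. A sketch of what it plausibly looks like (the name matches the call sites above; the body is an assumption):

// Assumed shape of the asMap helper: drain a KeyValueStore into a plain Map for assertions
private static Map<String, String> asMap(final KeyValueStore<String, String> store) {
    final Map<String, String> result = new HashMap<>();
    try (final KeyValueIterator<String, String> it = store.all()) {
        it.forEachRemaining(kv -> result.put(kv.key, kv.value));
    }
    return result;
}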
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class KTableKTableForeignKeyJoinIntegrationTest, method getTopology:
private static Topology getTopology(final Properties streamsConfig, final String queryableStoreName, final boolean leftJoin, final boolean rejoin) {
    final UniqueTopicSerdeScope serdeScope = new UniqueTopicSerdeScope();
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> left = builder.table(LEFT_TABLE, Consumed.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true), serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));
    final KTable<String, String> right = builder.table(RIGHT_TABLE, Consumed.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true), serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));
    final Function<String, String> extractor = value -> value.split("\\|")[1];
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> "(" + value1 + "," + value2 + ")";
    final ValueJoiner<String, String, String> rejoiner = rejoin ? (value1, value2) -> "rejoin(" + value1 + "," + value2 + ")" : null;
    // the cache suppresses some of the unnecessary tombstones we want to make assertions about
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> mainMaterialized =
        queryableStoreName == null
            ? Materialized.<String, String, KeyValueStore<Bytes, byte[]>>with(null, serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)).withCachingDisabled()
            : Materialized.<String, String>as(Stores.inMemoryKeyValueStore(queryableStoreName)).withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)).withCachingDisabled();
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> rejoinMaterialized =
        !rejoin
            ? null
            : queryableStoreName == null
                ? Materialized.with(null, serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
                // to really test this configuration
                : Materialized.<String, String>as(Stores.inMemoryKeyValueStore(queryableStoreName + "-rejoin")).withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)).withCachingDisabled();
    if (leftJoin) {
        final KTable<String, String> fkJoin = left.leftJoin(right, extractor, joiner, mainMaterialized);
        fkJoin.toStream().to(OUTPUT);
        // also make sure the FK join is set up right for downstream operations that require materialization
        if (rejoin) {
            fkJoin.leftJoin(left, rejoiner, rejoinMaterialized).toStream().to(REJOIN_OUTPUT);
        }
    } else {
        final KTable<String, String> fkJoin = left.join(right, extractor, joiner, mainMaterialized);
        fkJoin.toStream().to(OUTPUT);
        // also make sure the FK join is set up right for downstream operations that require materialization
        if (rejoin) {
            fkJoin.join(left, rejoiner, rejoinMaterialized).toStream().to(REJOIN_OUTPUT);
        }
    }
    return builder.build(streamsConfig);
}
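The foreign-key convention in this topology is that a left-table value embeds its right-table key after a '|' separator, and the extractor pulls it out with value.split("\\|")[1]. A tiny standalone illustration of that lambda:

final Function<String, String> extractor = value -> value.split("\\|")[1];
// "lhsValue1|rhs1" is joined against the right-table record with key "rhs1"
System.out.println(extractor.apply("lhsValue1|rhs1")); // prints rhs1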
Use of org.apache.kafka.streams.Topology in project kafka by apache.
From the class KTableKTableForeignKeyJoinIntegrationTest, method shouldEmitTombstoneWhenDeletingNonJoiningRecords:
@Test
public void shouldEmitTombstoneWhenDeletingNonJoiningRecords() {
    final Topology topology = getTopology(streamsConfig, materialized ? "store" : null, leftJoin, rejoin);
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) {
        final TestInputTopic<String, String> left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
        final KeyValueStore<String, String> store = driver.getKeyValueStore("store");
        left.pipeInput("lhs1", "lhsValue1|rhs1");
        {
            final Map<String, String> expected = leftJoin ? mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,null)")) : emptyMap();
            assertThat(outputTopic.readKeyValuesToMap(), is(expected));
            if (materialized) {
                assertThat(asMap(store), is(expected));
            }
        }
        // Deleting a non-joining record produces an unnecessary tombstone for inner joins, because
        // it's not possible to know whether a result was previously emitted.
        // For the left join, the tombstone is necessary.
        left.pipeInput("lhs1", (String) null);
        {
            assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", null))));
            if (materialized) {
                assertThat(asMap(store), is(emptyMap()));
            }
        }
        // Deleting a non-existing record is idempotent
        left.pipeInput("lhs1", (String) null);
        {
            assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
            if (materialized) {
                assertThat(asMap(store), is(emptyMap()));
            }
        }
    }
}
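These assertions lean on the drain semantics of TestOutputTopic.readKeyValuesToMap(): each call consumes only the records emitted since the previous call, with later records overwriting earlier ones per key, so a tombstone appears as a null mapping. A self-contained sketch of that behavior using a pass-through topology (application id, bootstrap value, and topic names are placeholders):

final StreamsBuilder builder = new StreamsBuilder();
builder.stream("in", Consumed.with(Serdes.String(), Serdes.String()))
    .to("out", Produced.with(Serdes.String(), Serdes.String()));
final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "drain-demo");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
    final TestInputTopic<String, String> in = driver.createInputTopic("in", new StringSerializer(), new StringSerializer());
    final TestOutputTopic<String, String> out = driver.createOutputTopic("out", new StringDeserializer(), new StringDeserializer());
    in.pipeInput("k", "v");
    System.out.println(out.readKeyValuesToMap()); // {k=v}
    in.pipeInput("k", null);                      // tombstone
    System.out.println(out.readKeyValuesToMap()); // {k=null}: only records since the last read
}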