Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From class JoinWithIncompleteMetadataIntegrationTest, method testShouldAutoShutdownOnJoinWithIncompleteMetadata.
@Test
public void testShouldAutoShutdownOnJoinWithIncompleteMetadata() throws InterruptedException {
    STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID);
    STREAMS_CONFIG.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    final KStream<Long, String> notExistStream = builder.stream(NON_EXISTENT_INPUT_TOPIC_LEFT);
    final KTable<Long, String> aggregatedTable = notExistStream
        .leftJoin(rightTable, valueJoiner)
        .groupBy((key, value) -> key)
        .reduce((value1, value2) -> value1 + value2);
    // Write the (continuously updating) results to the output topic.
    aggregatedTable.toStream().to(OUTPUT_TOPIC);
    final KafkaStreamsWrapper streams = new KafkaStreamsWrapper(builder.build(), STREAMS_CONFIG);
    final IntegrationTestUtils.StateListenerStub listener = new IntegrationTestUtils.StateListenerStub();
    streams.setStreamThreadStateListener(listener);
    streams.start();
    TestUtils.waitForCondition(
        listener::transitToPendingShutdownSeen,
        "Did not see thread state transition to PENDING_SHUTDOWN");
    streams.close();
    assertTrue(listener.transitToPendingShutdownSeen());
}
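For orientation, here is a minimal, self-contained sketch of the stream-table left join shape that the test wires up against a non-existent topic. The topic names and serdes below are illustrative stand-ins, not the test's constants.

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Consumed;
    import org.apache.kafka.streams.kstream.Joined;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Produced;
    import org.apache.kafka.streams.kstream.ValueJoiner;

    public class StreamTableLeftJoinSketch {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // Hypothetical topics with matching Long keys on both sides.
            final KStream<Long, String> stream =
                builder.stream("left-topic", Consumed.with(Serdes.Long(), Serdes.String()));
            final KTable<Long, String> table =
                builder.table("right-topic", Consumed.with(Serdes.Long(), Serdes.String()));
            // A ValueJoiner only combines the two values; the shared key stays implicit.
            final ValueJoiner<String, String, String> valueJoiner =
                (leftValue, rightValue) -> leftValue + "/" + rightValue;
            stream.leftJoin(table, valueJoiner,
                    Joined.with(Serdes.Long(), Serdes.String(), Serdes.String()))
                  .to("joined-topic", Produced.with(Serdes.Long(), Serdes.String()));
        }
    }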
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From class KTableImplTest, method shouldPreserveSerdesForOperators.
@Test
public void shouldPreserveSerdesForOperators() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KTable<String, String> table1 = builder.table("topic-2", stringConsumed);
    final ConsumedInternal<String, String> consumedInternal = new ConsumedInternal<>(stringConsumed);
    final KeyValueMapper<String, String, String> selector = (key, value) -> key;
    final ValueMapper<String, String> mapper = value -> value;
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> value1;
    final ValueTransformerWithKeySupplier<String, String, String> valueTransformerWithKeySupplier =
        () -> new ValueTransformerWithKey<String, String, String>() {
            @Override
            public void init(final ProcessorContext context) {
            }

            @Override
            public String transform(final String key, final String value) {
                return value;
            }

            @Override
            public void close() {
            }
        };

    assertEquals(((AbstractStream) table1.filter((key, value) -> false)).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) table1.filter((key, value) -> false)).valueSerde(), consumedInternal.valueSerde());
    assertEquals(((AbstractStream) table1.filter((key, value) -> false, Materialized.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.filter((key, value) -> false, Materialized.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertEquals(((AbstractStream) table1.filterNot((key, value) -> false)).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) table1.filterNot((key, value) -> false)).valueSerde(), consumedInternal.valueSerde());
    assertEquals(((AbstractStream) table1.filterNot((key, value) -> false, Materialized.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.filterNot((key, value) -> false, Materialized.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertEquals(((AbstractStream) table1.mapValues(mapper)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) table1.mapValues(mapper)).valueSerde());
    assertEquals(((AbstractStream) table1.mapValues(mapper, Materialized.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.mapValues(mapper, Materialized.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertEquals(((AbstractStream) table1.toStream()).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) table1.toStream()).valueSerde(), consumedInternal.valueSerde());
    assertNull(((AbstractStream) table1.toStream(selector)).keySerde());
    assertEquals(((AbstractStream) table1.toStream(selector)).valueSerde(), consumedInternal.valueSerde());

    assertEquals(((AbstractStream) table1.transformValues(valueTransformerWithKeySupplier)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) table1.transformValues(valueTransformerWithKeySupplier)).valueSerde());
    assertEquals(((AbstractStream) table1.transformValues(valueTransformerWithKeySupplier, Materialized.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.transformValues(valueTransformerWithKeySupplier, Materialized.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertNull(((AbstractStream) table1.groupBy(KeyValue::new)).keySerde());
    assertNull(((AbstractStream) table1.groupBy(KeyValue::new)).valueSerde());
    assertEquals(((AbstractStream) table1.groupBy(KeyValue::new, Grouped.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.groupBy(KeyValue::new, Grouped.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertEquals(((AbstractStream) table1.join(table1, joiner)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) table1.join(table1, joiner)).valueSerde());
    assertEquals(((AbstractStream) table1.join(table1, joiner, Materialized.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.join(table1, joiner, Materialized.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertEquals(((AbstractStream) table1.leftJoin(table1, joiner)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) table1.leftJoin(table1, joiner)).valueSerde());
    assertEquals(((AbstractStream) table1.leftJoin(table1, joiner, Materialized.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.leftJoin(table1, joiner, Materialized.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertEquals(((AbstractStream) table1.outerJoin(table1, joiner)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) table1.outerJoin(table1, joiner)).valueSerde());
    assertEquals(((AbstractStream) table1.outerJoin(table1, joiner, Materialized.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) table1.outerJoin(table1, joiner, Materialized.with(mySerde, mySerde))).valueSerde(), mySerde);
}
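The rule these assertions encode is easier to see in isolation. Below is a minimal sketch, with a hypothetical topic name and String serdes throughout: the key serde is inherited through a join, the result's value serde is unknown to the DSL (null internally, so the configured default applies) because a ValueJoiner may produce an arbitrary type, and Materialized.with pins both serdes explicitly.

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Consumed;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.ValueJoiner;

    public class SerdePropagationSketch {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // Hypothetical topic; its serdes flow into downstream operators.
            final KTable<String, String> table =
                builder.table("input-topic", Consumed.with(Serdes.String(), Serdes.String()));
            final ValueJoiner<String, String, String> joiner = (value1, value2) -> value1;
            // Key serde carries over; the join result's value serde stays unset.
            final KTable<String, String> joined = table.join(table, joiner);
            // Materialized.with overrides both serdes for the result table.
            final KTable<String, String> pinned =
                table.join(table, joiner, Materialized.with(Serdes.String(), Serdes.String()));
        }
    }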
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From class KTableKTableForeignKeyInnerJoinMultiIntegrationTest, method prepareTopology.
private static KafkaStreams prepareTopology(final String queryableName,
                                            final String queryableNameTwo,
                                            final Properties streamsConfig) {
    final UniqueTopicSerdeScope serdeScope = new UniqueTopicSerdeScope();
    final StreamsBuilder builder = new StreamsBuilder();

    final KTable<Integer, Float> table1 = builder.table(
        TABLE_1,
        Consumed.with(serdeScope.decorateSerde(Serdes.Integer(), streamsConfig, true),
                      serdeScope.decorateSerde(Serdes.Float(), streamsConfig, false)));
    final KTable<String, Long> table2 = builder.table(
        TABLE_2,
        Consumed.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true),
                      serdeScope.decorateSerde(Serdes.Long(), streamsConfig, false)));
    final KTable<Integer, String> table3 = builder.table(
        TABLE_3,
        Consumed.with(serdeScope.decorateSerde(Serdes.Integer(), streamsConfig, true),
                      serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));

    final Materialized<Integer, String, KeyValueStore<Bytes, byte[]>> materialized;
    if (queryableName != null) {
        materialized = Materialized.<Integer, String, KeyValueStore<Bytes, byte[]>>as(queryableName)
            .withKeySerde(serdeScope.decorateSerde(Serdes.Integer(), streamsConfig, true))
            .withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
            .withCachingDisabled();
    } else {
        throw new RuntimeException("Current implementation of joinOnForeignKey requires a materialized store");
    }

    final Materialized<Integer, String, KeyValueStore<Bytes, byte[]>> materializedTwo;
    if (queryableNameTwo != null) {
        materializedTwo = Materialized.<Integer, String, KeyValueStore<Bytes, byte[]>>as(queryableNameTwo)
            .withKeySerde(serdeScope.decorateSerde(Serdes.Integer(), streamsConfig, true))
            .withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
            .withCachingDisabled();
    } else {
        throw new RuntimeException("Current implementation of joinOnForeignKey requires a materialized store");
    }

    final Function<Float, String> tableOneKeyExtractor =
        value -> Integer.toString((int) value.floatValue());
    final Function<String, Integer> joinedTableKeyExtractor = value -> {
        // Hardwired to return the desired foreign key as a test shortcut
        if (value.contains("value2=10"))
            return 10;
        else
            return 0;
    };

    final ValueJoiner<Float, Long, String> joiner =
        (value1, value2) -> "value1=" + value1 + ",value2=" + value2;
    final ValueJoiner<String, String, String> joinerTwo =
        (value1, value2) -> value1 + ",value3=" + value2;

    table1.join(table2, tableOneKeyExtractor, joiner, materialized)
          .join(table3, joinedTableKeyExtractor, joinerTwo, materializedTwo)
          .toStream()
          .to(OUTPUT, Produced.with(serdeScope.decorateSerde(Serdes.Integer(), streamsConfig, true),
                                    serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));

    return new KafkaStreams(builder.build(streamsConfig), streamsConfig);
}
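The two chained joins above are instances of the KTable foreign-key join: each step extracts a foreign key from the left value and joins against the other table's primary key, while the result stays keyed by the left table's key. A minimal single-step sketch follows; the topics, extractor logic, and output format here are hypothetical, not taken from the test.

    import java.util.function.Function;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Consumed;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Produced;
    import org.apache.kafka.streams.kstream.ValueJoiner;

    public class ForeignKeyJoinSketch {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // Hypothetical tables: orders keyed by orderId, customers keyed by customerId.
            final KTable<Integer, String> orders =
                builder.table("orders", Consumed.with(Serdes.Integer(), Serdes.String()));
            final KTable<String, String> customers =
                builder.table("customers", Consumed.with(Serdes.String(), Serdes.String()));
            // The extractor derives the foreign key (a customerId) from the order value,
            // assuming order values look like "customerId:details".
            final Function<String, String> foreignKeyExtractor = order -> order.split(":")[0];
            final ValueJoiner<String, String, String> joiner =
                (order, customer) -> order + " @ " + customer;
            // The result remains keyed by orderId; this overload requires a Materialized.
            orders.join(customers, foreignKeyExtractor, joiner,
                        Materialized.with(Serdes.Integer(), Serdes.String()))
                  .toStream()
                  .to("enriched-orders", Produced.with(Serdes.Integer(), Serdes.String()));
        }
    }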
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From class KTableKTableForeignKeyJoinIntegrationTest, method getTopology.
private static Topology getTopology(final Properties streamsConfig,
                                    final String queryableStoreName,
                                    final boolean leftJoin,
                                    final boolean rejoin) {
    final UniqueTopicSerdeScope serdeScope = new UniqueTopicSerdeScope();
    final StreamsBuilder builder = new StreamsBuilder();

    final KTable<String, String> left = builder.table(
        LEFT_TABLE,
        Consumed.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true),
                      serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));
    final KTable<String, String> right = builder.table(
        RIGHT_TABLE,
        Consumed.with(serdeScope.decorateSerde(Serdes.String(), streamsConfig, true),
                      serdeScope.decorateSerde(Serdes.String(), streamsConfig, false)));

    final Function<String, String> extractor = value -> value.split("\\|")[1];
    final ValueJoiner<String, String, String> joiner =
        (value1, value2) -> "(" + value1 + "," + value2 + ")";
    final ValueJoiner<String, String, String> rejoiner =
        rejoin ? (value1, value2) -> "rejoin(" + value1 + "," + value2 + ")" : null;

    // the cache suppresses some of the unnecessary tombstones we want to make assertions about
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> mainMaterialized =
        queryableStoreName == null
            ? Materialized.<String, String, KeyValueStore<Bytes, byte[]>>with(
                  null, serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
                  .withCachingDisabled()
            : Materialized.<String, String>as(Stores.inMemoryKeyValueStore(queryableStoreName))
                  .withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
                  .withCachingDisabled();

    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> rejoinMaterialized =
        !rejoin
            ? null
            : queryableStoreName == null
                ? Materialized.with(null, serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
                // to really test this configuration
                : Materialized.<String, String>as(Stores.inMemoryKeyValueStore(queryableStoreName + "-rejoin"))
                      .withValueSerde(serdeScope.decorateSerde(Serdes.String(), streamsConfig, false))
                      .withCachingDisabled();

    if (leftJoin) {
        final KTable<String, String> fkJoin = left.leftJoin(right, extractor, joiner, mainMaterialized);
        fkJoin.toStream().to(OUTPUT);
        // also make sure the FK join is set up right for downstream operations that require materialization
        if (rejoin) {
            fkJoin.leftJoin(left, rejoiner, rejoinMaterialized).toStream().to(REJOIN_OUTPUT);
        }
    } else {
        final KTable<String, String> fkJoin = left.join(right, extractor, joiner, mainMaterialized);
        fkJoin.toStream().to(OUTPUT);
        // also make sure the FK join is set up right for downstream operations that require materialization
        if (rejoin) {
            fkJoin.join(left, rejoiner, rejoinMaterialized).toStream().to(REJOIN_OUTPUT);
        }
    }
    return builder.build(streamsConfig);
}
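One detail worth calling out in the leftJoin branch: on a left join, the right-hand value passed to the ValueJoiner may be null when no matching record exists, so the joiner must tolerate it. A tiny, self-contained illustration of the joiner shape used above (the input string is made up):

    import org.apache.kafka.streams.kstream.ValueJoiner;

    public class NullTolerantJoinerSketch {
        public static void main(final String[] args) {
            // Same shape as the joiner in getTopology above.
            final ValueJoiner<String, String, String> joiner =
                (value1, value2) -> "(" + value1 + "," + value2 + ")";
            // With no right-hand match, value2 is null; string concatenation
            // renders it literally, producing "(lhsValue|rhsKey,null)".
            System.out.println(joiner.apply("lhsValue|rhsKey", null));
        }
    }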
Use of org.apache.kafka.streams.kstream.ValueJoiner in project kafka by apache.
From class KStreamImplTest, method shouldSupportForeignKeyTableTableJoinWithKTableFromKStream.
@Test
public void shouldSupportForeignKeyTableTableJoinWithKTableFromKStream() {
    final Consumed<String, String> consumed = Consumed.with(Serdes.String(), Serdes.String());
    final StreamsBuilder builder = new StreamsBuilder();
    final String input1 = "input1";
    final String input2 = "input2";
    final String output = "output";
    final KTable<String, String> leftTable = builder.stream(input1, consumed).toTable();
    final KTable<String, String> rightTable = builder.stream(input2, consumed).toTable();
    final Function<String, String> extractor = value -> value.split("\\|")[1];
    final ValueJoiner<String, String, String> joiner =
        (value1, value2) -> "(" + value1 + "," + value2 + ")";
    leftTable.join(rightTable, extractor, joiner).toStream().to(output);

    final Topology topology = builder.build(props);
    final String topologyDescription = topology.describe().toString();
    assertThat(topologyDescription, equalTo(
        "Topologies:\n"
            + " Sub-topology: 0\n"
            + " Source: KTABLE-SOURCE-0000000016 (topics: [KTABLE-FK-JOIN-SUBSCRIPTION-RESPONSE-0000000014-topic])\n"
            + " --> KTABLE-FK-JOIN-SUBSCRIPTION-RESPONSE-RESOLVER-PROCESSOR-0000000017\n"
            + " Source: KSTREAM-SOURCE-0000000000 (topics: [input1])\n"
            + " --> KSTREAM-TOTABLE-0000000001\n"
            + " Processor: KTABLE-FK-JOIN-SUBSCRIPTION-RESPONSE-RESOLVER-PROCESSOR-0000000017 (stores: [KSTREAM-TOTABLE-STATE-STORE-0000000002])\n"
            + " --> KTABLE-FK-JOIN-OUTPUT-0000000018\n"
            + " <-- KTABLE-SOURCE-0000000016\n"
            + " Processor: KSTREAM-TOTABLE-0000000001 (stores: [KSTREAM-TOTABLE-STATE-STORE-0000000002])\n"
            + " --> KTABLE-FK-JOIN-SUBSCRIPTION-REGISTRATION-0000000007\n"
            + " <-- KSTREAM-SOURCE-0000000000\n"
            + " Processor: KTABLE-FK-JOIN-OUTPUT-0000000018 (stores: [])\n"
            + " --> KTABLE-TOSTREAM-0000000020\n"
            + " <-- KTABLE-FK-JOIN-SUBSCRIPTION-RESPONSE-RESOLVER-PROCESSOR-0000000017\n"
            + " Processor: KTABLE-FK-JOIN-SUBSCRIPTION-REGISTRATION-0000000007 (stores: [])\n"
            + " --> KTABLE-SINK-0000000008\n"
            + " <-- KSTREAM-TOTABLE-0000000001\n"
            + " Processor: KTABLE-TOSTREAM-0000000020 (stores: [])\n"
            + " --> KSTREAM-SINK-0000000021\n"
            + " <-- KTABLE-FK-JOIN-OUTPUT-0000000018\n"
            + " Sink: KSTREAM-SINK-0000000021 (topic: output)\n"
            + " <-- KTABLE-TOSTREAM-0000000020\n"
            + " Sink: KTABLE-SINK-0000000008 (topic: KTABLE-FK-JOIN-SUBSCRIPTION-REGISTRATION-0000000006-topic)\n"
            + " <-- KTABLE-FK-JOIN-SUBSCRIPTION-REGISTRATION-0000000007\n"
            + "\n"
            + " Sub-topology: 1\n"
            + " Source: KSTREAM-SOURCE-0000000003 (topics: [input2])\n"
            + " --> KSTREAM-TOTABLE-0000000004\n"
            + " Source: KTABLE-SOURCE-0000000009 (topics: [KTABLE-FK-JOIN-SUBSCRIPTION-REGISTRATION-0000000006-topic])\n"
            + " --> KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000011\n"
            + " Processor: KSTREAM-TOTABLE-0000000004 (stores: [KSTREAM-TOTABLE-STATE-STORE-0000000005])\n"
            + " --> KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000013\n"
            + " <-- KSTREAM-SOURCE-0000000003\n"
            + " Processor: KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000011 (stores: [KTABLE-FK-JOIN-SUBSCRIPTION-STATE-STORE-0000000010])\n"
            + " --> KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000012\n"
            + " <-- KTABLE-SOURCE-0000000009\n"
            + " Processor: KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000012 (stores: [KSTREAM-TOTABLE-STATE-STORE-0000000005])\n"
            + " --> KTABLE-SINK-0000000015\n"
            + " <-- KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000011\n"
            + " Processor: KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000013 (stores: [KTABLE-FK-JOIN-SUBSCRIPTION-STATE-STORE-0000000010])\n"
            + " --> KTABLE-SINK-0000000015\n"
            + " <-- KSTREAM-TOTABLE-0000000004\n"
            + " Sink: KTABLE-SINK-0000000015 (topic: KTABLE-FK-JOIN-SUBSCRIPTION-RESPONSE-0000000014-topic)\n"
            + " <-- KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000012, KTABLE-FK-JOIN-SUBSCRIPTION-PROCESSOR-0000000013\n\n"));

    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
        final TestInputTopic<String, String> left =
            driver.createInputTopic(input1, new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> right =
            driver.createInputTopic(input2, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> outputTopic =
            driver.createOutputTopic(output, new StringDeserializer(), new StringDeserializer());

        // Pre-populate the RHS records. This test is all about what happens when we add/remove LHS records
        right.pipeInput("rhs1", "rhsValue1");
        right.pipeInput("rhs2", "rhsValue2");
        // this unreferenced FK won't show up in any results
        right.pipeInput("rhs3", "rhsValue3");
        assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));

        left.pipeInput("lhs1", "lhsValue1|rhs1");
        left.pipeInput("lhs2", "lhsValue2|rhs2");
        final Map<String, String> expected = mkMap(
            mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"),
            mkEntry("lhs2", "(lhsValue2|rhs2,rhsValue2)"));
        assertThat(outputTopic.readKeyValuesToMap(), is(expected));

        // Add another reference to an existing FK
        left.pipeInput("lhs3", "lhsValue3|rhs1");
        assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs3", "(lhsValue3|rhs1,rhsValue1)"))));

        left.pipeInput("lhs1", (String) null);
        assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", null))));
    }
}
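The final assertion relies on KTable delete semantics: because the input streams are reinterpreted as tables via toTable(), a null value for an existing key is a delete, and the foreign-key join forwards it downstream as a tombstone for "lhs1". A minimal sketch of that reinterpretation, with a hypothetical topic name:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Consumed;
    import org.apache.kafka.streams.kstream.KTable;

    public class ToTableSketch {
        public static void main(final String[] args) {
            final StreamsBuilder builder = new StreamsBuilder();
            // toTable() treats the stream as a changelog: later records for a key
            // overwrite earlier ones, and a null value deletes the key, which is
            // why piping (lhs1, null) above yields a tombstone in the output.
            final KTable<String, String> table = builder
                .stream("changelog-topic", Consumed.with(Serdes.String(), Serdes.String()))
                .toTable();
        }
    }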