Use of org.apache.kafka.streams.kstream.GlobalKTable in project kafka-streams-examples by confluentinc.
The class EmailService, method processStreams.
private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) {
    KStreamBuilder builder = new KStreamBuilder();

    // Create the streams/tables for the join
    KStream<String, Order> orders =
        builder.stream(ORDERS.keySerde(), ORDERS.valueSerde(), ORDERS.name());
    KStream<String, Payment> payments =
        builder.stream(PAYMENTS.keySerde(), PAYMENTS.valueSerde(), PAYMENTS.name());
    GlobalKTable<Long, Customer> customers =
        builder.globalTable(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde(), CUSTOMERS.name());

    // Rekey payments by OrderId for the windowed join
    payments = payments.selectKey((s, payment) -> payment.getOrderId());

    // Join the two streams and the table, then send an email for each result
    orders.join(payments, EmailTuple::new,                        // join Orders and Payments streams
            JoinWindows.of(1 * MIN), serdes)
        .join(customers,
            (key1, tuple) -> tuple.order.getCustomerId(),         // because customers is a GlobalKTable, the join key can be any attribute of the Customer
            (tuple, customer) -> tuple.setCustomer(customer))
        .peek((key, emailTuple) -> emailer.sendEmail(emailTuple));

    return new KafkaStreams(builder, baseStreamsConfig(bootstrapServers, stateDir, APP_ID));
}
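For comparison, here is a minimal sketch of the same join wired up on the current StreamsBuilder API instead of the deprecated KStreamBuilder; the Schemas constants (ORDERS, PAYMENTS, CUSTOMERS), their serdes, and the EmailTuple/emailer/baseStreamsConfig helpers are assumed to exist exactly as in the example above.

StreamsBuilder builder = new StreamsBuilder();

KStream<String, Order> orders =
    builder.stream(ORDERS.name(), Consumed.with(ORDERS.keySerde(), ORDERS.valueSerde()));
KStream<String, Payment> payments =
    builder.stream(PAYMENTS.name(), Consumed.with(PAYMENTS.keySerde(), PAYMENTS.valueSerde()))
        .selectKey((id, payment) -> payment.getOrderId());        // rekey by OrderId
GlobalKTable<Long, Customer> customers =
    builder.globalTable(CUSTOMERS.name(), Consumed.with(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde()));

orders.join(payments, EmailTuple::new,
        JoinWindows.of(Duration.ofMinutes(1)),
        StreamJoined.with(ORDERS.keySerde(), ORDERS.valueSerde(), PAYMENTS.valueSerde()))
    .join(customers,
        (orderId, tuple) -> tuple.order.getCustomerId(),          // GlobalKTable join key can be derived from any value attribute
        (tuple, customer) -> tuple.setCustomer(customer))
    .peek((orderId, emailTuple) -> emailer.sendEmail(emailTuple));

KafkaStreams streams = new KafkaStreams(builder.build(), baseStreamsConfig(bootstrapServers, stateDir, APP_ID));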
Use of org.apache.kafka.streams.kstream.GlobalKTable in project kafka by apache.
The class InternalStreamsBuilderTest, method shouldAddGlobalTablesToEachGroup.
@Test
public void shouldAddGlobalTablesToEachGroup() {
    final String one = "globalTable";
    final String two = "globalTable2";

    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materializedInternal =
        new MaterializedInternal<>(Materialized.as(one), builder, storePrefix);
    final GlobalKTable<String, String> globalTable = builder.globalTable("table", consumed, materializedInternal);

    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materializedInternal2 =
        new MaterializedInternal<>(Materialized.as(two), builder, storePrefix);
    final GlobalKTable<String, String> globalTable2 = builder.globalTable("table2", consumed, materializedInternal2);

    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materializedInternalNotGlobal =
        new MaterializedInternal<>(Materialized.as("not-global"), builder, storePrefix);
    builder.table("not-global", consumed, materializedInternalNotGlobal);

    final KeyValueMapper<String, String, String> kvMapper = (key, value) -> value;

    final KStream<String, String> stream = builder.stream(Collections.singleton("t1"), consumed);
    stream.leftJoin(globalTable, kvMapper, MockValueJoiner.TOSTRING_JOINER);
    final KStream<String, String> stream2 = builder.stream(Collections.singleton("t2"), consumed);
    stream2.leftJoin(globalTable2, kvMapper, MockValueJoiner.TOSTRING_JOINER);

    final Map<Integer, Set<String>> nodeGroups = builder.internalTopologyBuilder.nodeGroups();
    for (final Integer groupId : nodeGroups.keySet()) {
        final ProcessorTopology topology = builder.internalTopologyBuilder.buildSubtopology(groupId);
        final List<StateStore> stateStores = topology.globalStateStores();
        final Set<String> names = new HashSet<>();
        for (final StateStore stateStore : stateStores) {
            names.add(stateStore.name());
        }
        assertEquals(2, stateStores.size());
        assertTrue(names.contains(one));
        assertTrue(names.contains(two));
    }
}
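The test drives the builder through internal classes (MaterializedInternal, ConsumedInternal, MockValueJoiner); with the public API the equivalent wiring looks roughly like the sketch below, where the topic names, store names and String serdes are assumptions:

final StreamsBuilder builder = new StreamsBuilder();

// Two named global tables, analogous to "globalTable" and "globalTable2" in the test
final GlobalKTable<String, String> globalTable = builder.globalTable("table",
    Consumed.with(Serdes.String(), Serdes.String()), Materialized.as("globalTable"));
final GlobalKTable<String, String> globalTable2 = builder.globalTable("table2",
    Consumed.with(Serdes.String(), Serdes.String()), Materialized.as("globalTable2"));

// Each input stream joins against one of the global tables; both global state
// stores end up visible to every sub-topology of the resulting topology
builder.stream("t1", Consumed.with(Serdes.String(), Serdes.String()))
    .leftJoin(globalTable, (key, value) -> value, (v1, v2) -> v1 + "," + v2);
builder.stream("t2", Consumed.with(Serdes.String(), Serdes.String()))
    .leftJoin(globalTable2, (key, value) -> value, (v1, v2) -> v1 + "," + v2);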
Use of org.apache.kafka.streams.kstream.GlobalKTable in project kafka by apache.
The class KStreamImplTest, method shouldPreserveSerdesForOperators.
// specifically testing the deprecated variant
@SuppressWarnings({ "rawtypes", "deprecation" })
@Test
public void shouldPreserveSerdesForOperators() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream1 = builder.stream(Collections.singleton("topic-1"), stringConsumed);
    final KTable<String, String> table1 = builder.table("topic-2", stringConsumed);
    final GlobalKTable<String, String> table2 = builder.globalTable("topic-2", stringConsumed);
    final ConsumedInternal<String, String> consumedInternal = new ConsumedInternal<>(stringConsumed);

    final KeyValueMapper<String, String, String> selector = (key, value) -> key;
    final KeyValueMapper<String, String, Iterable<KeyValue<String, String>>> flatSelector = (key, value) -> Collections.singleton(new KeyValue<>(key, value));
    final ValueMapper<String, String> mapper = value -> value;
    final ValueMapper<String, Iterable<String>> flatMapper = Collections::singleton;
    final ValueJoiner<String, String, String> joiner = (value1, value2) -> value1;

    assertEquals(((AbstractStream) stream1.filter((key, value) -> false)).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) stream1.filter((key, value) -> false)).valueSerde(), consumedInternal.valueSerde());
    assertEquals(((AbstractStream) stream1.filterNot((key, value) -> false)).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) stream1.filterNot((key, value) -> false)).valueSerde(), consumedInternal.valueSerde());
    assertNull(((AbstractStream) stream1.selectKey(selector)).keySerde());
    assertEquals(((AbstractStream) stream1.selectKey(selector)).valueSerde(), consumedInternal.valueSerde());
    assertNull(((AbstractStream) stream1.map(KeyValue::new)).keySerde());
    assertNull(((AbstractStream) stream1.map(KeyValue::new)).valueSerde());
    assertEquals(((AbstractStream) stream1.mapValues(mapper)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) stream1.mapValues(mapper)).valueSerde());
    assertNull(((AbstractStream) stream1.flatMap(flatSelector)).keySerde());
    assertNull(((AbstractStream) stream1.flatMap(flatSelector)).valueSerde());
    assertEquals(((AbstractStream) stream1.flatMapValues(flatMapper)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) stream1.flatMapValues(flatMapper)).valueSerde());
    assertNull(((AbstractStream) stream1.transform(transformerSupplier)).keySerde());
    assertNull(((AbstractStream) stream1.transform(transformerSupplier)).valueSerde());
    assertEquals(((AbstractStream) stream1.transformValues(valueTransformerSupplier)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) stream1.transformValues(valueTransformerSupplier)).valueSerde());
    assertNull(((AbstractStream) stream1.merge(stream1)).keySerde());
    assertNull(((AbstractStream) stream1.merge(stream1)).valueSerde());

    assertEquals(((AbstractStream) stream1.through("topic-3")).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) stream1.through("topic-3")).valueSerde(), consumedInternal.valueSerde());
    assertEquals(((AbstractStream) stream1.through("topic-3", Produced.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) stream1.through("topic-3", Produced.with(mySerde, mySerde))).valueSerde(), mySerde);
    assertEquals(((AbstractStream) stream1.repartition()).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) stream1.repartition()).valueSerde(), consumedInternal.valueSerde());
    assertEquals(((AbstractStream) stream1.repartition(Repartitioned.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) stream1.repartition(Repartitioned.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertEquals(((AbstractStream) stream1.groupByKey()).keySerde(), consumedInternal.keySerde());
    assertEquals(((AbstractStream) stream1.groupByKey()).valueSerde(), consumedInternal.valueSerde());
    assertEquals(((AbstractStream) stream1.groupByKey(Grouped.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) stream1.groupByKey(Grouped.with(mySerde, mySerde))).valueSerde(), mySerde);
    assertNull(((AbstractStream) stream1.groupBy(selector)).keySerde());
    assertEquals(((AbstractStream) stream1.groupBy(selector)).valueSerde(), consumedInternal.valueSerde());
    assertEquals(((AbstractStream) stream1.groupBy(selector, Grouped.with(mySerde, mySerde))).keySerde(), mySerde);
    assertEquals(((AbstractStream) stream1.groupBy(selector, Grouped.with(mySerde, mySerde))).valueSerde(), mySerde);

    assertNull(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).keySerde());
    assertNull(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).valueSerde());
    assertEquals(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
    assertNull(((AbstractStream) stream1.join(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).valueSerde());
    assertNull(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).keySerde());
    assertNull(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).valueSerde());
    assertEquals(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
    assertNull(((AbstractStream) stream1.leftJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).valueSerde());
    assertNull(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).keySerde());
    assertNull(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)))).valueSerde());
    assertEquals(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
    assertNull(((AbstractStream) stream1.outerJoin(stream1, joiner, JoinWindows.of(Duration.ofMillis(100L)), StreamJoined.with(mySerde, mySerde, mySerde))).valueSerde());

    assertEquals(((AbstractStream) stream1.join(table1, joiner)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) stream1.join(table1, joiner)).valueSerde());
    assertEquals(((AbstractStream) stream1.join(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
    assertNull(((AbstractStream) stream1.join(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).valueSerde());
    assertEquals(((AbstractStream) stream1.leftJoin(table1, joiner)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) stream1.leftJoin(table1, joiner)).valueSerde());
    assertEquals(((AbstractStream) stream1.leftJoin(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).keySerde(), mySerde);
    assertNull(((AbstractStream) stream1.leftJoin(table1, joiner, Joined.with(mySerde, mySerde, mySerde))).valueSerde());

    assertEquals(((AbstractStream) stream1.join(table2, selector, joiner)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) stream1.join(table2, selector, joiner)).valueSerde());
    assertEquals(((AbstractStream) stream1.leftJoin(table2, selector, joiner)).keySerde(), consumedInternal.keySerde());
    assertNull(((AbstractStream) stream1.leftJoin(table2, selector, joiner)).valueSerde());
}
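The last four assertions capture the serde rule for stream-GlobalKTable joins: the result keeps the stream's key serde but carries no value serde, since the joiner can emit an arbitrary type, so a downstream operator that needs one must be given it explicitly. A minimal sketch of that rule in application code, with topic names and String serdes as assumptions:

final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> stream =
    builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()));
final GlobalKTable<String, String> global =
    builder.globalTable("lookup", Consumed.with(Serdes.String(), Serdes.String()));

stream.join(global, (key, value) -> key, (v1, v2) -> v1 + v2)
    // the join result has no value serde, so supply one where it is next required
    .to("output", Produced.with(Serdes.String(), Serdes.String()));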
Use of org.apache.kafka.streams.kstream.GlobalKTable in project kafka by apache.
The class KStreamImplTest, method shouldPropagateRepartitionFlagAfterGlobalKTableJoin.
@Test
public void shouldPropagateRepartitionFlagAfterGlobalKTableJoin() {
    final StreamsBuilder builder = new StreamsBuilder();
    final GlobalKTable<String, String> globalKTable = builder.globalTable("globalTopic");
    final KeyValueMapper<String, String, String> kvMapper = (k, v) -> k + v;
    final ValueJoiner<String, String, String> valueJoiner = (v1, v2) -> v1 + v2;

    builder.<String, String>stream("topic")
        .selectKey((k, v) -> v)
        .join(globalKTable, kvMapper, valueJoiner)
        .groupByKey()
        .count();

    final Pattern repartitionTopicPattern = Pattern.compile("Sink: .*-repartition");
    final String topology = builder.build().describe().toString();
    final Matcher matcher = repartitionTopicPattern.matcher(topology);

    assertTrue(matcher.find());
    final String match = matcher.group();
    assertThat(match, notNullValue());
    assertTrue(match.endsWith("repartition"));
}
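The point of the test is that selectKey() marks the stream for repartitioning and that flag survives the GlobalKTable join (which itself never forces a repartition), so the repartition topic only materialises at the later groupByKey()/count(). Printing the description of the topology built above is enough to see the generated sink:

// the description contains a node such as "Sink: ...-repartition"
System.out.println(builder.build().describe());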