Use of io.confluent.examples.streams.microservices.util.MicroserviceUtils.MIN in the project kafka-streams-examples by confluentinc.
The class ValidationsAggregatorService, method aggregateOrderValidations:
/**
 * Builds the topology that aggregates per-rule validation results and writes the
 * final order state back to the orders topic.
 * <p>
 * Passing orders: counts PASS results per order id inside a session window; once the
 * count reaches {@code numberOfRules}, the order is re-emitted as VALIDATED.
 * Failing orders: any single FAIL result re-emits the order as FAILED.
 *
 * @param bootstrapServers Kafka bootstrap servers for the Streams config
 * @param stateDir         local state directory for the Streams config
 * @return an unstarted {@link KafkaStreams} instance for this topology
 */
private KafkaStreams aggregateOrderValidations(String bootstrapServers, String stateDir) {
    // TODO put into a KTable to make dynamically configurable
    final int numberOfRules = 3;

    StreamsBuilder topologyBuilder = new StreamsBuilder();

    // Validation results, keyed by order id.
    KStream<String, OrderValidation> validationResults =
        topologyBuilder.stream(ORDER_VALIDATIONS.name(), serdes1);

    // Only orders still in CREATED state take part in the joins below.
    KStream<String, Order> createdOrders = topologyBuilder
        .stream(ORDERS.name(), serdes2)
        .filter((id, order) -> OrderState.CREATED.equals(order.getState()));

    // If all rules pass then validate the order
    validationResults
        .groupByKey(serdes3)
        .windowedBy(SessionWindows.with(5 * MIN))
        .aggregate(
            () -> 0L,
            // Count only the PASS results for this order.
            (id, result, total) ->
                PASS.equals(result.getValidationResult()) ? total + 1 : total,
            // include a merger as we're using session windows.
            (k, a, b) -> b == null ? a : b,
            // null key serde => fall back to the configured default key serde.
            Materialized.with(null, Serdes.Long()))
        .toStream((windowedKey, total) -> windowedKey.key())
        .filter((k1, v) -> v != null)
        .filter((k, total) -> total >= numberOfRules)
        .join(createdOrders,
            (id, order) -> newBuilder(order).setState(VALIDATED).build(),
            JoinWindows.of(5 * MIN),
            serdes4)
        .to(ORDERS.name(), serdes5);

    // If any rule fails then fail the order
    validationResults
        .filter((id, rule) -> FAIL.equals(rule.getValidationResult()))
        .join(createdOrders,
            (id, order) -> newBuilder(order).setState(OrderState.FAILED).build(),
            JoinWindows.of(5 * MIN),
            serdes7)
        .groupByKey(serdes6)
        // Collapse possibly-multiple FAIL emissions to a single record per order.
        .reduce((order, v1) -> order)
        .toStream()
        .to(ORDERS.name(), Produced.with(ORDERS.keySerde(), ORDERS.valueSerde()));

    return new KafkaStreams(topologyBuilder.build(),
        baseStreamsConfig(bootstrapServers, stateDir, ORDERS_SERVICE_APP_ID));
}
Use of io.confluent.examples.streams.microservices.util.MicroserviceUtils.MIN in the project kafka-streams-examples by confluentinc.
The class EmailService, method processStreams:
/**
 * Builds the topology that joins orders with their payments (windowed stream-stream
 * join on order id), then joins the pair with the customers global table, and finally
 * sends one email per fully-joined tuple via {@code emailer}.
 *
 * @param bootstrapServers Kafka bootstrap servers for the Streams config
 * @param stateDir         local state directory for the Streams config
 * @return an unstarted {@link KafkaStreams} instance for this topology
 */
private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) {
    final KStreamBuilder topology = new KStreamBuilder();

    // Create the streams/tables for the join
    final KStream<String, Order> orderStream =
        topology.stream(ORDERS.keySerde(), ORDERS.valueSerde(), ORDERS.name());
    final KStream<String, Payment> paymentStream =
        topology.stream(PAYMENTS.keySerde(), PAYMENTS.valueSerde(), PAYMENTS.name());
    final GlobalKTable<Long, Customer> customerTable =
        topology.globalTable(CUSTOMERS.keySerde(), CUSTOMERS.valueSerde(), CUSTOMERS.name());

    // Rekey payments to be by OrderId for the windowed join
    final KStream<String, Payment> paymentsByOrderId =
        paymentStream.selectKey((key, payment) -> payment.getOrderId());

    // Join the two streams and the table then send an email for each
    orderStream
        .join(paymentsByOrderId, EmailTuple::new, // Join Orders and Payments streams
            JoinWindows.of(1 * MIN), serdes)
        // note how, because we use a GKtable, we can join on any attribute of the Customer.
        .join(customerTable,
            (joinKey, tuple) -> tuple.order.getCustomerId(),
            (tuple, customer) -> tuple.setCustomer(customer))
        .peek((joinKey, emailTuple) -> emailer.sendEmail(emailTuple));

    return new KafkaStreams(topology, baseStreamsConfig(bootstrapServers, stateDir, APP_ID));
}
Aggregations