Use of io.confluent.examples.streams.avro.microservices.OrderValue in the project kafka-streams-examples by confluentinc.
The class FraudService, method processStreams:
private KafkaStreams processStreams(final String bootstrapServers, final String stateDir) {

    // Latch onto instances of the orders and inventory topics
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, Order> orders = builder
        .stream(ORDERS.name(), Consumed.with(ORDERS.keySerde(), ORDERS.valueSerde()))
        .filter((id, order) -> OrderState.CREATED.equals(order.getState()));

    // Create an aggregate of the total value by customer and hold it with the order.
    // We use session windows to detect periods of activity.
    KTable<Windowed<Long>, OrderValue> aggregate = orders
        .groupBy((id, order) -> order.getCustomerId(), Serialized.with(Serdes.Long(), ORDERS.valueSerde()))
        .windowedBy(SessionWindows.with(60 * MIN))
        .aggregate(OrderValue::new,
            // Calculate running total for each customer within this window
            (custId, order, total) -> new OrderValue(order, total.getValue() + order.getQuantity() * order.getPrice()),
            // Include a merger as we're using session windows.
            (k, a, b) -> simpleMerge(a, b),
            Materialized.with(null, Schemas.ORDER_VALUE_SERDE));

    // Ditch the windowing and rekey
    KStream<String, OrderValue> ordersWithTotals = aggregate
        .toStream((windowedKey, orderValue) -> windowedKey.key())
        // When elements are evicted from a session window they create delete events. Filter these out.
        .filter((k, v) -> v != null)
        .selectKey((id, orderValue) -> orderValue.getOrder().getId());

    // Now branch the stream into two, for pass and fail, based on whether the windowed total is over the fraud limit
    KStream<String, OrderValue>[] forks = ordersWithTotals.branch(
        (id, orderValue) -> orderValue.getValue() >= FRAUD_LIMIT,
        (id, orderValue) -> orderValue.getValue() < FRAUD_LIMIT);

    forks[0].mapValues(orderValue -> new OrderValidation(orderValue.getOrder().getId(), FRAUD_CHECK, FAIL))
        .to(ORDER_VALIDATIONS.name(), Produced.with(ORDER_VALIDATIONS.keySerde(), ORDER_VALIDATIONS.valueSerde()));

    forks[1].mapValues(orderValue -> new OrderValidation(orderValue.getOrder().getId(), FRAUD_CHECK, PASS))
        .to(ORDER_VALIDATIONS.name(), Produced.with(ORDER_VALIDATIONS.keySerde(), ORDER_VALIDATIONS.valueSerde()));

    // Disable caching to ensure a complete aggregate changelog. This is a little trick we need to apply,
    // as caching in Kafka Streams will conflate subsequent updates for the same key. Disabling caching ensures
    // we get a complete "changelog" from the aggregate(...) step above (i.e. every input event will have a
    // corresponding output event).
    Properties props = baseStreamsConfig(bootstrapServers, stateDir, FRAUD_SERVICE_APP_ID);
    props.setProperty(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0");

    return new KafkaStreams(builder.build(), props);
}
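
The merger passed to aggregate(...) above, simpleMerge, is not shown on this page. A minimal sketch of what such a merger could look like, assuming only the OrderValue accessors already used above (getOrder() and getValue()), might be:

    // Hypothetical merger, for illustration only; the project's actual simpleMerge is not shown here.
    // When two session windows for the same customer merge, keep the later order and sum the running totals.
    private OrderValue simpleMerge(final OrderValue a, final OrderValue b) {
        return new OrderValue(b.getOrder(), (a == null ? 0D : a.getValue()) + b.getValue());
    }

The method only builds the topology; the caller still has to start the returned KafkaStreams instance. A typical (assumed) wiring, using standard Kafka Streams calls, would be:

    // Sketch of how the returned topology might be run; the broker address and state directory are placeholders.
    KafkaStreams streams = processStreams("localhost:9092", "/tmp/kafka-streams");
    streams.cleanUp();   // optional: wipe local state stores before starting
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));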