
Example 6 with Order

use of io.confluent.examples.streams.avro.microservices.Order in project kafka-streams-examples by confluentinc.

the class OrdersService method fetchLocal.

/**
 * Fetch the order from the local materialized view
 *
 * @param id ID to fetch
 * @param asyncResponse the response to call once completed
 * @param predicate a filter to apply to this fetch; for example, we might fetch only VALIDATED
 * orders.
 */
private void fetchLocal(String id, AsyncResponse asyncResponse, Predicate<String, Order> predicate) {
    log.info("running GET on this node");
    try {
        Order order = ordersStore().get(id);
        if (order == null || !predicate.test(id, order)) {
            log.info("Delaying get as order not present for id " + id);
            outstandingRequests.put(id, new FilteredResponse<>(asyncResponse, predicate));
        } else {
            asyncResponse.resume(toBean(order));
        }
    } catch (InvalidStateStoreException e) {
        // Store not ready so delay
        outstandingRequests.put(id, new FilteredResponse<>(asyncResponse, predicate));
    }
}
Also used : Order(io.confluent.examples.streams.avro.microservices.Order) InvalidStateStoreException(org.apache.kafka.streams.errors.InvalidStateStoreException)
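
The ordersStore() helper is not shown in this snippet; in this style of service it would normally wrap Kafka Streams' interactive-queries API, which is also why the InvalidStateStoreException handling above is needed. A minimal sketch follows, assuming a streams field of type KafkaStreams and a hypothetical store-name constant ORDERS_STORE_NAME (both are assumptions, not taken from the snippet), and using ReadOnlyKeyValueStore and QueryableStoreTypes from org.apache.kafka.streams.state.

private ReadOnlyKeyValueStore<String, Order> ordersStore() {
    // Hypothetical sketch. streams.store(...) throws InvalidStateStoreException while the
    // store is not yet queryable (e.g. during a rebalance), which is why fetchLocal() parks
    // the request in outstandingRequests instead of failing it outright.
    return streams.store(ORDERS_STORE_NAME, QueryableStoreTypes.<String, Order>keyValueStore());
}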

Example 7 with Order

use of io.confluent.examples.streams.avro.microservices.Order in project kafka-streams-examples by confluentinc.

the class ValidationsAggregatorService method aggregateOrderValidations.

private KafkaStreams aggregateOrderValidations(String bootstrapServers, String stateDir) {
    // TODO put into a KTable to make dynamically configurable
    final int numberOfRules = 3;
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, OrderValidation> validations = builder.stream(ORDER_VALIDATIONS.name(), serdes1);
    KStream<String, Order> orders = builder.stream(ORDERS.name(), serdes2)
        .filter((id, order) -> OrderState.CREATED.equals(order.getState()));
    // If all rules pass then validate the order
    validations.groupByKey(serdes3).windowedBy(SessionWindows.with(5 * MIN))
        .aggregate(() -> 0L,
            (id, result, total) -> PASS.equals(result.getValidationResult()) ? total + 1 : total,
            // include a merger as we're using session windows.
            (k, a, b) -> b == null ? a : b,
            Materialized.with(null, Serdes.Long()))
        .toStream((windowedKey, total) -> windowedKey.key())
        .filter((k1, v) -> v != null).filter((k, total) -> total >= numberOfRules)
        .join(orders, (id, order) -> newBuilder(order).setState(VALIDATED).build(), JoinWindows.of(5 * MIN), serdes4)
        .to(ORDERS.name(), serdes5);
    // If any rule fails then fail the order
    validations.filter((id, rule) -> FAIL.equals(rule.getValidationResult()))
        .join(orders, (id, order) -> newBuilder(order).setState(OrderState.FAILED).build(), JoinWindows.of(5 * MIN), serdes7)
        .groupByKey(serdes6)
        .reduce((order, v1) -> order)
        .toStream().to(ORDERS.name(), Produced.with(ORDERS.keySerde(), ORDERS.valueSerde()));
    return new KafkaStreams(builder.build(), baseStreamsConfig(bootstrapServers, stateDir, ORDERS_SERVICE_APP_ID));
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Order(io.confluent.examples.streams.avro.microservices.Order) MIN(io.confluent.examples.streams.microservices.util.MicroserviceUtils.MIN) Order.newBuilder(io.confluent.examples.streams.avro.microservices.Order.newBuilder) Produced(org.apache.kafka.streams.kstream.Produced) SessionWindows(org.apache.kafka.streams.kstream.SessionWindows) Serialized(org.apache.kafka.streams.kstream.Serialized) LoggerFactory(org.slf4j.LoggerFactory) KStream(org.apache.kafka.streams.kstream.KStream) Joined(org.apache.kafka.streams.kstream.Joined) JoinWindows(org.apache.kafka.streams.kstream.JoinWindows) Consumed(org.apache.kafka.streams.Consumed) VALIDATED(io.confluent.examples.streams.avro.microservices.OrderState.VALIDATED) Serdes(org.apache.kafka.common.serialization.Serdes) ORDER_VALIDATIONS(io.confluent.examples.streams.microservices.domain.Schemas.Topics.ORDER_VALIDATIONS) MicroserviceUtils.parseArgsAndConfigure(io.confluent.examples.streams.microservices.util.MicroserviceUtils.parseArgsAndConfigure) OrderState(io.confluent.examples.streams.avro.microservices.OrderState) Logger(org.slf4j.Logger) ORDERS(io.confluent.examples.streams.microservices.domain.Schemas.Topics.ORDERS) MicroserviceUtils.addShutdownHookAndBlock(io.confluent.examples.streams.microservices.util.MicroserviceUtils.addShutdownHookAndBlock) FAIL(io.confluent.examples.streams.avro.microservices.OrderValidationResult.FAIL) PASS(io.confluent.examples.streams.avro.microservices.OrderValidationResult.PASS) MicroserviceUtils.baseStreamsConfig(io.confluent.examples.streams.microservices.util.MicroserviceUtils.baseStreamsConfig) OrderValidation(io.confluent.examples.streams.avro.microservices.OrderValidation) Materialized(org.apache.kafka.streams.kstream.Materialized) KafkaStreams(org.apache.kafka.streams.KafkaStreams)
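
The fields serdes1 through serdes7 referenced above are defined elsewhere in the class; the imports listed (Consumed, Serialized, Joined, Produced) suggest they are per-operation serde bindings derived from the topic schemas. A rough sketch of what those definitions could look like, assuming they are built from the ORDERS and ORDER_VALIDATIONS schema helpers (the exact definitions are an assumption, not taken from the snippet):

private final Consumed<String, OrderValidation> serdes1 =
    Consumed.with(ORDER_VALIDATIONS.keySerde(), ORDER_VALIDATIONS.valueSerde());
private final Consumed<String, Order> serdes2 =
    Consumed.with(ORDERS.keySerde(), ORDERS.valueSerde());
// groupByKey() on the validations stream
private final Serialized<String, OrderValidation> serdes3 =
    Serialized.with(ORDER_VALIDATIONS.keySerde(), ORDER_VALIDATIONS.valueSerde());
// join of the Long rule-count stream against the orders stream
private final Joined<String, Long, Order> serdes4 =
    Joined.with(ORDERS.keySerde(), Serdes.Long(), ORDERS.valueSerde());
private final Produced<String, Order> serdes5 =
    Produced.with(ORDERS.keySerde(), ORDERS.valueSerde());
// groupByKey() on the failed-order stream
private final Serialized<String, Order> serdes6 =
    Serialized.with(ORDERS.keySerde(), ORDERS.valueSerde());
// join of the failed-validation stream against the orders stream
private final Joined<String, OrderValidation, Order> serdes7 =
    Joined.with(ORDERS.keySerde(), ORDER_VALIDATIONS.valueSerde(), ORDERS.valueSerde());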

Example 8 with Order

use of io.confluent.examples.streams.avro.microservices.Order in project kafka-streams-examples by confluentinc.

the class FraudServiceTest method shouldValidateWhetherOrderAmountExceedsFraudLimitOverWindow.

@Test
public void shouldValidateWhetherOrderAmountExceedsFraudLimitOverWindow() throws Exception {
    // Given
    fraudService = new FraudService();
    List<Order> orders = asList(
        new Order(id(0L), 0L, CREATED, UNDERPANTS, 3, 5.00d),
        new Order(id(1L), 0L, CREATED, JUMPERS, 1, 75.00d),
        new Order(id(2L), 1L, CREATED, JUMPERS, 1, 75.00d),
        new Order(id(3L), 1L, CREATED, JUMPERS, 1, 75.00d),
        // Should fail as over limit
        new Order(id(4L), 1L, CREATED, JUMPERS, 50, 75.00d),
        // First should pass
        new Order(id(5L), 2L, CREATED, UNDERPANTS, 10, 100.00d),
        // Second should fail as rolling total by customer is over limit
        new Order(id(6L), 2L, CREATED, UNDERPANTS, 10, 100.00d),
        // Third should fail as rolling total by customer is still over limit
        new Order(id(7L), 2L, CREATED, UNDERPANTS, 1, 5.00d));
    sendOrders(orders);
    // When
    fraudService.start(CLUSTER.bootstrapServers());
    // Then there should be failures for the two orders that push customers over their limit.
    List<OrderValidation> expected = asList(
        new OrderValidation(id(0L), FRAUD_CHECK, PASS),
        new OrderValidation(id(1L), FRAUD_CHECK, PASS),
        new OrderValidation(id(2L), FRAUD_CHECK, PASS),
        new OrderValidation(id(3L), FRAUD_CHECK, PASS),
        new OrderValidation(id(4L), FRAUD_CHECK, FAIL),
        new OrderValidation(id(5L), FRAUD_CHECK, PASS),
        new OrderValidation(id(6L), FRAUD_CHECK, FAIL),
        new OrderValidation(id(7L), FRAUD_CHECK, FAIL));
    List<OrderValidation> read = read(Topics.ORDER_VALIDATIONS, 8, CLUSTER.bootstrapServers());
    assertThat(read).isEqualTo(expected);
}
Also used : Order(io.confluent.examples.streams.avro.microservices.Order) OrderValidation(io.confluent.examples.streams.avro.microservices.OrderValidation) Test(org.junit.Test)
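
For readers tracing the expected PASS/FAIL values: the service aggregates each customer's order value over a session window and fails every order once that customer's rolling total reaches a configured limit. The following plain-Java illustration is hypothetical; the FRAUD_LIMIT of 2000 is an assumption chosen only because it is consistent with the expectations above, and the real service keeps this state in a windowed Kafka Streams aggregation keyed by customer rather than in a HashMap.

import java.util.HashMap;
import java.util.Map;

class RollingFraudCheckSketch {
    static final double FRAUD_LIMIT = 2000d; // assumed value, see note above
    private final Map<Long, Double> totalsByCustomer = new HashMap<>();

    OrderValidationResult check(Order order) {
        double runningTotal = totalsByCustomer.merge(
            order.getCustomerId(), order.getQuantity() * order.getPrice(), Double::sum);
        // Once a customer's rolling total reaches the limit, every further order in the
        // window fails too (which is why order 7 above fails despite costing only 5.00).
        return runningTotal >= FRAUD_LIMIT ? FAIL : PASS;
    }
}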

Example 9 with Order

use of io.confluent.examples.streams.avro.microservices.Order in project kafka-streams-examples by confluentinc.

the class OrdersServiceTest method shouldGetValidatedOrderOnRequest.

@Test
public void shouldGetValidatedOrderOnRequest() {
    Order orderV1 = new Order(id(1L), 3L, OrderState.CREATED, Product.JUMPERS, 10, 100d);
    OrderBean beanV1 = OrderBean.toBean(orderV1);
    final Client client = ClientBuilder.newClient();
    // Given a rest service
    rest = new OrdersService("localhost");
    rest.start(CLUSTER.bootstrapServers());
    Paths paths = new Paths("localhost", rest.port());
    // When we post an order
    client.target(paths.urlPost()).request(APPLICATION_JSON_TYPE).post(Entity.json(beanV1));
    // Simulate the order being validated
    MicroserviceTestUtils.sendOrders(Collections.singletonList(newBuilder(orderV1).setState(OrderState.VALIDATED).build()));
    // When we GET the order from the returned location
    OrderBean returnedBean = client.target(paths.urlGetValidated(beanV1.getId()))
        .queryParam("timeout", MIN / 2)
        .request(APPLICATION_JSON_TYPE)
        .get(new GenericType<OrderBean>() {
        });
    // Then status should be Validated
    assertThat(returnedBean.getState()).isEqualTo(OrderState.VALIDATED);
}
Also used : Order(io.confluent.examples.streams.avro.microservices.Order) Paths(io.confluent.examples.streams.microservices.util.Paths) OrderBean(io.confluent.examples.streams.microservices.domain.beans.OrderBean) Client(javax.ws.rs.client.Client) Test(org.junit.Test)
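
The timeout query parameter in this test suggests the GET endpoint is a long poll: the server suspends the request and lets the delayed fetchLocal()/outstandingRequests path from Example 6 complete it once the order reaches the requested state. A hedged sketch of what such a JAX-RS resource method might look like (the path, parameter names and default value are assumptions, not taken from the snippet):

@GET
@Path("/orders/{id}/validated")
@Produces(MediaType.APPLICATION_JSON)
public void getValidatedOrder(@PathParam("id") final String id,
                              @QueryParam("timeout") @DefaultValue("2000") final Long timeout,
                              @Suspended final AsyncResponse asyncResponse) {
    // Suspend the request for up to the caller-supplied timeout instead of blocking a thread.
    asyncResponse.setTimeout(timeout, TimeUnit.MILLISECONDS);
    // Reuse fetchLocal() from Example 6, filtering for orders that have reached VALIDATED.
    fetchLocal(id, asyncResponse, (k, order) -> OrderState.VALIDATED.equals(order.getState()));
}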

Example 10 with Order

use of io.confluent.examples.streams.avro.microservices.Order in project kafka-streams-examples by confluentinc.

the class ValidationsAggregatorServiceTest method shouldAggregateRuleSuccesses.

@Test
public void shouldAggregateRuleSuccesses() throws Exception {
    // Given
    ordersService = new ValidationsAggregatorService();
    orders = asList(new Order(id(0L), 0L, CREATED, UNDERPANTS, 3, 5.00d), new Order(id(1L), 0L, CREATED, JUMPERS, 1, 75.00d));
    sendOrders(orders);
    ruleResults = asList(
        new OrderValidation(id(0L), OrderValidationType.FRAUD_CHECK, OrderValidationResult.PASS),
        new OrderValidation(id(0L), OrderValidationType.ORDER_DETAILS_CHECK, OrderValidationResult.PASS),
        new OrderValidation(id(0L), OrderValidationType.INVENTORY_CHECK, OrderValidationResult.PASS),
        new OrderValidation(id(1L), OrderValidationType.FRAUD_CHECK, OrderValidationResult.PASS),
        new OrderValidation(id(1L), OrderValidationType.ORDER_DETAILS_CHECK, OrderValidationResult.FAIL),
        new OrderValidation(id(1L), OrderValidationType.INVENTORY_CHECK, OrderValidationResult.PASS));
    sendOrderValuations(ruleResults);
    // When
    ordersService.start(CLUSTER.bootstrapServers());
    // Then
    List<KeyValue<String, Order>> finalOrders = MicroserviceTestUtils.readKeyValues(Topics.ORDERS, 4, CLUSTER.bootstrapServers());
    assertThat(finalOrders.size()).isEqualTo(4);
    // And the first order should have been validated but the second should have failed
    assertThat(finalOrders.stream().map(kv -> kv.value).collect(Collectors.toList()))
        .contains(
            new Order(id(0L), 0L, VALIDATED, UNDERPANTS, 3, 5.00d),
            new Order(id(1L), 0L, FAILED, JUMPERS, 1, 75.00d));
}
Also used : Order(io.confluent.examples.streams.avro.microservices.Order) MicroserviceTestUtils(io.confluent.examples.streams.microservices.util.MicroserviceTestUtils) OrderValidationResult(io.confluent.examples.streams.avro.microservices.OrderValidationResult) OrderValidationType(io.confluent.examples.streams.avro.microservices.OrderValidationType) JUMPERS(io.confluent.examples.streams.avro.microservices.Product.JUMPERS) BeforeClass(org.junit.BeforeClass) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) KeyValue(org.apache.kafka.streams.KeyValue) OrderId.id(io.confluent.examples.streams.microservices.domain.beans.OrderId.id) FAILED(io.confluent.examples.streams.avro.microservices.OrderState.FAILED) Test(org.junit.Test) Collectors(java.util.stream.Collectors) UNDERPANTS(io.confluent.examples.streams.avro.microservices.Product.UNDERPANTS) Topics(io.confluent.examples.streams.microservices.domain.Schemas.Topics) Schemas(io.confluent.examples.streams.microservices.domain.Schemas) List(java.util.List) Arrays.asList(java.util.Arrays.asList) After(org.junit.After) VALIDATED(io.confluent.examples.streams.avro.microservices.OrderState.VALIDATED) OrderValidation(io.confluent.examples.streams.avro.microservices.OrderValidation) CREATED(io.confluent.examples.streams.avro.microservices.OrderState.CREATED)

Aggregations

Order (io.confluent.examples.streams.avro.microservices.Order) 11
OrderValidation (io.confluent.examples.streams.avro.microservices.OrderValidation) 5
MicroserviceUtils.addShutdownHookAndBlock (io.confluent.examples.streams.microservices.util.MicroserviceUtils.addShutdownHookAndBlock) 4
MicroserviceUtils.parseArgsAndConfigure (io.confluent.examples.streams.microservices.util.MicroserviceUtils.parseArgsAndConfigure) 4
KafkaStreams (org.apache.kafka.streams.KafkaStreams) 4
KStream (org.apache.kafka.streams.kstream.KStream) 4
Logger (org.slf4j.Logger) 4
LoggerFactory (org.slf4j.LoggerFactory) 4
OrderState (io.confluent.examples.streams.avro.microservices.OrderState) 3
FAIL (io.confluent.examples.streams.avro.microservices.OrderValidationResult.FAIL) 3
PASS (io.confluent.examples.streams.avro.microservices.OrderValidationResult.PASS) 3
Schemas (io.confluent.examples.streams.microservices.domain.Schemas) 3
Topics (io.confluent.examples.streams.microservices.domain.Schemas.Topics) 3
ORDERS (io.confluent.examples.streams.microservices.domain.Schemas.Topics.ORDERS) 3
MicroserviceUtils.baseStreamsConfig (io.confluent.examples.streams.microservices.util.MicroserviceUtils.baseStreamsConfig) 3
Joined (org.apache.kafka.streams.kstream.Joined) 3
Test (org.junit.Test) 3
Customer (io.confluent.examples.streams.avro.microservices.Customer) 2
CREATED (io.confluent.examples.streams.avro.microservices.OrderState.CREATED) 2
VALIDATED (io.confluent.examples.streams.avro.microservices.OrderState.VALIDATED) 2