Example 1 with Grouped

Use of org.apache.kafka.streams.kstream.Grouped in the Apache Kafka project.

Source: class StreamsNamedRepartitionTest, method main.

public static void main(final String[] args) throws Exception {
    if (args.length < 1) {
        System.err.println("StreamsNamedRepartitionTest requires one argument (properties-file) but none provided");
        Exit.exit(1);
    }
    final String propFileName = args[0];
    final Properties streamsProperties = Utils.loadProps(propFileName);
    System.out.println("StreamsTest instance started NAMED_REPARTITION_TEST");
    System.out.println("props=" + streamsProperties);
    final String inputTopic = (String) (Objects.requireNonNull(streamsProperties.remove("input.topic")));
    final String aggregationTopic = (String) (Objects.requireNonNull(streamsProperties.remove("aggregation.topic")));
    final boolean addOperators = Boolean.valueOf(Objects.requireNonNull((String) streamsProperties.remove("add.operations")));
    final Initializer<Integer> initializer = () -> 0;
    final Aggregator<String, String, Integer> aggregator = (k, v, agg) -> agg + Integer.parseInt(v);
    final Function<String, String> keyFunction = s -> Integer.toString(Integer.parseInt(s) % 9);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> sourceStream = builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()));
    sourceStream.peek((k, v) -> System.out.println(String.format("input data key=%s, value=%s", k, v)));
    final KStream<String, String> mappedStream = sourceStream.selectKey((k, v) -> keyFunction.apply(v));
    final KStream<String, String> maybeUpdatedStream;
    if (addOperators) {
        maybeUpdatedStream = mappedStream.filter((k, v) -> true).mapValues(v -> Integer.toString(Integer.parseInt(v) + 1));
    } else {
        maybeUpdatedStream = mappedStream;
    }
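    // Grouped.with("grouped-stream", ...) gives the grouping an explicit name, so the
    // repartition topic gets a stable, user-defined name instead of a generated one.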
    maybeUpdatedStream.groupByKey(Grouped.with("grouped-stream", Serdes.String(), Serdes.String()))
        .aggregate(
            initializer,
            aggregator,
            Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("count-store")
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.Integer()))
        .toStream()
        .peek((k, v) -> System.out.println(String.format("AGGREGATED key=%s value=%s", k, v)))
        .to(aggregationTopic, Produced.with(Serdes.String(), Serdes.Integer()));
    final Properties config = new Properties();
    config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsNamedRepartitionTest");
    config.setProperty(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0");
    config.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    config.putAll(streamsProperties);
    final Topology topology = builder.build(config);
    final KafkaStreams streams = new KafkaStreams(topology, config);
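    // Print a marker to stdout whenever the instance transitions from REBALANCING to RUNNING.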
    streams.setStateListener((newState, oldState) -> {
        if (oldState == State.REBALANCING && newState == State.RUNNING) {
            if (addOperators) {
                System.out.println("UPDATED Topology");
            } else {
                System.out.println("REBALANCING -> RUNNING");
            }
            System.out.flush();
        }
    });
    streams.start();
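    // Close the Kafka Streams instance cleanly when the JVM shuts down.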
    Exit.addShutdownHook("streams-shutdown-hook", () -> {
        System.out.println("closing Kafka Streams instance");
        System.out.flush();
        streams.close(Duration.ofMillis(5000));
        System.out.println("NAMED_REPARTITION_TEST Streams Stopped");
        System.out.flush();
    });
}
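
For context, here is a minimal, self-contained sketch of the same Grouped naming technique. The class name and the topics "words" and "word-counts" are illustrative assumptions and do not appear in the Kafka test above.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.Produced;

public class GroupedNamingSketch {

    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();

        builder.stream("words", Consumed.with(Serdes.String(), Serdes.String()))
            // Changing the key forces Kafka Streams to repartition before the aggregation.
            .selectKey((k, v) -> v)
            // Naming the grouping fixes the repartition topic name instead of leaving it generated.
            .groupByKey(Grouped.with("grouped-stream", Serdes.String(), Serdes.String()))
            .count()
            .toStream()
            .to("word-counts", Produced.with(Serdes.String(), Serdes.Long()));

        final Topology topology = builder.build();
        // The topology description includes the named repartition topic.
        System.out.println(topology.describe());
    }
}

With an application.id of, say, "grouped-naming-sketch", the internal repartition topic created at runtime is expected to be named "grouped-naming-sketch-grouped-stream-repartition"; a stable name like this is what lets a test such as the one above add or remove operators without changing its repartition topics.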

Aggregations

Duration (java.time.Duration) 1
Objects (java.util.Objects) 1
Properties (java.util.Properties) 1
Function (java.util.function.Function) 1
Serdes (org.apache.kafka.common.serialization.Serdes) 1
Bytes (org.apache.kafka.common.utils.Bytes) 1
Exit (org.apache.kafka.common.utils.Exit) 1
Utils (org.apache.kafka.common.utils.Utils) 1
KafkaStreams (org.apache.kafka.streams.KafkaStreams) 1
State (org.apache.kafka.streams.KafkaStreams.State) 1
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder) 1
StreamsConfig (org.apache.kafka.streams.StreamsConfig) 1
Topology (org.apache.kafka.streams.Topology) 1
Aggregator (org.apache.kafka.streams.kstream.Aggregator) 1
Consumed (org.apache.kafka.streams.kstream.Consumed) 1
Grouped (org.apache.kafka.streams.kstream.Grouped) 1
Initializer (org.apache.kafka.streams.kstream.Initializer) 1
KStream (org.apache.kafka.streams.kstream.KStream) 1
Materialized (org.apache.kafka.streams.kstream.Materialized) 1
Produced (org.apache.kafka.streams.kstream.Produced) 1
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore) 1