Example 46 with Topology

Use of org.apache.kafka.streams.Topology in project kafka by apache.

The class StreamsGraphTest, method shouldNotOptimizeWhenRepartitionOperationIsDone.

@Test
public void shouldNotOptimizeWhenRepartitionOperationIsDone() {
    final Topology attemptedOptimize = getTopologyWithRepartitionOperation(StreamsConfig.OPTIMIZE);
    final Topology noOptimziation = getTopologyWithRepartitionOperation(StreamsConfig.NO_OPTIMIZATION);
    assertEquals(attemptedOptimize.describe().toString(), noOptimziation.describe().toString());
    assertEquals(2, getCountOfRepartitionTopicsFound(attemptedOptimize.describe().toString()));
    assertEquals(2, getCountOfRepartitionTopicsFound(noOptimziation.describe().toString()));
}
Also used : Topology(org.apache.kafka.streams.Topology) Test(org.junit.Test)
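
The helper getTopologyWithRepartitionOperation is not shown here. A minimal, hypothetical sketch of such a helper (topic names and operations are assumptions, not the test's actual code): after a key-changing selectKey, two explicit repartition() calls pin two repartition topics in place, so OPTIMIZE has nothing to collapse and both configurations describe the same topology.

// Hypothetical sketch only; the real helper in StreamsGraphTest is not shown above.
private static Topology getTopologyWithRepartitionOperation(final String optimizeConfig) {
    final Properties properties = new Properties();
    properties.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, optimizeConfig);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> mapped = builder
        .stream("input", Consumed.with(Serdes.String(), Serdes.String()))
        .selectKey((k, v) -> v);  // key-changing operation
    mapped.repartition()          // explicit repartition topic #1
        .groupByKey()
        .count()
        .toStream()
        .to("count-output", Produced.with(Serdes.String(), Serdes.Long()));
    mapped.repartition()          // explicit repartition topic #2
        .groupByKey()
        .reduce((v1, v2) -> v1 + v2)
        .toStream()
        .to("reduce-output", Produced.with(Serdes.String(), Serdes.String()));
    return builder.build(properties);
}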

Example 47 with Topology

Use of org.apache.kafka.streams.Topology in project kafka by apache.

The class StreamsGraphTest, method shouldNotThrowNPEWithMergeNodes.

// Topology in this test from https://issues.apache.org/jira/browse/KAFKA-9739
@Test
public void shouldNotThrowNPEWithMergeNodes() {
    final Properties properties = new Properties();
    properties.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "test-application");
    properties.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    properties.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
    final StreamsBuilder builder = new StreamsBuilder();
    initializer = () -> "";
    aggregator = (aggKey, value, aggregate) -> aggregate + value.length();
    final TransformerSupplier<String, String, KeyValue<String, String>> transformSupplier = () -> new Transformer<String, String, KeyValue<String, String>>() {

        @Override
        public void init(final ProcessorContext context) {
        }

        @Override
        public KeyValue<String, String> transform(final String key, final String value) {
            return KeyValue.pair(key, value);
        }

        @Override
        public void close() {
        }
    };
    final KStream<String, String> retryStream = builder
        .stream("retryTopic", Consumed.with(Serdes.String(), Serdes.String()))
        .transform(transformSupplier)
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .aggregate(initializer, aggregator, Materialized.with(Serdes.String(), Serdes.String()))
        .suppress(Suppressed.untilTimeLimit(Duration.ofSeconds(500), Suppressed.BufferConfig.maxBytes(64_000_000)))
        .toStream()
        .flatMap((k, v) -> new ArrayList<>());
    final KTable<String, String> idTable = builder
        .stream("id-table-topic", Consumed.with(Serdes.String(), Serdes.String()))
        .flatMap((k, v) -> new ArrayList<KeyValue<String, String>>())
        .peek((subscriptionId, recipientId) -> System.out.println("data " + subscriptionId + " " + recipientId))
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .aggregate(initializer, aggregator, Materialized.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> joinStream = builder
        .stream("internal-topic-command", Consumed.with(Serdes.String(), Serdes.String()))
        .peek((subscriptionId, command) -> System.out.println("stdoutput"))
        .mapValues((k, v) -> v)
        .merge(retryStream)
        .leftJoin(idTable, (v1, v2) -> v1 + v2, Joined.with(Serdes.String(), Serdes.String(), Serdes.String()));
    joinStream.split()
        .branch((k, v) -> v.equals("some-value"), Branched.withConsumer(ks -> ks
            .map(KeyValue::pair)
            .peek((recipientId, command) -> System.out.println("printing out"))
            .to("external-command", Produced.with(Serdes.String(), Serdes.String()))))
        .defaultBranch(Branched.withConsumer(ks -> {
            ks.filter((k, v) -> v != null)
                .peek((subscriptionId, wrapper) -> System.out.println("Printing output"))
                .mapValues((k, v) -> v)
                .to("dlq-topic", Produced.with(Serdes.String(), Serdes.String()));
            ks.map(KeyValue::pair)
                .to("retryTopic", Produced.with(Serdes.String(), Serdes.String()));
        }));
    final Topology topology = builder.build(properties);
    assertEquals(expectedComplexMergeOptimizeTopology, topology.describe().toString());
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) KStream(org.apache.kafka.streams.kstream.KStream) Joined(org.apache.kafka.streams.kstream.Joined) ArrayList(java.util.ArrayList) Initializer(org.apache.kafka.streams.kstream.Initializer) JoinWindows(org.apache.kafka.streams.kstream.JoinWindows) Matcher(java.util.regex.Matcher) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) Locale(java.util.Locale) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) Aggregator(org.apache.kafka.streams.kstream.Aggregator) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KTable(org.apache.kafka.streams.kstream.KTable) Properties(java.util.Properties) Consumed(org.apache.kafka.streams.kstream.Consumed) Transformer(org.apache.kafka.streams.kstream.Transformer) KeyValue(org.apache.kafka.streams.KeyValue) Suppressed(org.apache.kafka.streams.kstream.Suppressed) Test(org.junit.Test) Branched(org.apache.kafka.streams.kstream.Branched) Grouped(org.apache.kafka.streams.kstream.Grouped) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) List(java.util.List) TimeWindows(org.apache.kafka.streams.kstream.TimeWindows) ValueJoiner(org.apache.kafka.streams.kstream.ValueJoiner) Materialized(org.apache.kafka.streams.kstream.Materialized) Pattern(java.util.regex.Pattern) Duration.ofMillis(java.time.Duration.ofMillis) Topology(org.apache.kafka.streams.Topology) Assert.assertEquals(org.junit.Assert.assertEquals)
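
The shape that triggered KAFKA-9739 is a suppressed aggregate turned back into a stream and merged with another stream before a join. A stripped-down sketch of that suppress-then-merge pattern, with hypothetical topic names rather than the test's exact topology:

// Sketch only: hypothetical topic names, not the test's exact topology.
final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> left = builder
    .stream("left-topic", Consumed.with(Serdes.String(), Serdes.String()));
final KStream<String, String> suppressed = builder
    .stream("right-topic", Consumed.with(Serdes.String(), Serdes.String()))
    .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
    .aggregate(() -> "",
               (k, v, agg) -> agg + v.length(),
               Materialized.with(Serdes.String(), Serdes.String()))
    .suppress(Suppressed.untilTimeLimit(Duration.ofSeconds(500),
                                        Suppressed.BufferConfig.maxBytes(64_000_000)))
    .toStream();
left.merge(suppressed)
    .to("merged-output", Produced.with(Serdes.String(), Serdes.String()));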

Example 48 with Topology

Use of org.apache.kafka.streams.Topology in project kafka by apache.

The class StreamsOptimizedTest, method main.

public static void main(final String[] args) throws Exception {
    if (args.length < 1) {
        System.err.println("StreamsOptimizedTest requires one argument (properties-file) but none provided");
        Exit.exit(1);
    }
    final String propFileName = args[0];
    final Properties streamsProperties = Utils.loadProps(propFileName);
    System.out.println("StreamsTest instance started StreamsOptimizedTest");
    System.out.println("props=" + streamsProperties);
    final String inputTopic = (String) Objects.requireNonNull(streamsProperties.remove("input.topic"));
    final String aggregationTopic = (String) Objects.requireNonNull(streamsProperties.remove("aggregation.topic"));
    final String reduceTopic = (String) Objects.requireNonNull(streamsProperties.remove("reduce.topic"));
    final String joinTopic = (String) Objects.requireNonNull(streamsProperties.remove("join.topic"));
    final Pattern repartitionTopicPattern = Pattern.compile("Sink: .*-repartition");
    final Initializer<Integer> initializer = () -> 0;
    final Aggregator<String, String, Integer> aggregator = (k, v, agg) -> agg + v.length();
    final Reducer<String> reducer = (v1, v2) -> Integer.toString(Integer.parseInt(v1) + Integer.parseInt(v2));
    final Function<String, String> keyFunction = s -> Integer.toString(Integer.parseInt(s) % 9);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> sourceStream = builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> mappedStream = sourceStream.selectKey((k, v) -> keyFunction.apply(v));
    final KStream<String, Long> countStream = mappedStream.groupByKey()
        .count(Materialized.with(Serdes.String(), Serdes.Long()))
        .toStream();
    mappedStream.groupByKey()
        .aggregate(initializer, aggregator, Materialized.with(Serdes.String(), Serdes.Integer()))
        .toStream()
        .peek((k, v) -> System.out.println(String.format("AGGREGATED key=%s value=%s", k, v)))
        .to(aggregationTopic, Produced.with(Serdes.String(), Serdes.Integer()));
    mappedStream.groupByKey()
        .reduce(reducer, Materialized.with(Serdes.String(), Serdes.String()))
        .toStream()
        .peek((k, v) -> System.out.println(String.format("REDUCED key=%s value=%s", k, v)))
        .to(reduceTopic, Produced.with(Serdes.String(), Serdes.String()));
    mappedStream.join(countStream, (v1, v2) -> v1 + ":" + v2.toString(),
            JoinWindows.of(ofMillis(500)),
            StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.Long()))
        .peek((k, v) -> System.out.println(String.format("JOINED key=%s value=%s", k, v)))
        .to(joinTopic, Produced.with(Serdes.String(), Serdes.String()));
    final Properties config = new Properties();
    config.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "StreamsOptimizedTest");
    config.setProperty(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, "0");
    config.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    config.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    config.setProperty(StreamsConfig.adminClientPrefix(AdminClientConfig.RETRIES_CONFIG), "100");
    config.putAll(streamsProperties);
    final Topology topology = builder.build(config);
    final KafkaStreams streams = new KafkaStreams(topology, config);
    streams.setStateListener((newState, oldState) -> {
        if (oldState == State.REBALANCING && newState == State.RUNNING) {
            final int repartitionTopicCount = getCountOfRepartitionTopicsFound(topology.describe().toString(), repartitionTopicPattern);
            System.out.println(String.format("REBALANCING -> RUNNING with REPARTITION TOPIC COUNT=%d", repartitionTopicCount));
            System.out.flush();
        }
    });
    streams.cleanUp();
    streams.start();
    Exit.addShutdownHook("streams-shutdown-hook", () -> {
        System.out.println("closing Kafka Streams instance");
        System.out.flush();
        streams.close(Duration.ofMillis(5000));
        System.out.println("OPTIMIZE_TEST Streams Stopped");
        System.out.flush();
    });
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) Exit(org.apache.kafka.common.utils.Exit) Produced(org.apache.kafka.streams.kstream.Produced) KStream(org.apache.kafka.streams.kstream.KStream) Function(java.util.function.Function) StreamJoined(org.apache.kafka.streams.kstream.StreamJoined) ArrayList(java.util.ArrayList) Initializer(org.apache.kafka.streams.kstream.Initializer) JoinWindows(org.apache.kafka.streams.kstream.JoinWindows) Matcher(java.util.regex.Matcher) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) Aggregator(org.apache.kafka.streams.kstream.Aggregator) Utils(org.apache.kafka.common.utils.Utils) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Properties(java.util.Properties) Consumed(org.apache.kafka.streams.kstream.Consumed) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) State(org.apache.kafka.streams.KafkaStreams.State) Objects(java.util.Objects) List(java.util.List) Reducer(org.apache.kafka.streams.kstream.Reducer) Materialized(org.apache.kafka.streams.kstream.Materialized) KafkaStreams(org.apache.kafka.streams.KafkaStreams) Pattern(java.util.regex.Pattern) Duration.ofMillis(java.time.Duration.ofMillis) Topology(org.apache.kafka.streams.Topology)
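
The getCountOfRepartitionTopicsFound helper is not shown. A plausible implementation, given the repartitionTopicPattern defined above, simply counts pattern matches in the printed topology description:

// Plausible implementation sketch; the real helper is not shown above.
private static int getCountOfRepartitionTopicsFound(final String topologyString,
                                                    final Pattern repartitionTopicPattern) {
    final Matcher matcher = repartitionTopicPattern.matcher(topologyString);
    int count = 0;
    while (matcher.find()) {
        count++;
    }
    return count;
}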

Example 49 with Topology

Use of org.apache.kafka.streams.Topology in project kafka by apache.

The class SessionStoreFetchTest, method testStoreConfig.

@Test
public void testStoreConfig() {
    final Materialized<String, Long, SessionStore<Bytes, byte[]>> stateStoreConfig = getStoreConfig(storeType, STORE_NAME, enableLogging, enableCaching);
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()));
    stream.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(WINDOW_SIZE)))
        .count(stateStoreConfig)
        .toStream()
        .to("output");
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
        // get input topic and stateStore
        final TestInputTopic<String, String> input = driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
        final SessionStore<String, Long> stateStore = driver.getSessionStore(STORE_NAME);
        // write some data
        // DATA_SIZE rounded down to an even number; records below this index
        // start at time 0, the rest at a later window start
        final int medium = DATA_SIZE / 2 * 2;
        for (int i = 0; i < records.size(); i++) {
            final KeyValue<String, String> kv = records.get(i);
            final long windowStartTime = i < medium ? 0 : 1500;
            input.pipeInput(kv.key, kv.value, windowStartTime);
            input.pipeInput(kv.key, kv.value, windowStartTime + WINDOW_SIZE);
        }
        verifyNormalQuery(stateStore);
        verifyInfiniteQuery(stateStore);
        verifyRangeQuery(stateStore);
    }
}
Also used : TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) Topology(org.apache.kafka.streams.Topology) SessionStore(org.apache.kafka.streams.state.SessionStore) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.Test)
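
getStoreConfig is defined elsewhere in the test. A hedged sketch of what such a helper can look like for the session-store case; the boolean inMemory stands in for the test's storeType parameter, WINDOW_SIZE is the test's constant, and Stores/SessionBytesStoreSupplier come from org.apache.kafka.streams.state:

// Hypothetical sketch; parameter names and store choices are assumptions.
private static Materialized<String, Long, SessionStore<Bytes, byte[]>> getStoreConfig(
        final boolean inMemory,
        final String name,
        final boolean enableLogging,
        final boolean enableCaching) {
    // retention must cover the inactivity gap; with no grace, the gap itself suffices
    final SessionBytesStoreSupplier supplier = inMemory
        ? Stores.inMemorySessionStore(name, Duration.ofMillis(WINDOW_SIZE))
        : Stores.persistentSessionStore(name, Duration.ofMillis(WINDOW_SIZE));
    Materialized<String, Long, SessionStore<Bytes, byte[]>> materialized =
        Materialized.<String, Long>as(supplier)
            .withKeySerde(Serdes.String())
            .withValueSerde(Serdes.Long());
    materialized = enableLogging
        ? materialized.withLoggingEnabled(new HashMap<>())  // empty map keeps default changelog config
        : materialized.withLoggingDisabled();
    return enableCaching
        ? materialized.withCachingEnabled()
        : materialized.withCachingDisabled();
}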

Example 50 with Topology

Use of org.apache.kafka.streams.Topology in project kafka by apache.

The class WindowStoreFetchTest, method testStoreConfig.

@Test
public void testStoreConfig() {
    final Materialized<String, Long, WindowStore<Bytes, byte[]>> stateStoreConfig = getStoreConfig(storeType, STORE_NAME, enableLogging, enableCaching);
    // Create topology: table from input topic
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> stream = builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()));
    stream.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(WINDOW_SIZE)))
        .count(stateStoreConfig)
        .toStream()
        .to("output");
    final Topology topology = builder.build();
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
        // get input topic and stateStore
        final TestInputTopic<String, String> input = driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
        final WindowStore<String, Long> stateStore = driver.getWindowStore(STORE_NAME);
        // write some data
        // DATA_SIZE rounded down to an even number; records below this index
        // start at time 0, the rest one window later
        final int medium = DATA_SIZE / 2 * 2;
        for (int i = 0; i < records.size(); i++) {
            final KeyValue<String, String> kv = records.get(i);
            final long windowStartTime = i < medium ? 0 : WINDOW_SIZE;
            input.pipeInput(kv.key, kv.value, windowStartTime + i);
        }
        // query the state store
        try (final KeyValueIterator<Windowed<String>, Long> scanIterator = forward
                ? stateStore.fetchAll(0, Long.MAX_VALUE)
                : stateStore.backwardFetchAll(0, Long.MAX_VALUE)) {
            final Iterator<KeyValue<Windowed<String>, Long>> dataIterator = forward
                ? expectedRecords.iterator()
                : expectedRecords.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        try (final KeyValueIterator<Windowed<String>, Long> scanIterator = forward
                ? stateStore.fetch(null, null, 0, Long.MAX_VALUE)
                : stateStore.backwardFetch(null, null, 0, Long.MAX_VALUE)) {
            final Iterator<KeyValue<Windowed<String>, Long>> dataIterator = forward
                ? expectedRecords.iterator()
                : expectedRecords.descendingIterator();
            TestUtils.checkEquals(scanIterator, dataIterator);
        }
        testRange("range", stateStore, innerLow, innerHigh, forward);
        testRange("until", stateStore, null, middle, forward);
        testRange("from", stateStore, middle, null, forward);
        testRange("untilBetween", stateStore, null, innerHighBetween, forward);
        testRange("fromBetween", stateStore, innerLowBetween, null, forward);
    }
}
Also used : KeyValue(org.apache.kafka.streams.KeyValue) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) Topology(org.apache.kafka.streams.Topology) WindowStore(org.apache.kafka.streams.state.WindowStore) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Windowed(org.apache.kafka.streams.kstream.Windowed) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.Test)
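
Both fetch tests lean on TopologyTestDriver, which executes a Topology against in-memory test topics with no broker. A minimal, self-contained round trip, with hypothetical topic names, looks like:

// Minimal sketch: pipe one record through a pass-through topology and read it back.
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
       .to("output", Produced.with(Serdes.String(), Serdes.String()));
try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build())) {
    final TestInputTopic<String, String> input =
        driver.createInputTopic("input", new StringSerializer(), new StringSerializer());
    final TestOutputTopic<String, String> output =
        driver.createOutputTopic("output", new StringDeserializer(), new StringDeserializer());
    input.pipeInput("key", "value");
    assertEquals(KeyValue.pair("key", "value"), output.readKeyValue());
}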

Aggregations

Topology (org.apache.kafka.streams.Topology): 127
Test (org.junit.Test): 106
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 93
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 53
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 53
Properties (java.util.Properties): 47
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 46
KeyValue (org.apache.kafka.streams.KeyValue): 40
Serdes (org.apache.kafka.common.serialization.Serdes): 39
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 33
List (java.util.List): 29
MutableSpan (brave.handler.MutableSpan): 28
Consumed (org.apache.kafka.streams.kstream.Consumed): 28
Produced (org.apache.kafka.streams.kstream.Produced): 26
Arrays (java.util.Arrays): 25
StringDeserializer (org.apache.kafka.common.serialization.StringDeserializer): 25
ArrayList (java.util.ArrayList): 23
ProcessorContext (org.apache.kafka.streams.processor.ProcessorContext): 23
Duration (java.time.Duration): 22
KStream (org.apache.kafka.streams.kstream.KStream): 22