
Example 1 with TransformerSupplier

Use of org.apache.kafka.streams.kstream.TransformerSupplier in project kafka by apache.

From the class EosIntegrationTest, method getKafkaStreams.

// the threads should no longer fail one at a time
@SuppressWarnings("deprecation")
private KafkaStreams getKafkaStreams(final String dummyHostName, final boolean withState, final String appDir, final int numberOfStreamsThreads, final String eosConfig, final int maxPollIntervalMs) {
    commitRequested = new AtomicInteger(0);
    errorInjected = new AtomicBoolean(false);
    stallInjected = new AtomicBoolean(false);
    stallingHost = new AtomicReference<>();
    final StreamsBuilder builder = new StreamsBuilder();
    String[] storeNames = new String[0];
    if (withState) {
        storeNames = new String[] { storeName };
        final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
        builder.addStateStore(storeBuilder);
    }
    final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
    input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {

        @SuppressWarnings("unchecked")
        @Override
        public Transformer<Long, Long, KeyValue<Long, Long>> get() {
            return new Transformer<Long, Long, KeyValue<Long, Long>>() {

                ProcessorContext context;

                KeyValueStore<Long, Long> state = null;

                @Override
                public void init(final ProcessorContext context) {
                    this.context = context;
                    if (withState) {
                        state = (KeyValueStore<Long, Long>) context.getStateStore(storeName);
                    }
                }

                @Override
                public KeyValue<Long, Long> transform(final Long key, final Long value) {
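                    // a one-shot stall: once stallInjected is set, this thread blocks here
                    // until doStall is cleared or the thread is shut down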
                    if (stallInjected.compareAndSet(true, false)) {
                        LOG.info(dummyHostName + " is executing the injected stall");
                        stallingHost.set(dummyHostName);
                        while (doStall) {
                            final StreamThread thread = (StreamThread) Thread.currentThread();
                            if (thread.isInterrupted() || !thread.isRunning()) {
                                throw new RuntimeException("Detected we've been interrupted.");
                            }
                            try {
                                Thread.sleep(100);
                            } catch (final InterruptedException e) {
                                throw new RuntimeException(e);
                            }
                        }
                    }
                    if ((value + 1) % 10 == 0) {
                        context.commit();
                        commitRequested.incrementAndGet();
                    }
                    if (state != null) {
                        Long sum = state.get(key);
                        if (sum == null) {
                            sum = value;
                        } else {
                            sum += value;
                        }
                        state.put(key, sum);
                        state.flush();
                    }
                    if (errorInjected.compareAndSet(true, false)) {
                        // fail only once, on one of the tasks
                        throw new RuntimeException("Injected test exception.");
                    }
                    if (state != null) {
                        return new KeyValue<>(key, state.get(key));
                    } else {
                        return new KeyValue<>(key, value);
                    }
                }

                @Override
                public void close() {
                }
            };
        }
    }, storeNames).to(SINGLE_PARTITION_OUTPUT_TOPIC);
    stateTmpDir = TestUtils.tempDirectory().getPath() + File.separator;
    final Properties properties = new Properties();
    // Use a commit interval large enough not to interfere with the controlled stream commits,
    // but not too large: the transaction timeout is set to the same value below and must stay
    // low enough to trigger the timed-out-transaction case in time.
    final long commitIntervalMs = 20_000L;
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, eosConfig);
    properties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numberOfStreamsThreads);
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, commitIntervalMs);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), (int) commitIntervalMs);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), "1000");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), maxPollIntervalMs);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), maxPollIntervalMs - 1);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), maxPollIntervalMs);
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, stateTmpDir + appDir);
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, dummyHostName + ":2142");
    final Properties config = StreamsTestUtils.getStreamsConfig(applicationId, CLUSTER.bootstrapServers(), Serdes.LongSerde.class.getName(), Serdes.LongSerde.class.getName(), properties);
    final KafkaStreams streams = new KafkaStreams(builder.build(), config);
    streams.setUncaughtExceptionHandler((t, e) -> {
        if (uncaughtException != null || !e.getMessage().contains("Injected test exception")) {
            e.printStackTrace(System.err);
            hasUnexpectedError = true;
        }
        uncaughtException = e;
    });
    return streams;
}
Also used : KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) StreamThread(org.apache.kafka.streams.processor.internals.StreamThread) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) MockKeyValueStore(org.apache.kafka.test.MockKeyValueStore) Properties(java.util.Properties) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) MockInternalProcessorContext(org.apache.kafka.test.MockInternalProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
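
Note on the deprecated API above: KStream#transform and the Transformer interface are deprecated in newer Kafka Streams releases (hence the @SuppressWarnings("deprecation")). As a minimal sketch, assuming Kafka Streams 3.3 or newer, the same stateful logic can be written with KStream#process and the org.apache.kafka.streams.processor.api types (Processor, ProcessorContext, Record); the snippet is illustrative and not part of the test:

input.process(() -> new Processor<Long, Long, Long, Long>() {

    private ProcessorContext<Long, Long> context;
    private KeyValueStore<Long, Long> state;

    @Override
    public void init(final ProcessorContext<Long, Long> context) {
        this.context = context;
        // the new ProcessorContext#getStateStore is generic, so no unchecked cast is needed
        state = context.getStateStore(storeName);
    }

    @Override
    public void process(final Record<Long, Long> record) {
        final Long previous = state.get(record.key());
        final long sum = previous == null ? record.value() : previous + record.value();
        state.put(record.key(), sum);
        // forward a Record downstream instead of returning a KeyValue
        context.forward(record.withValue(sum));
    }
}, storeName).to(SINGLE_PARTITION_OUTPUT_TOPIC);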

Example 2 with TransformerSupplier

Use of org.apache.kafka.streams.kstream.TransformerSupplier in project kafka by apache.

From the class EosV2UpgradeIntegrationTest, method getKafkaStreams.

private KafkaStreams getKafkaStreams(final String appDir, final String processingGuarantee) {
    final StreamsBuilder builder = new StreamsBuilder();
    final String[] storeNames = new String[] { storeName };
    final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
    builder.addStateStore(storeBuilder);
    final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
    input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {

        @Override
        public Transformer<Long, Long, KeyValue<Long, Long>> get() {
            return new Transformer<Long, Long, KeyValue<Long, Long>>() {

                ProcessorContext context;

                KeyValueStore<Long, Long> state = null;

                AtomicBoolean crash;

                AtomicInteger sharedCommit;

                @Override
                public void init(final ProcessorContext context) {
                    this.context = context;
                    state = context.getStateStore(storeName);
                    final String clientId = context.appConfigs().get(StreamsConfig.CLIENT_ID_CONFIG).toString();
                    if (APP_DIR_1.equals(clientId)) {
                        crash = errorInjectedClient1;
                        sharedCommit = commitCounterClient1;
                    } else {
                        crash = errorInjectedClient2;
                        sharedCommit = commitCounterClient2;
                    }
                }

                @Override
                public KeyValue<Long, Long> transform(final Long key, final Long value) {
                    if ((value + 1) % 10 == 0) {
                        if (sharedCommit.get() < 0 || sharedCommit.incrementAndGet() == 2) {
                            context.commit();
                        }
                        commitRequested.incrementAndGet();
                    }
                    Long sum = state.get(key);
                    if (sum == null) {
                        sum = value;
                    } else {
                        sum += value;
                    }
                    state.put(key, sum);
                    state.flush();
                    // potentially crash when processing the 5th, 15th, or 25th record (etc.)
                    if (value % 10 == 4 && crash != null && crash.compareAndSet(true, false)) {
                        // only crash a single task
                        throw new RuntimeException("Injected test exception.");
                    }
                    return new KeyValue<>(key, state.get(key));
                }

                @Override
                public void close() {
                }
            };
        }
    }, storeNames).to(MULTI_PARTITION_OUTPUT_TOPIC);
    final Properties properties = new Properties();
    properties.put(StreamsConfig.CLIENT_ID_CONFIG, appDir);
    properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, processingGuarantee);
    final long commitInterval = Duration.ofMinutes(1L).toMillis();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, commitInterval);
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.METADATA_MAX_AGE_CONFIG), Duration.ofSeconds(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), (int) Duration.ofSeconds(5L).minusMillis(1L).toMillis());
    properties.put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG), (int) commitInterval);
    properties.put(StreamsConfig.producerPrefix(ProducerConfig.PARTITIONER_CLASS_CONFIG), KeyPartitioner.class);
    properties.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath() + File.separator + appDir);
    properties.put(InternalConfig.ASSIGNMENT_LISTENER, assignmentListener);
    final Properties config = StreamsTestUtils.getStreamsConfig(applicationId, CLUSTER.bootstrapServers(), Serdes.LongSerde.class.getName(), Serdes.LongSerde.class.getName(), properties);
    final KafkaStreams streams = new KafkaStreams(builder.build(), config, new TestKafkaClientSupplier());
    streams.setUncaughtExceptionHandler(e -> {
        if (!injectError) {
            // no exceptions are expected in the stop case
            e.printStackTrace(System.err);
            hasUnexpectedError = true;
        } else {
            int exceptionCount = (int) exceptionCounts.get(appDir);
            // we should only see our injected exception or a commit exception, at most 2 exceptions per stream
            if (++exceptionCount > 2 || !(e instanceof RuntimeException) || !(e.getMessage().contains("test exception"))) {
                // An "expected" exception won't fail the test, since we deliberately crash the stream.
                // Log to stderr for debugging when the exception is not what we expected, and fail in the main thread.
                e.printStackTrace(System.err);
                hasUnexpectedError = true;
            }
            exceptionCounts.put(appDir, exceptionCount);
        }
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
    });
    return streams;
}
Also used : KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Properties(java.util.Properties) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
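
The lambda handler above is the KIP-671 StreamsUncaughtExceptionHandler introduced in Kafka 2.8; it replaces the Thread.UncaughtExceptionHandler seen in Example 4 below. A minimal sketch of the three possible responses (the OutOfMemoryError check is illustrative, not part of the test):

streams.setUncaughtExceptionHandler(exception -> {
    // SHUTDOWN_APPLICATION signals every instance of the application to stop
    if (exception.getCause() instanceof OutOfMemoryError) {
        return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_APPLICATION;
    }
    // SHUTDOWN_CLIENT (used in the test above) stops only this KafkaStreams instance;
    // REPLACE_THREAD restarts the failed stream thread and keeps the instance running
    return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.REPLACE_THREAD;
});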

Example 3 with TransformerSupplier

Use of org.apache.kafka.streams.kstream.TransformerSupplier in project kafka by apache.

From the class StreamsGraphTest, method shouldNotThrowNPEWithMergeNodes.

@Test
// Topology in this test from https://issues.apache.org/jira/browse/KAFKA-9739
public void shouldNotThrowNPEWithMergeNodes() {
    final Properties properties = new Properties();
    properties.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "test-application");
    properties.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    properties.setProperty(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
    final StreamsBuilder builder = new StreamsBuilder();
    initializer = () -> "";
    aggregator = (aggKey, value, aggregate) -> aggregate + value.length();
    final TransformerSupplier<String, String, KeyValue<String, String>> transformSupplier = () -> new Transformer<String, String, KeyValue<String, String>>() {

        @Override
        public void init(final ProcessorContext context) {
        }

        @Override
        public KeyValue<String, String> transform(final String key, final String value) {
            return KeyValue.pair(key, value);
        }

        @Override
        public void close() {
        }
    };
    final KStream<String, String> retryStream = builder
        .stream("retryTopic", Consumed.with(Serdes.String(), Serdes.String()))
        .transform(transformSupplier)
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .aggregate(initializer, aggregator, Materialized.with(Serdes.String(), Serdes.String()))
        .suppress(Suppressed.untilTimeLimit(Duration.ofSeconds(500), Suppressed.BufferConfig.maxBytes(64_000_000)))
        .toStream()
        .flatMap((k, v) -> new ArrayList<>());
    final KTable<String, String> idTable = builder
        .stream("id-table-topic", Consumed.with(Serdes.String(), Serdes.String()))
        .flatMap((k, v) -> new ArrayList<KeyValue<String, String>>())
        .peek((subscriptionId, recipientId) -> System.out.println("data " + subscriptionId + " " + recipientId))
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .aggregate(initializer, aggregator, Materialized.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> joinStream = builder
        .stream("internal-topic-command", Consumed.with(Serdes.String(), Serdes.String()))
        .peek((subscriptionId, command) -> System.out.println("stdoutput"))
        .mapValues((k, v) -> v)
        .merge(retryStream)
        .leftJoin(idTable, (v1, v2) -> v1 + v2, Joined.with(Serdes.String(), Serdes.String(), Serdes.String()));
    joinStream.split()
        .branch((k, v) -> v.equals("some-value"), Branched.withConsumer(ks -> ks
            .map(KeyValue::pair)
            .peek((recipientId, command) -> System.out.println("printing out"))
            .to("external-command", Produced.with(Serdes.String(), Serdes.String()))))
        .defaultBranch(Branched.withConsumer(ks -> {
            ks.filter((k, v) -> v != null)
                .peek((subscriptionId, wrapper) -> System.out.println("Printing output"))
                .mapValues((k, v) -> v)
                .to("dlq-topic", Produced.with(Serdes.String(), Serdes.String()));
            ks.map(KeyValue::pair).to("retryTopic", Produced.with(Serdes.String(), Serdes.String()));
        }));
    final Topology topology = builder.build(properties);
    assertEquals(expectedComplexMergeOptimizeTopology, topology.describe().toString());
}
Also used : StreamsConfig(org.apache.kafka.streams.StreamsConfig) Arrays(java.util.Arrays) Produced(org.apache.kafka.streams.kstream.Produced) KStream(org.apache.kafka.streams.kstream.KStream) Joined(org.apache.kafka.streams.kstream.Joined) ArrayList(java.util.ArrayList) Initializer(org.apache.kafka.streams.kstream.Initializer) JoinWindows(org.apache.kafka.streams.kstream.JoinWindows) Matcher(java.util.regex.Matcher) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) Locale(java.util.Locale) Duration(java.time.Duration) Serdes(org.apache.kafka.common.serialization.Serdes) Aggregator(org.apache.kafka.streams.kstream.Aggregator) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) KTable(org.apache.kafka.streams.kstream.KTable) Properties(java.util.Properties) Consumed(org.apache.kafka.streams.kstream.Consumed) Transformer(org.apache.kafka.streams.kstream.Transformer) KeyValue(org.apache.kafka.streams.KeyValue) Suppressed(org.apache.kafka.streams.kstream.Suppressed) Test(org.junit.Test) Branched(org.apache.kafka.streams.kstream.Branched) Grouped(org.apache.kafka.streams.kstream.Grouped) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) List(java.util.List) TimeWindows(org.apache.kafka.streams.kstream.TimeWindows) ValueJoiner(org.apache.kafka.streams.kstream.ValueJoiner) Materialized(org.apache.kafka.streams.kstream.Materialized) Pattern(java.util.regex.Pattern) Duration.ofMillis(java.time.Duration.ofMillis) Topology(org.apache.kafka.streams.Topology) Assert.assertEquals(org.junit.Assert.assertEquals) KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) ArrayList(java.util.ArrayList) Topology(org.apache.kafka.streams.Topology) Properties(java.util.Properties) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Test(org.junit.Test)
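
Since Kafka 2.6 (KIP-401), a TransformerSupplier can also supply its own state store by overriding stores() from ConnectedStoreProvider, avoiding the explicit builder.addStateStore() and store-name varargs used in Examples 1 and 2. A minimal sketch against a recent Kafka Streams release, with a hypothetical store named "counts":

final TransformerSupplier<String, String, KeyValue<String, String>> withStore =
    new TransformerSupplier<String, String, KeyValue<String, String>>() {

        @Override
        public Set<StoreBuilder<?>> stores() {
            // the store is built and connected to the transformer automatically
            return Collections.singleton(Stores.keyValueStoreBuilder(
                Stores.inMemoryKeyValueStore("counts"), Serdes.String(), Serdes.Long()));
        }

        @Override
        public Transformer<String, String, KeyValue<String, String>> get() {
            return new Transformer<String, String, KeyValue<String, String>>() {

                private KeyValueStore<String, Long> store;

                @Override
                public void init(final ProcessorContext context) {
                    store = context.getStateStore("counts");
                }

                @Override
                public KeyValue<String, String> transform(final String key, final String value) {
                    final Long seen = store.get(key);
                    store.put(key, seen == null ? 1L : seen + 1L);
                    return KeyValue.pair(key, value);
                }

                @Override
                public void close() {
                }
            };
        }
    };
// no store-name arguments needed on transform()
builder.stream("retryTopic", Consumed.with(Serdes.String(), Serdes.String())).transform(withStore);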

Example 4 with TransformerSupplier

Use of org.apache.kafka.streams.kstream.TransformerSupplier in project apache-kafka-on-k8s by banzaicloud.

From the class EosIntegrationTest, method getKafkaStreams.

private KafkaStreams getKafkaStreams(final boolean withState, final String appDir, final int numberOfStreamsThreads) {
    commitRequested = new AtomicInteger(0);
    errorInjected = new AtomicBoolean(false);
    gcInjected = new AtomicBoolean(false);
    final StreamsBuilder builder = new StreamsBuilder();
    String[] storeNames = null;
    if (withState) {
        storeNames = new String[] { storeName };
        final StoreBuilder<KeyValueStore<Long, Long>> storeBuilder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(storeName), Serdes.Long(), Serdes.Long()).withCachingEnabled();
        builder.addStateStore(storeBuilder);
    }
    final KStream<Long, Long> input = builder.stream(MULTI_PARTITION_INPUT_TOPIC);
    input.transform(new TransformerSupplier<Long, Long, KeyValue<Long, Long>>() {

        @SuppressWarnings("unchecked")
        @Override
        public Transformer<Long, Long, KeyValue<Long, Long>> get() {
            return new Transformer<Long, Long, KeyValue<Long, Long>>() {

                ProcessorContext context;

                KeyValueStore<Long, Long> state = null;

                @Override
                public void init(final ProcessorContext context) {
                    this.context = context;
                    if (withState) {
                        state = (KeyValueStore<Long, Long>) context.getStateStore(storeName);
                    }
                }

                @Override
                public KeyValue<Long, Long> transform(final Long key, final Long value) {
                    if (errorInjected.compareAndSet(true, false)) {
                        // fail only once, on one of the tasks
                        throw new RuntimeException("Injected test exception.");
                    }
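                    // simulate a long GC pause: once gcInjected is set, block until doGC is cleared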
                    if (gcInjected.compareAndSet(true, false)) {
                        while (doGC) {
                            try {
                                Thread.sleep(100);
                            } catch (final InterruptedException e) {
                                throw new RuntimeException(e);
                            }
                        }
                    }
                    if ((value + 1) % 10 == 0) {
                        context.commit();
                        commitRequested.incrementAndGet();
                    }
                    if (state != null) {
                        Long sum = state.get(key);
                        if (sum == null) {
                            sum = value;
                        } else {
                            sum += value;
                        }
                        state.put(key, sum);
                        context.forward(key, sum);
                        return null;
                    }
                    return new KeyValue<>(key, value);
                }

                @Override
                public KeyValue<Long, Long> punctuate(final long timestamp) {
                    return null;
                }

                @Override
                public void close() {
                }
            };
        }
    }, storeNames).to(SINGLE_PARTITION_OUTPUT_TOPIC);
    final KafkaStreams streams = new KafkaStreams(builder.build(), StreamsTestUtils.getStreamsConfig(applicationId, CLUSTER.bootstrapServers(), Serdes.LongSerde.class.getName(), Serdes.LongSerde.class.getName(), new Properties() {

        {
            put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
            put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, numberOfStreamsThreads);
            put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, -1);
            put(StreamsConfig.consumerPrefix(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG), 5 * 1000);
            put(StreamsConfig.consumerPrefix(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG), 5 * 1000 - 1);
            put(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG), MAX_POLL_INTERVAL_MS);
            put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
            put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath() + File.separator + appDir);
            put(StreamsConfig.APPLICATION_SERVER_CONFIG, "dummy:2142");
        }
    }));
    streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {

        @Override
        public void uncaughtException(final Thread t, final Throwable e) {
            if (uncaughtException != null) {
                e.printStackTrace(System.err);
                fail("Should only get one uncaught exception from Streams.");
            }
            uncaughtException = e;
        }
    });
    return streams;
}
Also used : KafkaStreams(org.apache.kafka.streams.KafkaStreams) KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) Properties(java.util.Properties) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Serdes(org.apache.kafka.common.serialization.Serdes) AtomicInteger(java.util.concurrent.atomic.AtomicInteger)
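
The punctuate() override above is part of the old Transformer contract used by this pre-2.x codebase; it was later removed in favor of registering a Punctuator from init(). A minimal sketch, assuming Kafka 2.1+ for the Duration overload of schedule(); the interval is illustrative:

@Override
public void init(final ProcessorContext context) {
    this.context = context;
    // schedule() replaces Transformer#punctuate and makes the time semantics explicit
    context.schedule(Duration.ofSeconds(10), PunctuationType.WALL_CLOCK_TIME,
        timestamp -> context.forward(-1, (int) timestamp));
}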

Example 5 with TransformerSupplier

Use of org.apache.kafka.streams.kstream.TransformerSupplier in project apache-kafka-on-k8s by banzaicloud.

From the class KStreamTransformTest, method testTransform.

@Test
public void testTransform() {
    StreamsBuilder builder = new StreamsBuilder();
    TransformerSupplier<Number, Number, KeyValue<Integer, Integer>> transformerSupplier = new TransformerSupplier<Number, Number, KeyValue<Integer, Integer>>() {

        public Transformer<Number, Number, KeyValue<Integer, Integer>> get() {
            return new Transformer<Number, Number, KeyValue<Integer, Integer>>() {

                private int total = 0;

                @Override
                public void init(ProcessorContext context) {
                }

                @Override
                public KeyValue<Integer, Integer> transform(Number key, Number value) {
                    total += value.intValue();
                    return KeyValue.pair(key.intValue() * 2, total);
                }

                @Override
                public KeyValue<Integer, Integer> punctuate(long timestamp) {
                    return KeyValue.pair(-1, (int) timestamp);
                }

                @Override
                public void close() {
                }
            };
        }
    };
    final int[] expectedKeys = { 1, 10, 100, 1000 };
    MockProcessorSupplier<Integer, Integer> processor = new MockProcessorSupplier<>();
    KStream<Integer, Integer> stream = builder.stream(topicName, Consumed.with(intSerde, intSerde));
    stream.transform(transformerSupplier).process(processor);
    driver.setUp(builder);
    for (int expectedKey : expectedKeys) {
        driver.process(topicName, expectedKey, expectedKey * 10);
    }
    driver.punctuate(2);
    driver.punctuate(3);
    assertEquals(6, processor.processed.size());
    String[] expected = { "2:10", "20:110", "200:1110", "2000:11110", "-1:2", "-1:3" };
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], processor.processed.get(i));
    }
}
Also used : KeyValue(org.apache.kafka.streams.KeyValue) Transformer(org.apache.kafka.streams.kstream.Transformer) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) MockProcessorSupplier(org.apache.kafka.test.MockProcessorSupplier) Test(org.junit.Test)
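
The driver.setUp()/driver.process()/driver.punctuate() harness above is an old internal test utility. On current Kafka versions the public equivalent is TopologyTestDriver with TestInputTopic/TestOutputTopic (Kafka 2.4+). A minimal sketch, assuming the transformed stream were routed to a hypothetical "output-topic" via to() instead of process():

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "transform-test");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
try (final TopologyTestDriver testDriver = new TopologyTestDriver(builder.build(), props)) {
    final TestInputTopic<Integer, Integer> input = testDriver.createInputTopic(
        topicName, new IntegerSerializer(), new IntegerSerializer());
    final TestOutputTopic<Integer, Integer> output = testDriver.createOutputTopic(
        "output-topic", new IntegerDeserializer(), new IntegerDeserializer());
    input.pipeInput(1, 10);
    // the transformer doubles the key and emits the running total
    assertEquals(new KeyValue<>(2, 10), output.readKeyValue());
}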

Aggregations

KeyValue (org.apache.kafka.streams.KeyValue): 6 usages
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 6 usages
Transformer (org.apache.kafka.streams.kstream.Transformer): 6 usages
TransformerSupplier (org.apache.kafka.streams.kstream.TransformerSupplier): 6 usages
ProcessorContext (org.apache.kafka.streams.processor.ProcessorContext): 6 usages
Properties (java.util.Properties): 5 usages
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 4 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 3 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 3 usages
Serdes (org.apache.kafka.common.serialization.Serdes): 3 usages
KafkaStreams (org.apache.kafka.streams.KafkaStreams): 3 usages
Duration (java.time.Duration): 2 usages
Duration.ofMillis (java.time.Duration.ofMillis): 2 usages
ArrayList (java.util.ArrayList): 2 usages
Arrays (java.util.Arrays): 2 usages
List (java.util.List): 2 usages
Matcher (java.util.regex.Matcher): 2 usages
Pattern (java.util.regex.Pattern): 2 usages
Topology (org.apache.kafka.streams.Topology): 2 usages
ReadOnlyKeyValueStore (org.apache.kafka.streams.state.ReadOnlyKeyValueStore): 2 usages