
Example 1 with ValueAndTimestamp

Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.

The class KGroupedStreamImplTest, method doReduceSessionWindows.

private void doReduceSessionWindows(final MockApiProcessorSupplier<Windowed<String>, String, Void, Void> supplier) {
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        final TestInputTopic<String, String> inputTopic = driver.createInputTopic(TOPIC, new StringSerializer(), new StringSerializer());
        inputTopic.pipeInput("1", "A", 10);
        inputTopic.pipeInput("2", "Z", 15);
        inputTopic.pipeInput("1", "B", 30);
        inputTopic.pipeInput("1", "A", 70);
        inputTopic.pipeInput("1", "B", 100);
        inputTopic.pipeInput("1", "C", 90);
    }
    final Map<Windowed<String>, ValueAndTimestamp<String>> result = supplier.theCapturedProcessor().lastValueAndTimestampPerKey();
    assertEquals(ValueAndTimestamp.make("A:B", 30L), result.get(new Windowed<>("1", new SessionWindow(10L, 30L))));
    assertEquals(ValueAndTimestamp.make("Z", 15L), result.get(new Windowed<>("2", new SessionWindow(15L, 15L))));
    assertEquals(ValueAndTimestamp.make("A:B:C", 100L), result.get(new Windowed<>("1", new SessionWindow(70L, 100L))));
}
Also used: Windowed(org.apache.kafka.streams.kstream.Windowed) ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) StringSerializer(org.apache.kafka.common.serialization.StringSerializer)
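
For context, ValueAndTimestamp itself has a small API surface; the sketch below (illustrative, not taken from the Kafka test suite) shows the calls these examples rely on.

// A ValueAndTimestamp pairs a value with the timestamp of the record that produced it.
final ValueAndTimestamp<String> vat = ValueAndTimestamp.make("A:B", 30L);
final String value = vat.value();        // "A:B"
final long timestamp = vat.timestamp();  // 30L
// Null-safe unwrapping: returns null instead of throwing when the wrapper is null.
final ValueAndTimestamp<String> missing = null;
final String maybe = ValueAndTimestamp.getValueOrNull(missing);  // null, no NPE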

Example 2 with ValueAndTimestamp

Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.

The class KGroupedTableImplTest, method shouldAggregateAndMaterializeResults.

@Test
public void shouldAggregateAndMaterializeResults() {
    builder
        .table(topic, Consumed.with(Serdes.String(), Serdes.String()))
        .groupBy(MockMapper.selectValueKeyValueMapper(), Grouped.with(Serdes.String(), Serdes.String()))
        .aggregate(
            MockInitializer.STRING_INIT,
            MockAggregator.TOSTRING_ADDER,
            MockAggregator.TOSTRING_REMOVER,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("aggregate")
                .withValueSerde(Serdes.String())
                .withKeySerde(Serdes.String()));
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(topic, driver);
        {
            // plain view: values only
            final KeyValueStore<String, String> aggregate = driver.getKeyValueStore("aggregate");
            assertThat(aggregate.get("1"), equalTo("0+1+1+1"));
            assertThat(aggregate.get("2"), equalTo("0+2+2"));
        }
        {
            // timestamped view: the same values wrapped with their record timestamps
            final KeyValueStore<String, ValueAndTimestamp<String>> aggregate = driver.getTimestampedKeyValueStore("aggregate");
            assertThat(aggregate.get("1"), equalTo(ValueAndTimestamp.make("0+1+1+1", 50L)));
            assertThat(aggregate.get("2"), equalTo(ValueAndTimestamp.make("0+2+2", 60L)));
        }
    }
}
Also used: ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) Bytes(org.apache.kafka.common.utils.Bytes) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) Test(org.junit.Test)
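
The test reads the same "aggregate" store through two views: getKeyValueStore returns plain values, while getTimestampedKeyValueStore wraps each value in a ValueAndTimestamp. A minimal sketch of how the two relate, assuming the driver and data from the test above:

// The timestamped view returns the same value as the plain view,
// plus the timestamp of the record that last updated the key.
final KeyValueStore<String, ValueAndTimestamp<String>> timestamped = driver.getTimestampedKeyValueStore("aggregate");
final ValueAndTimestamp<String> vat = timestamped.get("1");
assertThat(vat.value(), equalTo("0+1+1+1"));  // matches the plain view
assertThat(vat.timestamp(), equalTo(50L));    // latest update for key "1"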

Example 3 with ValueAndTimestamp

Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.

The class KStreamSlidingWindowAggregateTest, method testAggregateRandomInput.

@Test
public void testAggregateRandomInput() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic1 = "topic1";
    final WindowBytesStoreSupplier storeSupplier = inOrderIterator
        ? new InOrderMemoryWindowStoreSupplier("InOrder", 50000L, 10L, false)
        : Stores.inMemoryWindowStore("Reverse", Duration.ofMillis(50000), Duration.ofMillis(10), false);
    final KTable<Windowed<String>, String> table = builder
        .stream(topic1, Consumed.with(Serdes.String(), Serdes.String()))
        .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
        .windowedBy(SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(10), ofMillis(10000)))
        .aggregate(() -> "", (key, value, aggregate) -> {
            // append the new value and keep the characters sorted,
            // so the final aggregate is independent of arrival order
            aggregate += value;
            final char[] ch = aggregate.toCharArray();
            Arrays.sort(ch);
            return String.valueOf(ch);
        }, Materialized.as(storeSupplier));
    final MockApiProcessorSupplier<Windowed<String>, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
    table.toStream().process(supplier);
    final long seed = new Random().nextLong();
    final Random shuffle = new Random(seed);
    try {
        final List<ValueAndTimestamp<String>> input = Arrays.asList(
            ValueAndTimestamp.make("A", 10L), ValueAndTimestamp.make("B", 15L), ValueAndTimestamp.make("C", 16L),
            ValueAndTimestamp.make("D", 18L), ValueAndTimestamp.make("E", 30L), ValueAndTimestamp.make("F", 40L),
            ValueAndTimestamp.make("G", 55L), ValueAndTimestamp.make("H", 56L), ValueAndTimestamp.make("I", 58L),
            ValueAndTimestamp.make("J", 58L), ValueAndTimestamp.make("K", 62L), ValueAndTimestamp.make("L", 63L),
            ValueAndTimestamp.make("M", 63L), ValueAndTimestamp.make("N", 63L), ValueAndTimestamp.make("O", 76L),
            ValueAndTimestamp.make("P", 77L), ValueAndTimestamp.make("Q", 80L), ValueAndTimestamp.make("R", 2L),
            ValueAndTimestamp.make("S", 3L), ValueAndTimestamp.make("T", 5L), ValueAndTimestamp.make("U", 8L));
        Collections.shuffle(input, shuffle);
        try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
            final TestInputTopic<String, String> inputTopic1 = driver.createInputTopic(topic1, new StringSerializer(), new StringSerializer());
            for (final ValueAndTimestamp<String> i : input) {
                inputTopic1.pipeInput("A", i.value(), i.timestamp());
            }
        }
        final Map<Long, ValueAndTimestamp<String>> results = new HashMap<>();
        for (final KeyValueTimestamp<Windowed<String>, String> entry : supplier.theCapturedProcessor().processed()) {
            final Windowed<String> window = entry.key();
            final Long start = window.window().start();
            final ValueAndTimestamp<String> valueAndTimestamp = ValueAndTimestamp.make(entry.value(), entry.timestamp());
            if (results.putIfAbsent(start, valueAndTimestamp) != null) {
                results.replace(start, valueAndTimestamp);
            }
        }
        verifyRandomTestResults(results);
    } catch (final AssertionError t) {
        throw new AssertionError("Assertion failed in randomized test. Reproduce with seed: " + seed + ".", t);
    } catch (final Throwable t) {
        final String msg = "Exception in randomized scenario. Reproduce with seed: " + seed + ".";
        throw new AssertionError(msg, t);
    }
}
Also used: MockApiProcessorSupplier(org.apache.kafka.test.MockApiProcessorSupplier) HashMap(java.util.HashMap) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver) Random(java.util.Random) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) InMemoryWindowBytesStoreSupplier(org.apache.kafka.streams.state.internals.InMemoryWindowBytesStoreSupplier) WindowBytesStoreSupplier(org.apache.kafka.streams.state.WindowBytesStoreSupplier) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Windowed(org.apache.kafka.streams.kstream.Windowed) ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) Test(org.junit.Test)
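
A side note on the result-collection loop above: the putIfAbsent/replace pair simply keeps the latest emission per window start, which a plain put already does, since put overwrites existing entries. An equivalent sketch, assuming the same results map and supplier:

// Keep only the most recently emitted ValueAndTimestamp per window start.
for (final KeyValueTimestamp<Windowed<String>, String> entry : supplier.theCapturedProcessor().processed()) {
    results.put(entry.key().window().start(), ValueAndTimestamp.make(entry.value(), entry.timestamp()));
}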

Example 4 with ValueAndTimestamp

Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.

The class SessionWindowedKStreamImplTest, method shouldCountSessionWindowed.

private void shouldCountSessionWindowed() {
    final MockApiProcessorSupplier<Windowed<String>, Long, Void, Void> supplier = new MockApiProcessorSupplier<>();
    stream.count().toStream().process(supplier);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(driver);
    }
    final Map<Windowed<String>, ValueAndTimestamp<Long>> result = supplier.theCapturedProcessor().lastValueAndTimestampPerKey();
    assertThat(result.size(), equalTo(3));
    assertThat(result.get(new Windowed<>("1", new SessionWindow(10L, 15L))), equalTo(ValueAndTimestamp.make(2L, 15L)));
    assertThat(result.get(new Windowed<>("2", new SessionWindow(599L, 600L))), equalTo(ValueAndTimestamp.make(2L, 600L)));
    assertThat(result.get(new Windowed<>("1", new SessionWindow(600L, 600L))), equalTo(ValueAndTimestamp.make(1L, 600L)));
}
Also used: Windowed(org.apache.kafka.streams.kstream.Windowed) ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) MockApiProcessorSupplier(org.apache.kafka.test.MockApiProcessorSupplier) TopologyTestDriver(org.apache.kafka.streams.TopologyTestDriver)
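
The window bounds in these assertions follow from session merging: a session window spans the timestamps of the first and last records in the merged session. A small illustrative sketch using the same SessionWindow class:

// Records for key "1" at ts 10 and 15 fall within the inactivity gap,
// so they merge into one session spanning both timestamps.
final SessionWindow merged = new SessionWindow(10L, 15L);
assert merged.start() == 10L;  // timestamp of the first record
assert merged.end() == 15L;    // timestamp of the last record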

Example 5 with ValueAndTimestamp

Use of org.apache.kafka.streams.state.ValueAndTimestamp in project kafka by apache.

The class SubscriptionStoreReceiveProcessorSupplier, method get.

@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;

        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(Thread.currentThread().getName(), internalProcessorContext.taskId().toString(), internalProcessorContext.metrics());
            store = internalProcessorContext.getStateStore(storeBuilder);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                if (context().recordMetadata().isPresent()) {
                    final RecordMetadata recordMetadata = context().recordMetadata().get();
                    LOG.warn("Skipping record due to null foreign key. " + "topic=[{}] partition=[{}] offset=[{}]", recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                } else {
                    LOG.warn("Skipping record due to null foreign key. Topic, partition, and offset not known.");
                }
                droppedRecordsSensor.record();
                return;
            }
            if (record.value().getVersion() != SubscriptionWrapper.CURRENT_VERSION) {
                // only the current wrapper version is supported: there is no conversion
                // from older SubscriptionWrapper versions to newer versions
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);
            // This store is used by the prefix scanner in ForeignJoinSubscriptionProcessorSupplier
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE) || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            final Change<ValueAndTimestamp<SubscriptionWrapper<K>>> change = new Change<>(newValue, oldValue);
            // note: key is non-nullable
            // note: newValue is non-nullable
            context().forward(record.withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey())).withValue(change).withTimestamp(newValue.timestamp()));
        }
    };
}
Also used: InternalProcessorContext(org.apache.kafka.streams.processor.internals.InternalProcessorContext) Change(org.apache.kafka.streams.kstream.internals.Change) ProcessorContext(org.apache.kafka.streams.processor.api.ProcessorContext) ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) RecordMetadata(org.apache.kafka.streams.processor.api.RecordMetadata) Bytes(org.apache.kafka.common.utils.Bytes) TimestampedKeyValueStore(org.apache.kafka.streams.state.TimestampedKeyValueStore) Record(org.apache.kafka.streams.processor.api.Record) ContextualProcessor(org.apache.kafka.streams.processor.api.ContextualProcessor) Sensor(org.apache.kafka.common.metrics.Sensor) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException)
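
The Change wrapper forwarded above pairs the new and old store contents so that downstream foreign-key join processors can diff them. A minimal sketch of the pattern, with illustrative values:

// Change carries both sides of an update: newValue is what was just written
// (or null on delete), oldValue is what the store held before.
final ValueAndTimestamp<String> oldValue = ValueAndTimestamp.make("before", 10L);
final ValueAndTimestamp<String> newValue = ValueAndTimestamp.make("after", 20L);
final Change<ValueAndTimestamp<String>> change = new Change<>(newValue, oldValue);
assert change.newValue.timestamp() > change.oldValue.timestamp();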

Aggregations

ValueAndTimestamp (org.apache.kafka.streams.state.ValueAndTimestamp): 20
Test (org.junit.Test): 15
TopologyTestDriver (org.apache.kafka.streams.TopologyTestDriver): 14
Windowed (org.apache.kafka.streams.kstream.Windowed): 12
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 10
MockApiProcessorSupplier (org.apache.kafka.test.MockApiProcessorSupplier): 9
StreamsBuilder (org.apache.kafka.streams.StreamsBuilder): 8
HashMap (java.util.HashMap): 6
Bytes (org.apache.kafka.common.utils.Bytes): 4
WindowBytesStoreSupplier (org.apache.kafka.streams.state.WindowBytesStoreSupplier): 4
List (java.util.List): 3
WindowStore (org.apache.kafka.streams.state.WindowStore): 3
InMemoryWindowBytesStoreSupplier (org.apache.kafka.streams.state.internals.InMemoryWindowBytesStoreSupplier): 3
IntegrationTest (org.apache.kafka.test.IntegrationTest): 3
LinkedList (java.util.LinkedList): 2
Map (java.util.Map): 2
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 2
KeyValueStore (org.apache.kafka.streams.state.KeyValueStore): 2
InMemoryWindowStore (org.apache.kafka.streams.state.internals.InMemoryWindowStore): 2
Field (java.lang.reflect.Field): 1