
Example 11 with Record

Use of org.apache.kafka.streams.processor.api.Record in project kafka by apache.

The class KTableSuppressProcessorTest, method suppressShouldNotDropTombstonesForKTable.

/**
 * It's SUPER NOT OK to drop tombstones for non-windowed streams, since we may have emitted some results for
 * the key before getting the tombstone (see the {@link SuppressedInternal} javadoc).
 */
@Test
public void suppressShouldNotDropTombstonesForKTable() {
    final Harness<String, Long> harness = new Harness<>(untilTimeLimit(ofMillis(0), maxRecords(0)), String(), Long());
    final MockInternalNewProcessorContext<String, Change<Long>> context = harness.context;
    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final String key = "hey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(key, value, timestamp));
    assertThat(context.forwarded(), hasSize(1));
    final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0);
    assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp)));
}
Also used : Long(org.apache.kafka.common.serialization.Serdes.Long) Record(org.apache.kafka.streams.processor.api.Record) String(org.apache.kafka.common.serialization.Serdes.String) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Change(org.apache.kafka.streams.kstream.internals.Change) MockProcessorContext(org.apache.kafka.streams.processor.api.MockProcessorContext) Test(org.junit.Test)
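For context, a KTable tombstone is just a Change whose new value is null; the suppress processor must forward it because earlier results for the same key may already have been emitted. A minimal sketch of the shape of such a record, with illustrative values rather than the harness constants above:

    // Change(newValue, oldValue): a null newValue marks a deletion (tombstone),
    // while oldValue carries the value previously emitted for the key.
    final Change<Long> tombstone = new Change<>(null, 42L);

    // The processor receives the tombstone as an ordinary Record carrying the upstream timestamp.
    final Record<String, Change<Long>> input = new Record<>("hey", tombstone, 100L);

    // Record exposes plain accessors, which is what the assertions above compare against.
    assertThat(input.key(), is("hey"));
    assertThat(input.value().newValue, is(nullValue()));
    assertThat(input.timestamp(), is(100L));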

Example 12 with Record

Use of org.apache.kafka.streams.processor.api.Record in project kafka by apache.

The class GraphGraceSearchUtilTest, method shouldExtractGraceFromSessionAncestorThroughStatefulParent.

@Test
public void shouldExtractGraceFromSessionAncestorThroughStatefulParent() {
    final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L));
    final StatefulProcessorNode<String, Long> graceGrandparent = new StatefulProcessorNode<>(
        "asdf",
        new ProcessorParameters<>(
            new KStreamSessionWindowAggregate<String, Long, Integer>(windows, "asdf", null, null, null),
            "asdf"
        ),
        (StoreBuilder<?>) null
    );
    final StatefulProcessorNode<String, Long> statefulParent = new StatefulProcessorNode<>("stateful", new ProcessorParameters<>(() -> new Processor<String, Long, String, Long>() {

        @Override
        public void init(final ProcessorContext<String, Long> context) {
        }

        @Override
        public void process(final Record<String, Long> record) {
        }

        @Override
        public void close() {
        }
    }, "dummy"), (StoreBuilder<?>) null);
    graceGrandparent.addChild(statefulParent);
    final ProcessorGraphNode<String, Long> node = new ProcessorGraphNode<>("stateless", null);
    statefulParent.addChild(node);
    final long extracted = GraphGraceSearchUtil.findAndVerifyWindowGrace(node);
    assertThat(extracted, is(windows.gracePeriodMs() + windows.inactivityGap()));
}
Also used : Processor(org.apache.kafka.streams.processor.api.Processor) ProcessorContext(org.apache.kafka.streams.processor.api.ProcessorContext) SessionWindows(org.apache.kafka.streams.kstream.SessionWindows) Record(org.apache.kafka.streams.processor.api.Record) KStreamSessionWindowAggregate(org.apache.kafka.streams.kstream.internals.KStreamSessionWindowAggregate) Test(org.junit.Test)
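The expected grace in the assertion is simply the sum of the two durations configured on the session windows. A small sketch of that arithmetic, reusing the numbers from the test above:

    // ofInactivityGapAndGrace(gap, grace) sets both components explicitly.
    final SessionWindows windows = SessionWindows.ofInactivityGapAndGrace(ofMillis(10L), ofMillis(1234L));

    final long gap = windows.inactivityGap();     // 10 ms
    final long grace = windows.gracePeriodMs();   // 1234 ms

    // For session windows the grace observed downstream also covers the inactivity gap,
    // since a session can still be extended until the gap has elapsed.
    final long effectiveGrace = gap + grace;      // 1244 ms, the value asserted above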

Example 13 with Record

Use of org.apache.kafka.streams.processor.api.Record in project kafka by apache.

The class WindowedWordCountProcessorTest, method shouldWorkWithPersistentStore.

@Test
public void shouldWorkWithPersistentStore() throws IOException {
    final File stateDir = TestUtils.tempDirectory();
    try {
        final MockProcessorContext<String, String> context = new MockProcessorContext<>(new Properties(), new TaskId(0, 0), stateDir);
        // Create, initialize, and register the state store.
        final WindowStore<String, Integer> store = Stores.windowStoreBuilder(
                Stores.persistentWindowStore("WindowedCounts", Duration.ofDays(24), Duration.ofMillis(100), false),
                Serdes.String(),
                Serdes.Integer())
            .withLoggingDisabled()
            .withCachingDisabled()
            .build();
        store.init(context.getStateStoreContext(), store);
        context.getStateStoreContext().register(store, null);
        // Create and initialize the processor under test
        final Processor<String, String, String, String> processor = new WindowedWordCountProcessorSupplier().get();
        processor.init(context);
        // send a record to the processor
        processor.process(new Record<>("key", "alpha beta gamma alpha", 101L));
        // send a record to the processor in a new window
        processor.process(new Record<>("key", "gamma delta", 221L));
        // note that the processor does not forward during process()
        assertThat(context.forwarded().isEmpty(), is(true));
        // now, we trigger the punctuator, which iterates over the state store and forwards the contents.
        context.scheduledPunctuators().get(0).getPunctuator().punctuate(1_000L);
        // finally, we can verify the output.
        final List<CapturedForward<? extends String, ? extends String>> capturedForwards = context.forwarded();
        final List<CapturedForward<? extends String, ? extends String>> expected = asList(
            new CapturedForward<>(new Record<>("[alpha@100/200]", "2", 1_000L)),
            new CapturedForward<>(new Record<>("[beta@100/200]", "1", 1_000L)),
            new CapturedForward<>(new Record<>("[delta@200/300]", "1", 1_000L)),
            new CapturedForward<>(new Record<>("[gamma@100/200]", "1", 1_000L)),
            new CapturedForward<>(new Record<>("[gamma@200/300]", "1", 1_000L))
        );
        assertThat(capturedForwards, is(expected));
        store.close();
    } finally {
        Utils.delete(stateDir);
    }
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Properties(java.util.Properties) MockProcessorContext(org.apache.kafka.streams.processor.api.MockProcessorContext) CapturedForward(org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward) Record(org.apache.kafka.streams.processor.api.Record) File(java.io.File) Test(org.junit.jupiter.api.Test)
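When the persistence of the store is not itself under test, the same flow works with an in-memory window store, which removes the temp-directory setup and cleanup. A sketch of just the store construction, assuming the rest of the test body stays as above:

    final WindowStore<String, Integer> store = Stores.windowStoreBuilder(
            Stores.inMemoryWindowStore("WindowedCounts", Duration.ofDays(24), Duration.ofMillis(100), false),
            Serdes.String(),
            Serdes.Integer())
        .withLoggingDisabled()   // changelogging is not supported by MockProcessorContext
        .withCachingDisabled()   // caching is not supported by MockProcessorContext
        .build();
    store.init(context.getStateStoreContext(), store);
    context.getStateStoreContext().register(store, null);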

Example 14 with Record

Use of org.apache.kafka.streams.processor.api.Record in project kafka by apache.

The class SubscriptionStoreReceiveProcessorSupplier, method get.

@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;

        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(Thread.currentThread().getName(), internalProcessorContext.taskId().toString(), internalProcessorContext.metrics());
            store = internalProcessorContext.getStateStore(storeBuilder);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                if (context().recordMetadata().isPresent()) {
                    final RecordMetadata recordMetadata = context().recordMetadata().get();
                    LOG.warn("Skipping record due to null foreign key. " + "topic=[{}] partition=[{}] offset=[{}]", recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                } else {
                    LOG.warn("Skipping record due to null foreign key. Topic, partition, and offset not known.");
                }
                droppedRecordsSensor.record();
                return;
            }
            if (record.value().getVersion() != SubscriptionWrapper.CURRENT_VERSION) {
                // Guard against modifications to SubscriptionWrapper: older and newer versions must stay
                // compatible to allow rolling upgrades, so an unexpected version is rejected outright.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);
            // This store is used by the prefix scanner in ForeignJoinSubscriptionProcessorSupplier
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE)
                || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            final Change<ValueAndTimestamp<SubscriptionWrapper<K>>> change = new Change<>(newValue, oldValue);
            // note: key is non-nullable
            // note: newValue is non-nullable
            context().forward(record.withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey())).withValue(change).withTimestamp(newValue.timestamp()));
        }
    };
}
Also used : InternalProcessorContext(org.apache.kafka.streams.processor.internals.InternalProcessorContext) Change(org.apache.kafka.streams.kstream.internals.Change) ProcessorContext(org.apache.kafka.streams.processor.api.ProcessorContext) InternalProcessorContext(org.apache.kafka.streams.processor.internals.InternalProcessorContext) ValueAndTimestamp(org.apache.kafka.streams.state.ValueAndTimestamp) RecordMetadata(org.apache.kafka.streams.processor.api.RecordMetadata) Bytes(org.apache.kafka.common.utils.Bytes) TimestampedKeyValueStore(org.apache.kafka.streams.state.TimestampedKeyValueStore) Record(org.apache.kafka.streams.processor.api.Record) ContextualProcessor(org.apache.kafka.streams.processor.api.ContextualProcessor) Sensor(org.apache.kafka.common.metrics.Sensor) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException)
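The forward() call at the end chains Record's copy-on-write helpers. A minimal sketch of how they behave, using illustrative keys and values rather than the foreign-key types above:

    // Record is immutable: withKey/withValue/withTimestamp each return a copy with one
    // field replaced, carrying the remaining fields (including headers) over unchanged.
    final Record<String, Long> original = new Record<>("pk-1", 7L, 500L);

    final Record<String, String> rewritten = original
        .withKey("fk-9|pk-1")    // hypothetical re-keyed value, standing in for the CombinedKey above
        .withValue("seven")
        .withTimestamp(600L);

    // original is left untouched; rewritten carries the new key, value, and timestamp.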

Example 15 with Record

Use of org.apache.kafka.streams.processor.api.Record in project kafka by apache.

The class KTableKTableRightJoinTest, method shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVersionLatest.

@Test
public void shouldLogAndMeterSkippedRecordsDueToNullLeftKeyWithBuiltInMetricsVersionLatest() {
    final StreamsBuilder builder = new StreamsBuilder();
    @SuppressWarnings("unchecked")
    final Processor<String, Change<String>, String, Change<Object>> join = new KTableKTableRightJoin<>(
        (KTableImpl<String, String, String>) builder.table("left", Consumed.with(Serdes.String(), Serdes.String())),
        (KTableImpl<String, String, String>) builder.table("right", Consumed.with(Serdes.String(), Serdes.String())),
        null
    ).get();
    props.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);
    final MockProcessorContext<String, Change<Object>> context = new MockProcessorContext<>(props);
    context.setRecordMetadata("left", -1, -2);
    join.init(context);
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableRightJoin.class)) {
        join.process(new Record<>(null, new Change<>("new", "old"), 0));
        assertThat(
            appender.getEvents().stream()
                .filter(e -> e.getLevel().equals("WARN"))
                .map(Event::getMessage)
                .collect(Collectors.toList()),
            hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]")
        );
    }
}
Also used : StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) StreamsConfig(org.apache.kafka.streams.StreamsConfig) CoreMatchers.hasItem(org.hamcrest.CoreMatchers.hasItem) Event(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event) Properties(java.util.Properties) Consumed(org.apache.kafka.streams.kstream.Consumed) Test(org.junit.Test) MockProcessorContext(org.apache.kafka.streams.processor.api.MockProcessorContext) Collectors(java.util.stream.Collectors) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) Serdes(org.apache.kafka.common.serialization.Serdes) Record(org.apache.kafka.streams.processor.api.Record) Processor(org.apache.kafka.streams.processor.api.Processor) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat)
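The warning asserted here comes from the same null-key guard pattern shown in Example 14. A simplified sketch of that guard (not the literal KTableKTableRightJoin implementation), assuming a processor that extends ContextualProcessor so context() is available, and that LOG and droppedRecordsSensor are fields of the enclosing class:

    @Override
    public void process(final Record<String, Change<String>> record) {
        if (record.key() == null) {
            if (context().recordMetadata().isPresent()) {
                final RecordMetadata metadata = context().recordMetadata().get();
                LOG.warn("Skipping record due to null key. topic=[{}] partition=[{}] offset=[{}]",
                        metadata.topic(), metadata.partition(), metadata.offset());
            } else {
                LOG.warn("Skipping record due to null key. Topic, partition, and offset not known.");
            }
            droppedRecordsSensor.record();
            return;
        }
        // ... the actual join logic would run here ...
    }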

Aggregations

Record (org.apache.kafka.streams.processor.api.Record)24 Test (org.junit.Test)18 MockProcessorContext (org.apache.kafka.streams.processor.api.MockProcessorContext)16 Change (org.apache.kafka.streams.kstream.internals.Change)12 CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString)12 Long (org.apache.kafka.common.serialization.Serdes.Long)11 String (org.apache.kafka.common.serialization.Serdes.String)11 Windowed (org.apache.kafka.streams.kstream.Windowed)7 Processor (org.apache.kafka.streams.processor.api.Processor)6 TimeWindow (org.apache.kafka.streams.kstream.internals.TimeWindow)5 Metrics (org.apache.kafka.common.metrics.Metrics)4 MockTime (org.apache.kafka.common.utils.MockTime)4 ProcessorContext (org.apache.kafka.streams.processor.api.ProcessorContext)4 List (java.util.List)3 Properties (java.util.Properties)3 Serdes (org.apache.kafka.common.serialization.Serdes)3 StreamsConfig (org.apache.kafka.streams.StreamsConfig)3 Test (org.junit.jupiter.api.Test)3 File (java.io.File)2 Collectors (java.util.stream.Collectors)2