Example 16 with Change

Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.

From the class TimeOrderedKeyValueBufferTest, method shouldRespectEvictionPredicate:

@Test
public void shouldRespectEvictionPredicate() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);
    putRecord(buffer, context, 0L, 0L, "asdf", "eyt");
    putRecord(buffer, context, 1L, 0L, "zxcv", "rtg");
    assertThat(buffer.numRecords(), is(2));
    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> buffer.numRecords() > 1, evicted::add);
    assertThat(buffer.numRecords(), is(1));
    assertThat(evicted, is(singletonList(new Eviction<>("asdf", new Change<>("eyt", null), getContext(0L)))));
    cleanup(context, buffer);
}
Also used: Eviction (org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction), MockInternalProcessorContext (org.apache.kafka.test.MockInternalProcessorContext), Change (org.apache.kafka.streams.kstream.internals.Change), LinkedList (java.util.LinkedList), Test (org.junit.Test)
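
For orientation: Change is the Streams-internal pair of a record's new and old value, so the evicted entry above carries Change("eyt", null) because the buffered record had no prior value. Below is a minimal, self-contained sketch of the evictWhile contract the test exercises; TinyTimeOrderedBuffer is a hypothetical stand-in for the real buffer, and it assumes distinct timestamps for brevity.

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.BiConsumer;
import java.util.function.Supplier;

// Hypothetical stand-in for TimeOrderedKeyValueBuffer, reduced to the evictWhile
// contract: evict oldest-first, re-checking the predicate before every eviction.
final class TinyTimeOrderedBuffer<K, V> {

    private final TreeMap<Long, K> keysByTime = new TreeMap<>(); // assumes distinct timestamps
    private final Map<K, V> values = new HashMap<>();

    void put(final long time, final K key, final V value) {
        keysByTime.put(time, key);
        values.put(key, value);
    }

    int numRecords() {
        return keysByTime.size();
    }

    void evictWhile(final Supplier<Boolean> predicate, final BiConsumer<K, V> callback) {
        while (!keysByTime.isEmpty() && predicate.get()) {
            final K key = keysByTime.pollFirstEntry().getValue(); // oldest record first
            callback.accept(key, values.remove(key));
        }
    }
}

With this shape, evictWhile(() -> numRecords() > 1, ...) stops after a single eviction, matching the assertion above that exactly one record remains.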

Example 17 with Change

Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.

From the class SubscriptionStoreReceiveProcessorSupplier, method get:

@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;

        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(Thread.currentThread().getName(), internalProcessorContext.taskId().toString(), internalProcessorContext.metrics());
            store = internalProcessorContext.getStateStore(storeBuilder);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                if (context().recordMetadata().isPresent()) {
                    final RecordMetadata recordMetadata = context().recordMetadata().get();
                    LOG.warn("Skipping record due to null foreign key. " + "topic=[{}] partition=[{}] offset=[{}]", recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                } else {
                    LOG.warn("Skipping record due to null foreign key. Topic, partition, and offset not known.");
                }
                droppedRecordsSensor.record();
                return;
            }
            if (record.value().getVersion() != SubscriptionWrapper.CURRENT_VERSION) {
                // Guard against modifications to SubscriptionWrapper: compatibility with
                // previous versions is required for rolling upgrades, i.e., for upgrading
                // from older SubscriptionWrapper versions to newer versions.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);
            // This store is used by the prefix scanner in ForeignJoinSubscriptionProcessorSupplier
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE) || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            final Change<ValueAndTimestamp<SubscriptionWrapper<K>>> change = new Change<>(newValue, oldValue);
            // note: key is non-nullable
            // note: newValue is non-nullable
            context().forward(record.withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey())).withValue(change).withTimestamp(newValue.timestamp()));
        }
    };
}
Also used: InternalProcessorContext (org.apache.kafka.streams.processor.internals.InternalProcessorContext), Change (org.apache.kafka.streams.kstream.internals.Change), ProcessorContext (org.apache.kafka.streams.processor.api.ProcessorContext), ValueAndTimestamp (org.apache.kafka.streams.state.ValueAndTimestamp), RecordMetadata (org.apache.kafka.streams.processor.api.RecordMetadata), Bytes (org.apache.kafka.common.utils.Bytes), TimestampedKeyValueStore (org.apache.kafka.streams.state.TimestampedKeyValueStore), Record (org.apache.kafka.streams.processor.api.Record), ContextualProcessor (org.apache.kafka.streams.processor.api.ContextualProcessor), Sensor (org.apache.kafka.common.metrics.Sensor), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException)
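
The heart of the processor above is a read-modify-forward step: fetch the prior subscription, apply the instruction to the store, and emit both sides as a Change. A minimal sketch of that step, assuming a plain java.util.Map stands in for the timestamped state store (applyAndDiff is an illustrative name):

import java.util.Map;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.kstream.internals.Change;

// Read-modify-forward, stripped of the Kafka processor plumbing. Map.put and
// Map.remove both return the previous value, which becomes Change.oldValue.
final class SubscriptionDiff {
    static <V> Change<V> applyAndDiff(final Map<Bytes, V> store, final Bytes key,
                                      final V newValue, final boolean delete) {
        final V oldValue = delete ? store.remove(key) : store.put(key, newValue);
        return new Change<>(newValue, oldValue); // downstream sees both sides of the update
    }
}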

Example 18 with Change

Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.

From the class KTableSuppressProcessorMetricsTest, method shouldRecordMetricsWithBuiltInMetricsVersionLatest:

@Test
public void shouldRecordMetricsWithBuiltInMetricsVersionLatest() {
    final String storeName = "test-store";
    final StateStore buffer = new InMemoryTimeOrderedKeyValueBuffer.Builder<>(storeName, Serdes.String(), Serdes.Long()).withLoggingDisabled().build();
    final KTableImpl<String, ?, Long> mock = EasyMock.mock(KTableImpl.class);
    final Processor<String, Change<Long>, String, Change<Long>> processor = new KTableSuppressProcessorSupplier<>((SuppressedInternal<String>) Suppressed.<String>untilTimeLimit(Duration.ofDays(100), maxRecords(1)), storeName, mock).get();
    streamsConfig.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);
    final MockInternalNewProcessorContext<String, Change<Long>> context = new MockInternalNewProcessorContext<>(streamsConfig, TASK_ID, TestUtils.tempDirectory());
    final Time time = new SystemTime();
    context.setCurrentNode(new ProcessorNode("testNode"));
    context.setSystemTimeMs(time.milliseconds());
    buffer.init((StateStoreContext) context, buffer);
    processor.init(context);
    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final String key = "longKey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    processor.process(new Record<>(key, value, timestamp));
    final MetricName evictionRateMetric = evictionRateMetricLatest;
    final MetricName evictionTotalMetric = evictionTotalMetricLatest;
    final MetricName bufferSizeAvgMetric = bufferSizeAvgMetricLatest;
    final MetricName bufferSizeMaxMetric = bufferSizeMaxMetricLatest;
    final MetricName bufferCountAvgMetric = bufferCountAvgMetricLatest;
    final MetricName bufferCountMaxMetric = bufferCountMaxMetricLatest;
    {
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, is(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(0.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(21.5));
        verifyMetric(metrics, bufferSizeMaxMetric, is(43.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(0.5));
        verifyMetric(metrics, bufferCountMaxMetric, is(1.0));
    }
    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(timestamp + 1);
    processor.process(new Record<>("key", value, timestamp + 1));
    {
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, greaterThan(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(1.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(41.0));
        verifyMetric(metrics, bufferSizeMaxMetric, is(82.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(1.0));
        verifyMetric(metrics, bufferCountMaxMetric, is(2.0));
    }
}
Also used: StateStore (org.apache.kafka.streams.processor.StateStore), Time (org.apache.kafka.common.utils.Time), SystemTime (org.apache.kafka.common.utils.SystemTime), Change (org.apache.kafka.streams.kstream.internals.Change), MetricName (org.apache.kafka.common.MetricName), ProcessorNode (org.apache.kafka.streams.processor.internals.ProcessorNode), MockInternalNewProcessorContext (org.apache.kafka.test.MockInternalNewProcessorContext), Metric (org.apache.kafka.common.Metric), Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap), Map (java.util.Map), Test (org.junit.Test)
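
The verifyMetric helper the test calls is not part of this snippet. A plausible implementation is sketched below; it is an assumption, since only the call sites are shown, but it matches the Matcher-based usage above (Metric.metricValue() returns Object, hence the unchecked cast):

import java.util.Map;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.hamcrest.Matcher;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.notNullValue;

// Assumed shape of the verifyMetric helper: look the metric up by name,
// assert it is registered, and match its current value.
final class MetricAssertions {
    @SuppressWarnings("unchecked")
    static <T> void verifyMetric(final Map<MetricName, ? extends Metric> metrics,
                                 final MetricName name, final Matcher<? super T> matcher) {
        final Metric metric = metrics.get(name);
        assertThat(metric, notNullValue());
        assertThat((T) metric.metricValue(), matcher);
    }
}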

Example 19 with Change

Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.

From the class KTableSuppressProcessorTest, method suppressShouldShutDownWhenOverByteCapacity:

@Test
public void suppressShouldShutDownWhenOverByteCapacity() {
    final Harness<String, Long> harness = new Harness<>(untilTimeLimit(Duration.ofDays(100), maxBytes(60L).shutDownWhenFull()), String(), Long());
    final MockInternalNewProcessorContext<String, Change<Long>> context = harness.context;
    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    context.setCurrentNode(new ProcessorNode("testNode"));
    final String key = "hey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(key, value, timestamp));
    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(1L);
    try {
        harness.processor.process(new Record<>("dummyKey", value, timestamp));
        fail("expected an exception");
    } catch (final StreamsException e) {
        assertThat(e.getMessage(), containsString("buffer exceeded its max capacity"));
    }
}
Also used: ProcessorNode (org.apache.kafka.streams.processor.internals.ProcessorNode), StreamsException (org.apache.kafka.streams.errors.StreamsException), Long (org.apache.kafka.common.serialization.Serdes.Long), String (org.apache.kafka.common.serialization.Serdes.String), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Change (org.apache.kafka.streams.kstream.internals.Change), Test (org.junit.Test)
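
For reference, the buffer configuration under test maps onto the public suppression DSL. A sketch, with an illustrative helper name:

import java.time.Duration;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Suppressed;
import static org.apache.kafka.streams.kstream.Suppressed.BufferConfig.maxBytes;

// Illustrative helper: hold updates for up to the time limit, and throw
// (shutting the application down) rather than emit early if the 60-byte
// buffer would overflow, which is the StreamsException the test expects.
final class HardCapSuppression {
    static <K, V> KTable<K, V> suppressWithHardCap(final KTable<K, V> table) {
        return table.suppress(Suppressed.untilTimeLimit(
                Duration.ofDays(100), maxBytes(60L).shutDownWhenFull()));
    }
}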

Example 20 with Change

Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.

From the class KTableSuppressProcessorTest, method finalResultsShouldDropTombstonesForSessionWindows:

/**
 * It's desirable to drop tombstones for final-results windowed streams, since (as described in the
 * {@link SuppressedInternal} javadoc) they are unnecessary to emit.
 */
@Test
public void finalResultsShouldDropTombstonesForSessionWindows() {
    final Harness<Windowed<String>, Long> harness = new Harness<>(finalResults(ofMillis(0L)), sessionWindowedSerdeFrom(String.class), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;
    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final Windowed<String> key = new Windowed<>("hey", new SessionWindow(0L, 0L));
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(key, value, timestamp));
    assertThat(context.forwarded(), hasSize(0));
}
Also used: Windowed (org.apache.kafka.streams.kstream.Windowed), Long (org.apache.kafka.common.serialization.Serdes.Long), String (org.apache.kafka.common.serialization.Serdes.String), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Change (org.apache.kafka.streams.kstream.internals.Change), SessionWindow (org.apache.kafka.streams.kstream.internals.SessionWindow), Test (org.junit.Test)
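
The finalResults(...) factory above is test-harness shorthand; in the public DSL the equivalent is untilWindowCloses, which is what makes tombstones safe to drop, since each window key is emitted once, as a final result. A sketch, with an illustrative helper name:

import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.Windowed;
import static org.apache.kafka.streams.kstream.Suppressed.BufferConfig.unbounded;

// Illustrative helper: emit only the final result per window. Intermediate
// updates, including tombstones like the one processed above, are absorbed
// by the suppression buffer instead of being forwarded.
final class FinalResults {
    static <K, V> KTable<Windowed<K>, V> emitFinalOnly(final KTable<Windowed<K>, V> windowed) {
        return windowed.suppress(Suppressed.untilWindowCloses(unbounded()));
    }
}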

Aggregations

Change (org.apache.kafka.streams.kstream.internals.Change): 28
Test (org.junit.Test): 23
Long (org.apache.kafka.common.serialization.Serdes.Long): 15
String (org.apache.kafka.common.serialization.Serdes.String): 15
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 15
Record (org.apache.kafka.streams.processor.api.Record): 12
MockProcessorContext (org.apache.kafka.streams.processor.api.MockProcessorContext): 11
ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 10
Windowed (org.apache.kafka.streams.kstream.Windowed): 8
MockInternalProcessorContext (org.apache.kafka.test.MockInternalProcessorContext): 7
LinkedList (java.util.LinkedList): 6
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 6
TimeWindow (org.apache.kafka.streams.kstream.internals.TimeWindow): 6
Eviction (org.apache.kafka.streams.state.internals.TimeOrderedKeyValueBuffer.Eviction): 6
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 5
RecordBatchingStateRestoreCallback (org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback): 5
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 4
Bytes (org.apache.kafka.common.utils.Bytes): 3
ProcessorNode (org.apache.kafka.streams.processor.internals.ProcessorNode): 3
StreamsException (org.apache.kafka.streams.errors.StreamsException): 2