
Example 1 with Record

Use of org.apache.kafka.streams.processor.api.Record in the apache/kafka project.

From class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics:

@Test
public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() {
    setup(false);
    // ProcessorRecordContext(timestamp, offset, partition, topic, headers):
    // sentinel values -1/-2/-3 let the test assert that the partition (-3) and
    // offset (-2) surface in the warning message below.
    context.setRecordContext(new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()));
    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        processor.process(new Record<>(null, "1", 0L));
        assertThat(
            appender.getEvents().stream()
                .filter(e -> e.getLevel().equals("WARN"))
                .map(Event::getMessage)
                .collect(Collectors.toList()),
            hasItem("Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2]"));
    }
    assertEquals(1.0, getMetricByName(context.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue());
}
Also used: CoreMatchers.is (org.hamcrest.CoreMatchers.is), MockTime (org.apache.kafka.common.utils.MockTime), Arrays (java.util.Arrays), CoreMatchers.hasItem (org.hamcrest.CoreMatchers.hasItem), TaskMetrics (org.apache.kafka.streams.processor.internals.metrics.TaskMetrics), Stores (org.apache.kafka.streams.state.Stores), Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap), LogContext (org.apache.kafka.common.utils.LogContext), After (org.junit.After), MetricName (org.apache.kafka.common.MetricName), Serdes (org.apache.kafka.common.serialization.Serdes), StreamsMetricsImpl (org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl), Aggregator (org.apache.kafka.streams.kstream.Aggregator), Event (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender.Event), Time (org.apache.kafka.common.utils.Time), TestUtils (org.apache.kafka.test.TestUtils), ThreadCache (org.apache.kafka.streams.state.internals.ThreadCache), KeyValue (org.apache.kafka.streams.KeyValue), Collectors (java.util.stream.Collectors), List (java.util.List), Metrics (org.apache.kafka.common.metrics.Metrics), Utils.mkEntry (org.apache.kafka.common.utils.Utils.mkEntry), Assert.assertFalse (org.junit.Assert.assertFalse), Matchers.greaterThan (org.hamcrest.Matchers.greaterThan), Duration.ofMillis (java.time.Duration.ofMillis), StreamsConfig (org.apache.kafka.streams.StreamsConfig), SessionWindows (org.apache.kafka.streams.kstream.SessionWindows), ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext), ArrayList (java.util.ArrayList), Initializer (org.apache.kafka.streams.kstream.Initializer), RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders), StateStoreContext (org.apache.kafka.streams.processor.StateStoreContext), Windowed (org.apache.kafka.streams.kstream.Windowed), Record (org.apache.kafka.streams.processor.api.Record), Processor (org.apache.kafka.streams.processor.api.Processor), SessionStore (org.apache.kafka.streams.state.SessionStore), MockRecordCollector (org.apache.kafka.test.MockRecordCollector), MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat), StreamsTestUtils.getMetricByName (org.apache.kafka.test.StreamsTestUtils.getMetricByName), Before (org.junit.Before), InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext), Merger (org.apache.kafka.streams.kstream.Merger), Assert.assertTrue (org.junit.Assert.assertTrue), Test (org.junit.Test), StoreBuilder (org.apache.kafka.streams.state.StoreBuilder), KeyValueTimestamp (org.apache.kafka.streams.KeyValueTimestamp), KeyValueIterator (org.apache.kafka.streams.state.KeyValueIterator), LogCaptureAppender (org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender), StreamsTestUtils (org.apache.kafka.test.StreamsTestUtils), Assert.assertEquals (org.junit.Assert.assertEquals)
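
For context, Record (org.apache.kafka.streams.processor.api.Record) is the envelope the newer Processor API passes through a topology: key, value, timestamp, and headers travel together, and the test above checks that the session-window aggregator drops null-key records with a WARN log and a bump to dropped-records-total. Below is a minimal, self-contained sketch of the same null-key guard using only the public API; the class name and the simplified drop handling are illustrative, not Kafka's internal implementation.

import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

public class NullKeyGuardProcessor implements Processor<String, String, String, String> {

    private ProcessorContext<String, String> context;

    @Override
    public void init(final ProcessorContext<String, String> context) {
        this.context = context;
    }

    @Override
    public void process(final Record<String, String> record) {
        if (record.key() == null) {
            // The real aggregator also logs a WARN and records the drop in the
            // dropped-records-total task metric at this point.
            return;
        }
        context.forward(record);
    }
}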

Example 2 with Record

Use of org.apache.kafka.streams.processor.api.Record in the apache/kafka project.

From class StreamTaskTest, method shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes:

@Test
public void shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes() {
    time = new MockTime(0L, 0L, 0L);
    metrics = new Metrics(new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO), time);
    // Create a processor that only forwards even keys to test the metrics at the source and terminal nodes
    final MockSourceNode<Integer, Integer> evenKeyForwardingSourceNode = new MockSourceNode<Integer, Integer>(intDeserializer, intDeserializer) {

        InternalProcessorContext<Integer, Integer> context;

        @Override
        public void init(final InternalProcessorContext<Integer, Integer> context) {
            this.context = context;
            super.init(context);
        }

        @Override
        public void process(final Record<Integer, Integer> record) {
            if (record.key() % 2 == 0) {
                context.forward(record);
            }
        }
    };
    task = createStatelessTaskWithForwardingTopology(evenKeyForwardingSourceNode);
    task.initializeIfNeeded();
    task.completeRestoration(noOpResetter -> {
    });
    final String sourceNodeName = evenKeyForwardingSourceNode.name();
    final String terminalNodeName = processorStreamTime.name();
    final Metric sourceAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    // e2e latency = 10
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(0, 0L)));
    task.process(10L);
    assertThat(sourceAvg.metricValue(), equalTo(10.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(10.0));
    // key 0: reaches terminal node
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));
    // e2e latency = 15
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(1, 0L)));
    task.process(15L);
    assertThat(sourceAvg.metricValue(), equalTo(12.5));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(15.0));
    // key 1: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));
    // e2e latency = 23
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(2, 0L)));
    task.process(23L);
    assertThat(sourceAvg.metricValue(), equalTo(16.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));
    // key 2: reaches terminal node
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));
    // e2e latency = 5
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(3, 0L)));
    task.process(5L);
    assertThat(sourceAvg.metricValue(), equalTo(13.25));
    assertThat(sourceMin.metricValue(), equalTo(5.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));
    // key 3: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));
}
Also used: MetricConfig (org.apache.kafka.common.metrics.MetricConfig), Metrics (org.apache.kafka.common.metrics.Metrics), MockSourceNode (org.apache.kafka.test.MockSourceNode), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Record (org.apache.kafka.streams.processor.api.Record), Metric (org.apache.kafka.common.Metric), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), MockTime (org.apache.kafka.common.utils.MockTime), Test (org.junit.Test)
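
For context, the record-e2e-latency metrics asserted above measure the wall-clock time at which a node processes a record minus the record's timestamp. Since every record in the test carries timestamp 0L and MockTime pins the clock, task.process(10L) observes exactly 10 ms, and the avg/min/max values follow by simple arithmetic. A hypothetical helper spelling out that calculation (not Kafka's internal code):

import org.apache.kafka.streams.processor.api.Record;

final class E2ELatencyMath {

    // e2e latency = wall-clock time at processing minus the record's timestamp.
    static long latencyMs(final long wallClockMs, final Record<?, ?> record) {
        return wallClockMs - record.timestamp();
    }
}

// With timestamps pinned at 0L, the source node sees latencies 10, 15, 23, 5
// (avg 13.25, min 5.0, max 23.0). Keys 1 and 3 never pass the even-key filter,
// so only 10 and 23 reach the terminal node (avg 16.5, min 10.0, max 23.0).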

Example 3 with Record

Use of org.apache.kafka.streams.processor.api.Record in the apache/kafka project.

From class GlobalStreamThreadTest, method before:

@Before
public void before() {
    final MaterializedInternal<Object, Object, KeyValueStore<Bytes, byte[]>> materialized = new MaterializedInternal<>(Materialized.with(null, null), new InternalNameProvider() {

        @Override
        public String newProcessorName(final String prefix) {
            return "processorName";
        }

        @Override
        public String newStoreName(final String prefix) {
            return GLOBAL_STORE_NAME;
        }
    }, "store-");
    final ProcessorSupplier<Object, Object, Void, Void> processorSupplier = () -> new ContextualProcessor<Object, Object, Void, Void>() {

        @Override
        public void process(final Record<Object, Object> record) {
        }
    };
    builder.addGlobalStore(
        new TimestampedKeyValueStoreMaterializer<>(materialized).materialize().withLoggingDisabled(),
        "sourceName",
        null,
        null,
        null,
        GLOBAL_STORE_TOPIC_NAME,
        "processorName",
        processorSupplier);
    baseDirectoryName = TestUtils.tempDirectory().getAbsolutePath();
    final HashMap<String, Object> properties = new HashMap<>();
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "blah");
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "testAppId");
    properties.put(StreamsConfig.STATE_DIR_CONFIG, baseDirectoryName);
    properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
    properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
    config = new StreamsConfig(properties);
    globalStreamThread = new GlobalStreamThread(
        builder.rewriteTopology(config).buildGlobalStateTopology(),
        config,
        mockConsumer,
        new StateDirectory(config, time, true, false),
        0,
        new StreamsMetricsImpl(new Metrics(), "test-client", StreamsConfig.METRICS_LATEST, time),
        time,
        "clientId",
        stateRestoreListener,
        e -> {
    });
}
Also used: CoreMatchers.is (org.hamcrest.CoreMatchers.is), StreamsConfig (org.apache.kafka.streams.StreamsConfig), MockTime (org.apache.kafka.common.utils.MockTime), InternalNameProvider (org.apache.kafka.streams.kstream.internals.InternalNameProvider), IsInstanceOf.instanceOf (org.hamcrest.core.IsInstanceOf.instanceOf), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), CoreMatchers.equalTo (org.hamcrest.CoreMatchers.equalTo), HashMap (java.util.HashMap), StreamsException (org.apache.kafka.streams.errors.StreamsException), OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy), DEAD (org.apache.kafka.streams.processor.internals.GlobalStreamThread.State.DEAD), ContextualProcessor (org.apache.kafka.streams.processor.api.ContextualProcessor), ProcessorSupplier (org.apache.kafka.streams.processor.api.ProcessorSupplier), KeyValueStore (org.apache.kafka.streams.state.KeyValueStore), Serdes (org.apache.kafka.common.serialization.Serdes), Record (org.apache.kafka.streams.processor.api.Record), StreamsMetricsImpl (org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl), Assert.fail (org.junit.Assert.fail), MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat), Before (org.junit.Before), TopicPartition (org.apache.kafka.common.TopicPartition), RUNNING (org.apache.kafka.streams.processor.internals.GlobalStreamThread.State.RUNNING), MockStateRestoreListener (org.apache.kafka.test.MockStateRestoreListener), TestUtils (org.apache.kafka.test.TestUtils), Set (java.util.Set), Assert.assertTrue (org.junit.Assert.assertTrue), Test (org.junit.Test), PartitionInfo (org.apache.kafka.common.PartitionInfo), MaterializedInternal (org.apache.kafka.streams.kstream.internals.MaterializedInternal), File (java.io.File), Bytes (org.apache.kafka.common.utils.Bytes), ConsumerRecordUtil.record (org.apache.kafka.streams.processor.internals.testutil.ConsumerRecordUtil.record), List (java.util.List), Metrics (org.apache.kafka.common.metrics.Metrics), TimestampedKeyValueStoreMaterializer (org.apache.kafka.streams.kstream.internals.TimestampedKeyValueStoreMaterializer), StateStore (org.apache.kafka.streams.processor.StateStore), Assert.assertFalse (org.junit.Assert.assertFalse), Materialized (org.apache.kafka.streams.kstream.Materialized), InvalidOffsetException (org.apache.kafka.clients.consumer.InvalidOffsetException), Node (org.apache.kafka.common.Node), Collections (java.util.Collections), Assert.assertEquals (org.junit.Assert.assertEquals)
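
Here Record reaches the processor through ContextualProcessor, the convenience base class that stores the ProcessorContext handed to init() and exposes it via context(). The supplier above deliberately ignores every record because the test only exercises the GlobalStreamThread lifecycle. As a sketch of what a global-store processor typically does with each Record, assuming a hypothetical store name and String types:

import org.apache.kafka.streams.processor.api.ContextualProcessor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.ProcessorSupplier;
import org.apache.kafka.streams.processor.api.Record;
import org.apache.kafka.streams.state.KeyValueStore;

public class GlobalStoreProcessorSketch {

    // "global-store" and the String types are illustrative assumptions.
    static final ProcessorSupplier<String, String, Void, Void> STORE_MAINTAINER =
        () -> new ContextualProcessor<String, String, Void, Void>() {

            private KeyValueStore<String, String> store;

            @Override
            public void init(final ProcessorContext<Void, Void> context) {
                super.init(context);
                store = context.getStateStore("global-store");
            }

            @Override
            public void process(final Record<String, String> record) {
                // Tombstones (null values) delete; everything else upserts.
                if (record.value() == null) {
                    store.delete(record.key());
                } else {
                    store.put(record.key(), record.value());
                }
            }
        };
}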

Example 4 with Record

Use of org.apache.kafka.streams.processor.api.Record in the apache/kafka project.

From class KTableSuppressProcessorTest, method finalResultsSuppressionShouldBufferAndEmitAtGraceExpiration:

@Test
public void finalResultsSuppressionShouldBufferAndEmitAtGraceExpiration() {
    final Harness<Windowed<String>, Long> harness = new Harness<>(finalResults(ofMillis(1L)), timeWindowedSerdeFrom(String.class, 1L), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;
    final long windowStart = 99L;
    final long recordTime = 99L;
    final long windowEnd = 100L;
    context.setRecordMetadata("topic", 0, 0);
    context.setTimestamp(recordTime);
    final Windowed<String> key = new Windowed<>("hey", new TimeWindow(windowStart, windowEnd));
    final Change<Long> value = ARBITRARY_CHANGE;
    harness.processor.process(new Record<>(key, value, recordTime));
    assertThat(context.forwarded(), hasSize(0));
    // Stream time advances to 100 with the next record, but final-results
    // suppression must wait 1 ms (the grace period) past the window *end*
    // before emitting "hey", so nothing is emitted yet.
    final long windowStart2 = 100L;
    final long recordTime2 = 100L;
    final long windowEnd2 = 101L;
    context.setRecordMetadata("topic", 0, 1);
    context.setTimestamp(recordTime2);
    harness.processor.process(new Record<>(new Windowed<>("dummyKey1", new TimeWindow(windowStart2, windowEnd2)), ARBITRARY_CHANGE, recordTime2));
    assertThat(context.forwarded(), hasSize(0));
    // ok, now it's time to emit "hey"
    final long windowStart3 = 101L;
    final long recordTime3 = 101L;
    final long windowEnd3 = 102L;
    context.setRecordMetadata("topic", 0, 1);
    context.setTimestamp(recordTime3);
    harness.processor.process(new Record<>(new Windowed<>("dummyKey2", new TimeWindow(windowStart3, windowEnd3)), ARBITRARY_CHANGE, recordTime3));
    assertThat(context.forwarded(), hasSize(1));
    final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0);
    assertThat(capturedForward.record(), is(new Record<>(key, value, recordTime)));
}
Also used: String (org.apache.kafka.common.serialization.Serdes.String), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Change (org.apache.kafka.streams.kstream.internals.Change), TimeWindow (org.apache.kafka.streams.kstream.internals.TimeWindow), MockProcessorContext (org.apache.kafka.streams.processor.api.MockProcessorContext), Windowed (org.apache.kafka.streams.kstream.Windowed), Long (org.apache.kafka.common.serialization.Serdes.Long), Record (org.apache.kafka.streams.processor.api.Record), Test (org.junit.Test)
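
The timing in this test follows the final-results rule: a window's result may be emitted only once stream time reaches the window end plus the grace period; finalResults(ofMillis(1L)) is the test harness's shorthand for that configuration. A hypothetical helper spelling out the arithmetic with the values from the test:

static boolean readyToEmit(final long streamTimeMs, final long windowEndMs, final long graceMs) {
    return streamTimeMs >= windowEndMs + graceMs;
}

// "hey" sits in window [99, 100) with a 1 ms grace period:
//   readyToEmit(100L, 100L, 1L) == false  -> the second record is still buffered
//   readyToEmit(101L, 100L, 1L) == true   -> the third record triggers the emit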

Example 5 with Record

Use of org.apache.kafka.streams.processor.api.Record in the apache/kafka project.

From class KTableSuppressProcessorTest, method windowedZeroTimeLimitShouldImmediatelyEmit:

@Test
public void windowedZeroTimeLimitShouldImmediatelyEmit() {
    final Harness<Windowed<String>, Long> harness = new Harness<>(untilTimeLimit(ZERO, unbounded()), timeWindowedSerdeFrom(String.class, 100L), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;
    final long timestamp = ARBITRARY_LONG;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final Windowed<String> key = new Windowed<>("hey", new TimeWindow(0L, 100L));
    final Change<Long> value = ARBITRARY_CHANGE;
    harness.processor.process(new Record<>(key, value, timestamp));
    assertThat(context.forwarded(), hasSize(1));
    final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0);
    assertThat(capturedForward.record(), is(new Record<>(key, value, timestamp)));
}
Also used: String (org.apache.kafka.common.serialization.Serdes.String), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Change (org.apache.kafka.streams.kstream.internals.Change), TimeWindow (org.apache.kafka.streams.kstream.internals.TimeWindow), MockProcessorContext (org.apache.kafka.streams.processor.api.MockProcessorContext), Windowed (org.apache.kafka.streams.kstream.Windowed), Long (org.apache.kafka.common.serialization.Serdes.Long), Record (org.apache.kafka.streams.processor.api.Record), Test (org.junit.Test)
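
By contrast with the final-results test above, untilTimeLimit(ZERO, unbounded()) buffers for zero milliseconds, so every update is forwarded as soon as it is processed. A brief sketch contrasting the two public Suppressed configurations these tests exercise (the field names are illustrative):

import static java.time.Duration.ZERO;
import static org.apache.kafka.streams.kstream.Suppressed.BufferConfig.unbounded;

import org.apache.kafka.streams.kstream.Suppressed;
import org.apache.kafka.streams.kstream.Windowed;

public class SuppressionConfigSketch {

    // Zero time limit: nothing is held back, every update passes through at once.
    static final Suppressed<Object> PASS_THROUGH =
        Suppressed.untilTimeLimit(ZERO, unbounded());

    // Final results: each window emits exactly once, after window end + grace.
    static final Suppressed<Windowed> FINAL_RESULTS_ONLY =
        Suppressed.untilWindowCloses(unbounded());
}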

Aggregations

Record (org.apache.kafka.streams.processor.api.Record): 24
Test (org.junit.Test): 18
MockProcessorContext (org.apache.kafka.streams.processor.api.MockProcessorContext): 16
Change (org.apache.kafka.streams.kstream.internals.Change): 12
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 12
Long (org.apache.kafka.common.serialization.Serdes.Long): 11
String (org.apache.kafka.common.serialization.Serdes.String): 11
Windowed (org.apache.kafka.streams.kstream.Windowed): 7
Processor (org.apache.kafka.streams.processor.api.Processor): 6
TimeWindow (org.apache.kafka.streams.kstream.internals.TimeWindow): 5
Metrics (org.apache.kafka.common.metrics.Metrics): 4
MockTime (org.apache.kafka.common.utils.MockTime): 4
ProcessorContext (org.apache.kafka.streams.processor.api.ProcessorContext): 4
List (java.util.List): 3
Properties (java.util.Properties): 3
Serdes (org.apache.kafka.common.serialization.Serdes): 3
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 3
Test (org.junit.jupiter.api.Test): 3
File (java.io.File): 2
Collectors (java.util.stream.Collectors): 2