Usage example of org.apache.kafka.streams.processor.api.Record from the Apache Kafka project:
class KStreamSessionWindowAggregateProcessorTest, method shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics.
@Test
public void shouldLogAndMeterWhenSkippingNullKeyWithBuiltInMetrics() {
    setup(false);
    // Record context carries sentinel values (timestamp=-1, offset=-2, partition=-3)
    // so the warning message below can be matched unambiguously.
    context.setRecordContext(new ProcessorRecordContext(-1, -2, -3, "topic", new RecordHeaders()));

    try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamSessionWindowAggregate.class)) {
        // A null key cannot be aggregated into a session window: the record must be dropped.
        processor.process(new Record<>(null, "1", 0L));

        // The drop must be logged at WARN with the record's topic/partition/offset.
        assertThat(
            appender.getEvents()
                .stream()
                .filter(event -> event.getLevel().equals("WARN"))
                .map(Event::getMessage)
                .collect(Collectors.toList()),
            hasItem("Skipping record due to null key. topic=[topic] partition=[-3] offset=[-2]")
        );
    }

    // ...and counted exactly once in the task-level dropped-records metric.
    assertEquals(
        1.0,
        getMetricByName(context.metrics().metrics(), "dropped-records-total", "stream-task-metrics").metricValue()
    );
}
Usage example of org.apache.kafka.streams.processor.api.Record from the Apache Kafka project:
class StreamTaskTest, method shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes.
@Test
public void shouldRecordE2ELatencyOnSourceNodeAndTerminalNodes() {
    // Fixed mock clock starting at 0: since every record below has timestamp 0, the
    // end-to-end latency observed at each node equals the wall-clock time passed to
    // task.process(...).
    time = new MockTime(0L, 0L, 0L);
    metrics = new Metrics(new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO), time);
    // Create a processor that only forwards even keys to test the metrics at the source and terminal nodes
    final MockSourceNode<Integer, Integer> evenKeyForwardingSourceNode = new MockSourceNode<Integer, Integer>(intDeserializer, intDeserializer) {
        InternalProcessorContext<Integer, Integer> context;
        @Override
        public void init(final InternalProcessorContext<Integer, Integer> context) {
            this.context = context;
            super.init(context);
        }
        @Override
        public void process(final Record<Integer, Integer> record) {
            // Odd keys stop here, so only even keys ever reach the terminal node.
            if (record.key() % 2 == 0) {
                context.forward(record);
            }
        }
    };
    task = createStatelessTaskWithForwardingTopology(evenKeyForwardingSourceNode);
    task.initializeIfNeeded();
    task.completeRestoration(noOpResetter -> {
    });

    final String sourceNodeName = evenKeyForwardingSourceNode.name();
    final String terminalNodeName = processorStreamTime.name();

    // Per-node e2e-latency metrics (avg/min/max) for both the source and terminal nodes.
    final Metric sourceAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric sourceMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), sourceNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalAvg = getProcessorMetric("record-e2e-latency", "%s-avg", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMin = getProcessorMetric("record-e2e-latency", "%s-min", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);
    final Metric terminalMax = getProcessorMetric("record-e2e-latency", "%s-max", task.id().toString(), terminalNodeName, StreamsConfig.METRICS_LATEST);

    // e2e latency = 10
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(0, 0L)));
    task.process(10L);
    assertThat(sourceAvg.metricValue(), equalTo(10.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(10.0));

    // key 0: reaches terminal node
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));

    // e2e latency = 15
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(1, 0L)));
    task.process(15L);
    // source has now seen latencies {10, 15}: avg = 12.5
    assertThat(sourceAvg.metricValue(), equalTo(12.5));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(15.0));

    // key 1: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(10.0));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(10.0));

    // e2e latency = 23
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(2, 0L)));
    task.process(23L);
    // source: {10, 15, 23} -> avg = 16
    assertThat(sourceAvg.metricValue(), equalTo(16.0));
    assertThat(sourceMin.metricValue(), equalTo(10.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));

    // key 2: reaches terminal node; terminal: {10, 23} -> avg = 16.5
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));

    // e2e latency = 5
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(3, 0L)));
    task.process(5L);
    // source: {10, 15, 23, 5} -> avg = 13.25; min drops to 5
    assertThat(sourceAvg.metricValue(), equalTo(13.25));
    assertThat(sourceMin.metricValue(), equalTo(5.0));
    assertThat(sourceMax.metricValue(), equalTo(23.0));

    // key 3: stops at source, doesn't affect terminal node metrics
    assertThat(terminalAvg.metricValue(), equalTo(16.5));
    assertThat(terminalMin.metricValue(), equalTo(10.0));
    assertThat(terminalMax.metricValue(), equalTo(23.0));
}
Usage example of org.apache.kafka.streams.processor.api.Record from the Apache Kafka project:
class GlobalStreamThreadTest, method before (test fixture setup).
@Before
public void before() {
    // Name provider that pins the processor and store names used by the global topology below.
    final InternalNameProvider fixedNameProvider = new InternalNameProvider() {
        @Override
        public String newProcessorName(final String prefix) {
            return "processorName";
        }
        @Override
        public String newStoreName(final String prefix) {
            return GLOBAL_STORE_NAME;
        }
    };
    final MaterializedInternal<Object, Object, KeyValueStore<Bytes, byte[]>> materializedInternal =
        new MaterializedInternal<>(Materialized.with(null, null), fixedNameProvider, "store-");

    // The global store's processor deliberately ignores every record; these tests only
    // exercise the thread's lifecycle, not record handling.
    final ProcessorSupplier<Object, Object, Void, Void> noOpProcessorSupplier =
        () -> new ContextualProcessor<Object, Object, Void, Void>() {
            @Override
            public void process(final Record<Object, Object> record) {
            }
        };

    builder.addGlobalStore(
        new TimestampedKeyValueStoreMaterializer<>(materializedInternal).materialize().withLoggingDisabled(),
        "sourceName",
        null,
        null,
        null,
        GLOBAL_STORE_TOPIC_NAME,
        "processorName",
        noOpProcessorSupplier);

    baseDirectoryName = TestUtils.tempDirectory().getAbsolutePath();

    // Minimal StreamsConfig: byte-array serdes and a temp state directory.
    final HashMap<String, Object> streamsProps = new HashMap<>();
    streamsProps.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "blah");
    streamsProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "testAppId");
    streamsProps.put(StreamsConfig.STATE_DIR_CONFIG, baseDirectoryName);
    streamsProps.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
    streamsProps.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
    config = new StreamsConfig(streamsProps);

    globalStreamThread = new GlobalStreamThread(
        builder.rewriteTopology(config).buildGlobalStateTopology(),
        config,
        mockConsumer,
        new StateDirectory(config, time, true, false),
        0,
        new StreamsMetricsImpl(new Metrics(), "test-client", StreamsConfig.METRICS_LATEST, time),
        time,
        "clientId",
        stateRestoreListener,
        e -> {
        });
}
Usage example of org.apache.kafka.streams.processor.api.Record from the Apache Kafka project:
class KTableSuppressProcessorTest, method finalResultsSuppressionShouldBufferAndEmitAtGraceExpiration.
@Test
public void finalResultsSuppressionShouldBufferAndEmitAtGraceExpiration() {
    // Final-results suppression with a 1 ms grace period: a windowed result may only be
    // emitted once stream time has advanced past (window end + grace).
    final Harness<Windowed<String>, Long> harness =
        new Harness<>(finalResults(ofMillis(1L)), timeWindowedSerdeFrom(String.class, 1L), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;

    // First record: window [99, 100), record time 99 -> buffered, nothing forwarded yet.
    final long windowStart = 99L;
    final long recordTime = 99L;
    final long windowEnd = 100L;
    context.setRecordMetadata("topic", 0, 0);
    context.setTimestamp(recordTime);
    final Windowed<String> key = new Windowed<>("hey", new TimeWindow(windowStart, windowEnd));
    final Change<Long> value = ARBITRARY_CHANGE;
    harness.processor.process(new Record<>(key, value, recordTime));
    assertThat(context.forwarded(), hasSize(0));

    // although the stream time is now 100, we have to wait 1 ms after the window *end* before we
    // emit "hey", so we don't emit yet.
    final long windowStart2 = 100L;
    final long recordTime2 = 100L;
    final long windowEnd2 = 101L;
    context.setRecordMetadata("topic", 0, 1);
    context.setTimestamp(recordTime2);
    harness.processor.process(new Record<>(new Windowed<>("dummyKey1", new TimeWindow(windowStart2, windowEnd2)), ARBITRARY_CHANGE, recordTime2));
    assertThat(context.forwarded(), hasSize(0));

    // ok, now it's time to emit "hey": stream time reaches 101 == windowEnd + grace.
    // Fix: the offset here was a copy-paste duplicate of the previous record's (1); consumed
    // records carry strictly increasing offsets, so the third record gets offset 2.
    final long windowStart3 = 101L;
    final long recordTime3 = 101L;
    final long windowEnd3 = 102L;
    context.setRecordMetadata("topic", 0, 2);
    context.setTimestamp(recordTime3);
    harness.processor.process(new Record<>(new Windowed<>("dummyKey2", new TimeWindow(windowStart3, windowEnd3)), ARBITRARY_CHANGE, recordTime3));
    assertThat(context.forwarded(), hasSize(1));

    // The emitted record must be the original key/value with its original timestamp.
    final MockProcessorContext.CapturedForward capturedForward = context.forwarded().get(0);
    assertThat(capturedForward.record(), is(new Record<>(key, value, recordTime)));
}
Usage example of org.apache.kafka.streams.processor.api.Record from the Apache Kafka project:
class KTableSuppressProcessorTest, method windowedZeroTimeLimitShouldImmediatelyEmit.
@Test
public void windowedZeroTimeLimitShouldImmediatelyEmit() {
    // With a zero time limit, the suppression buffer must never hold a record back.
    final Harness<Windowed<String>, Long> harness =
        new Harness<>(untilTimeLimit(ZERO, unbounded()), timeWindowedSerdeFrom(String.class, 100L), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;

    final long recordTimestamp = ARBITRARY_LONG;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(recordTimestamp);

    final Windowed<String> windowedKey = new Windowed<>("hey", new TimeWindow(0L, 100L));
    final Change<Long> change = ARBITRARY_CHANGE;
    harness.processor.process(new Record<>(windowedKey, change, recordTimestamp));

    // Exactly one record forwarded, immediately and unchanged.
    assertThat(context.forwarded(), hasSize(1));
    final MockProcessorContext.CapturedForward emitted = context.forwarded().get(0);
    assertThat(emitted.record(), is(new Record<>(windowedKey, change, recordTimestamp)));
}
Aggregations