Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.
From the class TimeOrderedKeyValueBufferTest, the method shouldRespectEvictionPredicate:
@Test
public void shouldRespectEvictionPredicate() {
    final TimeOrderedKeyValueBuffer<String, String> buffer = bufferSupplier.apply(testName);
    final MockInternalProcessorContext context = makeContext();
    buffer.init((StateStoreContext) context, buffer);

    // buffer two records, then evict only while more than one record remains
    putRecord(buffer, context, 0L, 0L, "asdf", "eyt");
    putRecord(buffer, context, 1L, 0L, "zxcv", "rtg");
    assertThat(buffer.numRecords(), is(2));

    final List<Eviction<String, String>> evicted = new LinkedList<>();
    buffer.evictWhile(() -> buffer.numRecords() > 1, evicted::add);
    assertThat(buffer.numRecords(), is(1));
    // only the oldest record is evicted, wrapped as a Change with no prior value
    assertThat(evicted, is(singletonList(new Eviction<>("asdf", new Change<>("eyt", null), getContext(0L)))));

    cleanup(context, buffer);
}
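For reference, Change is just a new/old value pair with public newValue and oldValue fields; the eviction assertion above carries the buffered value as newValue with a null prior. A minimal sketch of those semantics (the describeChange helper is illustrative, not part of the test):

    import org.apache.kafka.streams.kstream.internals.Change;

    // Illustrative helper (not from the test): shows the new/old semantics of Change
    // as used in the eviction assertion above.
    static String describeChange(final Change<String> change) {
        return "new=" + change.newValue + ", old=" + change.oldValue;
    }

    // describeChange(new Change<>("eyt", null)) returns "new=eyt, old=null"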
Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.
From the class SubscriptionStoreReceiveProcessorSupplier, the method get:
@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;
        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
                Thread.currentThread().getName(),
                internalProcessorContext.taskId().toString(),
                internalProcessorContext.metrics()
            );
            store = internalProcessorContext.getStateStore(storeBuilder);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                if (context().recordMetadata().isPresent()) {
                    final RecordMetadata recordMetadata = context().recordMetadata().get();
                    LOG.warn(
                        "Skipping record due to null foreign key. topic=[{}] partition=[{}] offset=[{}]",
                        recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset()
                    );
                } else {
                    LOG.warn("Skipping record due to null foreign key. Topic, partition, and offset not known.");
                }
                droppedRecordsSensor.record();
                return;
            }
            if (record.value().getVersion() != SubscriptionWrapper.CURRENT_VERSION) {
                // Guard against incompatible wire formats: only CURRENT_VERSION is supported here,
                // so a rolling upgrade must migrate from older SubscriptionWrapper versions to newer versions.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);
            // This store is used by the prefix scanner in ForeignJoinSubscriptionProcessorSupplier.
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE)
                || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            final Change<ValueAndTimestamp<SubscriptionWrapper<K>>> change = new Change<>(newValue, oldValue);
            // note: key is non-nullable
            // note: newValue is non-nullable
            context().forward(
                record.withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey()))
                      .withValue(change)
                      .withTimestamp(newValue.timestamp())
            );
        }
    };
}
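In general, a Change pair lets a downstream processor tell inserts, updates, and deletes apart by which side of the pair is null (in the processor above the newValue side is always set, as its notes indicate). A hedged sketch of that pattern, using Change<String> for brevity (the describe function is illustrative, not Kafka code):

    import org.apache.kafka.streams.kstream.internals.Change;

    // Illustrative only: classify a forwarded Change by which side of the pair is set.
    static String describe(final Change<String> change) {
        if (change.oldValue == null) {
            return "insert: " + change.newValue;            // nothing was stored before
        } else if (change.newValue == null) {
            return "delete: " + change.oldValue;            // prior value removed
        } else {
            return "update: " + change.oldValue + " -> " + change.newValue;
        }
    }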
Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.
From the class KTableSuppressProcessorMetricsTest, the method shouldRecordMetricsWithBuiltInMetricsVersionLatest:
@Test
public void shouldRecordMetricsWithBuiltInMetricsVersionLatest() {
    final String storeName = "test-store";
    final StateStore buffer = new InMemoryTimeOrderedKeyValueBuffer.Builder<>(storeName, Serdes.String(), Serdes.Long())
        .withLoggingDisabled()
        .build();
    final KTableImpl<String, ?, Long> mock = EasyMock.mock(KTableImpl.class);
    final Processor<String, Change<Long>, String, Change<Long>> processor =
        new KTableSuppressProcessorSupplier<>(
            (SuppressedInternal<String>) Suppressed.<String>untilTimeLimit(Duration.ofDays(100), maxRecords(1)),
            storeName,
            mock
        ).get();
    streamsConfig.setProperty(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);
    final MockInternalNewProcessorContext<String, Change<Long>> context =
        new MockInternalNewProcessorContext<>(streamsConfig, TASK_ID, TestUtils.tempDirectory());
    final Time time = new SystemTime();
    context.setCurrentNode(new ProcessorNode("testNode"));
    context.setSystemTimeMs(time.milliseconds());
    buffer.init((StateStoreContext) context, buffer);
    processor.init(context);

    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final String key = "longKey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    processor.process(new Record<>(key, value, timestamp));

    final MetricName evictionRateMetric = evictionRateMetricLatest;
    final MetricName evictionTotalMetric = evictionTotalMetricLatest;
    final MetricName bufferSizeAvgMetric = bufferSizeAvgMetricLatest;
    final MetricName bufferSizeMaxMetric = bufferSizeMaxMetricLatest;
    final MetricName bufferCountAvgMetric = bufferCountAvgMetricLatest;
    final MetricName bufferCountMaxMetric = bufferCountMaxMetricLatest;

    {
        // after the first record: nothing evicted yet, one record buffered
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, is(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(0.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(21.5));
        verifyMetric(metrics, bufferSizeMaxMetric, is(43.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(0.5));
        verifyMetric(metrics, bufferCountMaxMetric, is(1.0));
    }

    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(timestamp + 1);
    processor.process(new Record<>("key", value, timestamp + 1));

    {
        // the second record exceeds maxRecords(1), so one record is evicted
        final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
        verifyMetric(metrics, evictionRateMetric, greaterThan(0.0));
        verifyMetric(metrics, evictionTotalMetric, is(1.0));
        verifyMetric(metrics, bufferSizeAvgMetric, is(41.0));
        verifyMetric(metrics, bufferSizeMaxMetric, is(82.0));
        verifyMetric(metrics, bufferCountAvgMetric, is(1.0));
        verifyMetric(metrics, bufferCountMaxMetric, is(2.0));
    }
}
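The verifyMetric helper itself is not shown in this excerpt. A sketch of what such a helper might look like, assuming the standard hamcrest matcher API and Kafka's Metric.metricValue() (the exact helper in the test class may differ):

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.notNullValue;

    import java.util.Map;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;
    import org.hamcrest.Matcher;

    // Sketch: look the metric up by name, assert it is registered, then match its value.
    @SuppressWarnings("unchecked")
    static <T> void verifyMetric(final Map<MetricName, ? extends Metric> metrics,
                                 final MetricName name,
                                 final Matcher<? super T> matcher) {
        assertThat(metrics.get(name), notNullValue());
        assertThat((T) metrics.get(name).metricValue(), matcher);
    }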
Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.
From the class KTableSuppressProcessorTest, the method suppressShouldShutDownWhenOverByteCapacity:
@Test
public void suppressShouldShutDownWhenOverByteCapacity() {
    final Harness<String, Long> harness =
        new Harness<>(untilTimeLimit(Duration.ofDays(100), maxBytes(60L).shutDownWhenFull()), String(), Long());
    final MockInternalNewProcessorContext<String, Change<Long>> context = harness.context;

    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    context.setCurrentNode(new ProcessorNode("testNode"));
    final String key = "hey";
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(key, value, timestamp));

    context.setRecordMetadata("", 0, 1L);
    context.setTimestamp(1L);
    try {
        // the second record pushes the buffer past maxBytes(60), and
        // shutDownWhenFull() turns that into a fatal StreamsException
        harness.processor.process(new Record<>("dummyKey", value, timestamp));
        fail("expected an exception");
    } catch (final StreamsException e) {
        assertThat(e.getMessage(), containsString("buffer exceeded its max capacity"));
    }
}
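For contrast, the same byte limit with the other buffer-full strategy would emit the oldest records early instead of failing; both strategies are public API on Suppressed.BufferConfig. A brief sketch:

    import java.time.Duration;
    import org.apache.kafka.streams.kstream.Suppressed;
    import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;

    // shutDownWhenFull(): exceeding 60 bytes fails the task with a StreamsException, as asserted above.
    final Suppressed<String> failFast =
        Suppressed.untilTimeLimit(Duration.ofDays(100), BufferConfig.maxBytes(60L).shutDownWhenFull());

    // emitEarlyWhenFull(): exceeding the limit instead flushes the oldest records downstream.
    final Suppressed<String> emitEarly =
        Suppressed.untilTimeLimit(Duration.ofDays(100), BufferConfig.maxBytes(60L).emitEarlyWhenFull());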
Use of org.apache.kafka.streams.kstream.internals.Change in project kafka by apache.
From the class KTableSuppressProcessorTest, the method finalResultsShouldDropTombstonesForSessionWindows:
/**
 * It's desirable to drop tombstones for final-results windowed streams, since (as described in the
 * {@link SuppressedInternal} javadoc) they are unnecessary to emit.
 */
@Test
public void finalResultsShouldDropTombstonesForSessionWindows() {
    final Harness<Windowed<String>, Long> harness =
        new Harness<>(finalResults(ofMillis(0L)), sessionWindowedSerdeFrom(String.class), Long());
    final MockInternalNewProcessorContext<Windowed<String>, Change<Long>> context = harness.context;

    final long timestamp = 100L;
    context.setRecordMetadata("", 0, 0L);
    context.setTimestamp(timestamp);
    final Windowed<String> key = new Windowed<>("hey", new SessionWindow(0L, 0L));
    // a null new value is a tombstone; for final results it must not be forwarded
    final Change<Long> value = new Change<>(null, ARBITRARY_LONG);
    harness.processor.process(new Record<>(key, value, timestamp));

    assertThat(context.forwarded(), hasSize(0));
}
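The finalResults(...) used above appears to be a helper of this test class; in the public API, the equivalent final-results suppression is configured with untilWindowCloses, for example:

    import org.apache.kafka.streams.kstream.Suppressed;
    import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
    import org.apache.kafka.streams.kstream.Windowed;

    // Public-API counterpart of a final-results suppression: emit only the final result
    // per window, which is what makes downstream tombstones unnecessary.
    final Suppressed<Windowed> finalResultsSuppression =
        Suppressed.untilWindowCloses(BufferConfig.unbounded());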