Use of org.apache.kafka.streams.processor.api.RecordMetadata in project kafka by apache.
The class ChangeLoggingSessionBytesStoreTest, method shouldLogPutsWithPosition.
@Test
public void shouldLogPutsWithPosition() {
    EasyMock.expect(inner.getPosition()).andReturn(POSITION).anyTimes();
    inner.put(key1, value1);
    EasyMock.expectLastCall();
    init();
    final Bytes binaryKey = SessionKeySchema.toBinary(key1);
    EasyMock.reset(context);
    final RecordMetadata recordContext = new ProcessorRecordContext(0L, 1L, 0, "", new RecordHeaders());
    EasyMock.expect(context.recordMetadata()).andStubReturn(Optional.of(recordContext));
    EasyMock.expect(context.timestamp()).andStubReturn(0L);
    context.logChange(store.name(), binaryKey, value1, 0L, POSITION);
    EasyMock.replay(context);
    store.put(key1, value1);
    EasyMock.verify(inner, context);
}
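The POSITION fixture is omitted from this snippet. Assuming it is defined consistently with the record context the test mocks (topic "", partition 0, offset 1), and with the position the next test builds explicitly, it would look roughly like this:

// Assumed fixture, not shown in the snippet above: a Position consistent with
// the mocked ProcessorRecordContext. The map shape is topic -> (partition -> offset).
private static final Position POSITION =
    Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 1L)))));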
Use of org.apache.kafka.streams.processor.api.RecordMetadata in project kafka by apache.
The class ChangeLoggingWindowBytesStoreTest, method shouldLogPutsWithPosition.
@Test
public void shouldLogPutsWithPosition() {
    EasyMock.expect(inner.getPosition()).andReturn(POSITION).anyTimes();
    inner.put(bytesKey, value, 0);
    EasyMock.expectLastCall();
    init();
    final Bytes key = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 0);
    EasyMock.reset(context);
    final RecordMetadata recordContext = new ProcessorRecordContext(0L, 1L, 0, "", new RecordHeaders());
    EasyMock.expect(context.recordMetadata()).andStubReturn(Optional.of(recordContext));
    EasyMock.expect(context.timestamp()).andStubReturn(0L);
    final Position position = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 1L)))));
    context.logChange(store.name(), key, value, 0L, position);
    EasyMock.replay(context);
    store.put(bytesKey, value, context.timestamp());
    EasyMock.verify(inner, context);
}
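For readers decoding the mocked metadata: the ProcessorRecordContext constructor takes (timestamp, offset, partition, topic, headers), so the instance above describes offset 1 of partition 0 of topic "". The expected Position mirrors exactly those coordinates:

// ProcessorRecordContext argument order: timestamp, offset, partition, topic, headers.
final RecordMetadata recordContext = new ProcessorRecordContext(0L, 1L, 0, "", new RecordHeaders());
// A Position maps topic -> (partition -> offset); the expected position therefore
// names the same coordinates: topic "", partition 0, offset 1.
final Position position = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 1L)))));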
Use of org.apache.kafka.streams.processor.api.RecordMetadata in project kafka by apache.
The class ChangeLoggingTimestampedWindowBytesStoreTest, method shouldLogPutsWithPosition.
@Test
public void shouldLogPutsWithPosition() {
    EasyMock.expect(inner.getPosition()).andReturn(POSITION).anyTimes();
    inner.put(bytesKey, valueAndTimestamp, 0);
    EasyMock.expectLastCall();
    init();
    final Bytes key = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 0);
    EasyMock.reset(context);
    final RecordMetadata recordContext = new ProcessorRecordContext(0L, 1L, 0, "", new RecordHeaders());
    EasyMock.expect(context.recordMetadata()).andStubReturn(Optional.of(recordContext));
    EasyMock.expect(context.timestamp()).andStubReturn(0L);  // needed for the put() call below
    final Position position = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 1L)))));
    context.logChange(store.name(), key, value, 42, position);
    EasyMock.replay(context);
    store.put(bytesKey, valueAndTimestamp, context.timestamp());
    EasyMock.verify(inner, context);
}
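All three tests verify the same store-side path: write through to the inner store, then log the change together with the inner store's position. A minimal sketch of that path, inferred from the mock expectations above rather than copied from the Kafka source, looks like:

// Sketch of the changelogging put() path the tests above verify; simplified,
// inferred from the expectations rather than taken from Kafka itself.
public void put(final Bytes key, final byte[] value, final long windowStartTimestamp) {
    wrapped().put(key, value, windowStartTimestamp);  // write through to the inner store
    final Bytes binaryKey = WindowKeySchema.toStoreKeyBinary(key, windowStartTimestamp, 0);
    context.logChange(name(), binaryKey, value,
        context.timestamp(),       // the record timestamp the mocked context supplies
        wrapped().getPosition());  // POSITION, as stubbed on the inner store
}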
Use of org.apache.kafka.streams.processor.api.RecordMetadata in project kafka by apache.
The class SubscriptionStoreReceiveProcessorSupplier, method get.
@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;
        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
                Thread.currentThread().getName(),
                internalProcessorContext.taskId().toString(),
                internalProcessorContext.metrics());
            store = internalProcessorContext.getStateStore(storeBuilder);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                if (context().recordMetadata().isPresent()) {
                    final RecordMetadata recordMetadata = context().recordMetadata().get();
                    LOG.warn(
                        "Skipping record due to null foreign key. topic=[{}] partition=[{}] offset=[{}]",
                        recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                } else {
                    LOG.warn("Skipping record due to null foreign key. Topic, partition, and offset not known.");
                }
                droppedRecordsSensor.record();
                return;
            }
            if (record.value().getVersion() != SubscriptionWrapper.CURRENT_VERSION) {
                // Guard against modifications to SubscriptionWrapper. Compatibility with previous
                // versions is needed to enable rolling upgrades; a strategy must be developed for
                // upgrading from older SubscriptionWrapper versions to newer versions.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);

            // This store is used by the prefix scanner in ForeignJoinSubscriptionProcessorSupplier.
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE)
                    || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            final Change<ValueAndTimestamp<SubscriptionWrapper<K>>> change = new Change<>(newValue, oldValue);
            // Note: key and newValue are both non-nullable.
            context().forward(record
                .withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey()))
                .withValue(change)
                .withTimestamp(newValue.timestamp()));
        }
    };
}
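Both this processor and the join processor below share the same defensive pattern around context().recordMetadata(): the metadata is an Optional, so the warning degrades gracefully when topic, partition, and offset are unknown. A hypothetical helper (not in the Kafka source) extracting that pattern:

// Hypothetical helper, not part of the Kafka source, isolating the shared logging pattern.
private static String recordCoordinates(final ProcessorContext<?, ?> context) {
    return context.recordMetadata()
        .map(m -> String.format("topic=[%s] partition=[%d] offset=[%d]", m.topic(), m.partition(), m.offset()))
        .orElse("topic, partition, and offset not known");
}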
Use of org.apache.kafka.streams.processor.api.RecordMetadata in project kafka by apache.
The class KStreamKTableJoinProcessor, method process.
@Override
public void process(final Record<K1, V1> record) {
    // We join iff the join keys are equal; thus, if keyMapper returns null we cannot
    // join and just ignore the record. For KTables this is the same as having a null
    // key, since keyMapper just returns the key, but GlobalKTables can have other
    // keyMappers.
    //
    // We also ignore the record if its value is null: in a key-value data model a
    // null value indicates an empty message (i.e., there is nothing to be joined),
    // which contrasts with SQL NULL semantics. Furthermore, on left/outer joins a
    // null in ValueJoiner#apply() indicates a missing record; thus, to stay
    // consistent and to avoid ambiguous null semantics, null values are ignored.
    final K2 mappedKey = keyMapper.apply(record.key(), record.value());
    if (mappedKey == null || record.value() == null) {
        if (context().recordMetadata().isPresent()) {
            final RecordMetadata recordMetadata = context().recordMetadata().get();
            LOG.warn(
                "Skipping record due to null join key or value. topic=[{}] partition=[{}] offset=[{}]",
                recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
        } else {
            LOG.warn("Skipping record due to null join key or value. Topic, partition, and offset not known.");
        }
        droppedRecordsSensor.record();
    } else {
        final V2 value2 = getValueOrNull(valueGetter.get(mappedKey));
        if (leftJoin || value2 != null) {
            context().forward(record.withValue(joiner.apply(record.key(), record.value(), value2)));
        }
    }
}
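This processor runs behind the DSL's stream-table join. A minimal, hypothetical topology that would exercise it (topic names and joiner are illustrative only):

// Hypothetical topology whose stream-table join is executed by KStreamKTableJoinProcessor.
final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> orders = builder.stream("orders");
final KTable<String, String> customers = builder.table("customers");
orders.join(customers, (order, customer) -> order + "/" + customer)
      .to("enriched-orders");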