Use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
The class MeteredSessionStore, method init.
@Override
public void init(final StateStoreContext context, final StateStore root) {
    this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext<?, ?>) context : null;
    taskId = context.taskId();
    initStoreSerde(context);
    streamsMetrics = (StreamsMetricsImpl) context.metrics();
    registerMetrics();
    final Sensor restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
    // register and possibly restore the state from the logs
    maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor);
}
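The maybeMeasureLatency call at the end wraps the potentially expensive inner init and restoration in a timing measurement. Below is a minimal sketch of the pattern it implements, assuming only the public Sensor and Time types from kafka-clients; the real helper lives in StreamsMetricsImpl and may differ in detail.

import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.utils.Time;

public final class LatencyMeasurement {
    // Sketch of the wrap-and-time pattern used by maybeMeasureLatency above.
    // The timing cost is only paid when the sensor is active and has metrics
    // attached; otherwise the action runs unwrapped.
    public static void maybeMeasureLatency(final Runnable action, final Time time, final Sensor sensor) {
        if (sensor.shouldRecord() && sensor.hasMetrics()) {
            final long startNs = time.nanoseconds();
            try {
                action.run();
            } finally {
                sensor.record(time.nanoseconds() - startNs);
            }
        } else {
            action.run();
        }
    }
}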
Use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
The class TopologyTestDriver, method setupTask.
@SuppressWarnings("deprecation")
private void setupTask(final StreamsConfig streamsConfig,
                       final StreamsMetricsImpl streamsMetrics,
                       final ThreadCache cache,
                       final TaskConfig taskConfig) {
    if (!partitionsByInputTopic.isEmpty()) {
        consumer.assign(partitionsByInputTopic.values());
        final Map<TopicPartition, Long> startOffsets = new HashMap<>();
        for (final TopicPartition topicPartition : partitionsByInputTopic.values()) {
            startOffsets.put(topicPartition, 0L);
        }
        consumer.updateBeginningOffsets(startOffsets);
        final ProcessorStateManager stateManager = new ProcessorStateManager(
            TASK_ID, Task.TaskType.ACTIVE,
            StreamsConfig.EXACTLY_ONCE.equals(streamsConfig.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG)),
            logContext, stateDirectory, new MockChangelogRegister(),
            processorTopology.storeToChangelogTopic(),
            new HashSet<>(partitionsByInputTopic.values()));
        final RecordCollector recordCollector = new RecordCollectorImpl(
            logContext, TASK_ID, testDriverProducer,
            streamsConfig.defaultProductionExceptionHandler(), streamsMetrics);
        final InternalProcessorContext context = new ProcessorContextImpl(
            TASK_ID, streamsConfig, stateManager, streamsMetrics, cache);
        task = new StreamTask(
            TASK_ID, new HashSet<>(partitionsByInputTopic.values()), processorTopology,
            consumer, taskConfig, streamsMetrics, stateDirectory, cache,
            mockWallClockTime, stateManager, recordCollector, context, logContext);
        task.initializeIfNeeded();
        task.completeRestoration(noOpResetter -> { });
        task.processorContext().setRecordContext(null);
    } else {
        task = null;
    }
}
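This wiring is internal to the driver; from a test, the same machinery is exercised through the public API. A minimal usage sketch, assuming a hypothetical application that upper-cases values from an "input" topic into an "output" topic:

import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class TopologyTestDriverExample {
    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        // A trivial topology: upper-case every value from "input" into "output".
        builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
            .mapValues(v -> v.toUpperCase())
            .to("output", Produced.with(Serdes.String(), Serdes.String()));

        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");

        try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
            final TestInputTopic<String, String> in = driver.createInputTopic(
                "input", Serdes.String().serializer(), Serdes.String().serializer());
            final TestOutputTopic<String, String> out = driver.createOutputTopic(
                "output", Serdes.String().deserializer(), Serdes.String().deserializer());
            in.pipeInput("k", "hello");
            System.out.println(out.readKeyValue()); // KeyValue(k, HELLO)
        }
    }
}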
Use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project apache-kafka-on-k8s by banzaicloud.
The class TopologyTestDriver, method pipeInput.
/**
 * Send an input message with the given key, value, and timestamp on the specified topic to the topology and then
 * commit the message.
 *
 * @param consumerRecord the record to be processed
 */
public void pipeInput(final ConsumerRecord<byte[], byte[]> consumerRecord) {
    final String topicName = consumerRecord.topic();
    final TopicPartition topicPartition = partitionsByTopic.get(topicName);
    if (topicPartition != null) {
        final long offset = offsetsByTopicPartition.get(topicPartition).incrementAndGet() - 1;
        task.addRecords(topicPartition, Collections.singleton(new ConsumerRecord<>(
            topicName, topicPartition.partition(), offset,
            consumerRecord.timestamp(), consumerRecord.timestampType(), ConsumerRecord.NULL_CHECKSUM,
            consumerRecord.serializedKeySize(), consumerRecord.serializedValueSize(),
            consumerRecord.key(), consumerRecord.value())));
        // Process the record ...
        ((InternalProcessorContext) task.context()).setRecordContext(
            new ProcessorRecordContext(consumerRecord.timestamp(), offset, topicPartition.partition(), topicName));
        task.process();
        task.maybePunctuateStreamTime();
        task.commit();
        captureOutputRecords();
    } else {
        final TopicPartition globalTopicPartition = globalPartitionsByTopic.get(topicName);
        if (globalTopicPartition == null) {
            throw new IllegalArgumentException("Unknown topic: " + topicName);
        }
        final long offset = offsetsByTopicPartition.get(globalTopicPartition).incrementAndGet() - 1;
        globalStateTask.update(new ConsumerRecord<>(
            globalTopicPartition.topic(), globalTopicPartition.partition(), offset,
            consumerRecord.timestamp(), consumerRecord.timestampType(), ConsumerRecord.NULL_CHECKSUM,
            consumerRecord.serializedKeySize(), consumerRecord.serializedValueSize(),
            consumerRecord.key(), consumerRecord.value()));
        globalStateTask.flushState();
    }
}
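This older driver variant takes pre-serialized ConsumerRecords directly. It was typically paired with the since-removed org.apache.kafka.streams.test.ConsumerRecordFactory. A short sketch under that assumption, with the driver presumed to be built elsewhere for a topology that reads from a hypothetical "input" topic:

import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.test.ConsumerRecordFactory;

public class OldPipeInputExample {
    // 'driver' is assumed to be constructed elsewhere for a topology reading "input".
    static void pipeOne(final TopologyTestDriver driver) {
        final ConsumerRecordFactory<String, String> factory = new ConsumerRecordFactory<>(
            "input", new StringSerializer(), new StringSerializer());
        // create() serializes key and value; pipeInput() then advances the
        // partition offset, processes the record, and commits, as shown above.
        driver.pipeInput(factory.create("input", "some-key", "some-value", 42L));
    }
}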
Use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
The class SubscriptionStoreReceiveProcessorSupplier, method get.
@Override
public Processor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> get() {
    return new ContextualProcessor<KO, SubscriptionWrapper<K>, CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>>() {

        private TimestampedKeyValueStore<Bytes, SubscriptionWrapper<K>> store;
        private Sensor droppedRecordsSensor;

        @Override
        public void init(final ProcessorContext<CombinedKey<KO, K>, Change<ValueAndTimestamp<SubscriptionWrapper<K>>>> context) {
            super.init(context);
            final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
            droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
                Thread.currentThread().getName(),
                internalProcessorContext.taskId().toString(),
                internalProcessorContext.metrics());
            store = internalProcessorContext.getStateStore(storeBuilder);
            keySchema.init(context);
        }

        @Override
        public void process(final Record<KO, SubscriptionWrapper<K>> record) {
            if (record.key() == null) {
                if (context().recordMetadata().isPresent()) {
                    final RecordMetadata recordMetadata = context().recordMetadata().get();
                    LOG.warn("Skipping record due to null foreign key. topic=[{}] partition=[{}] offset=[{}]",
                        recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                } else {
                    LOG.warn("Skipping record due to null foreign key. Topic, partition, and offset not known.");
                }
                droppedRecordsSensor.record();
                return;
            }
            if (record.value().getVersion() != SubscriptionWrapper.CURRENT_VERSION) {
                // Guard against modifications to SubscriptionWrapper: there must be a strategy
                // for upgrading from older SubscriptionWrapper versions to newer versions.
                throw new UnsupportedVersionException("SubscriptionWrapper is of an incompatible version.");
            }
            final Bytes subscriptionKey = keySchema.toBytes(record.key(), record.value().getPrimaryKey());
            final ValueAndTimestamp<SubscriptionWrapper<K>> newValue = ValueAndTimestamp.make(record.value(), record.timestamp());
            final ValueAndTimestamp<SubscriptionWrapper<K>> oldValue = store.get(subscriptionKey);
            // This store is used by the prefix scanner in ForeignJoinSubscriptionProcessorSupplier
            if (record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_AND_PROPAGATE)
                || record.value().getInstruction().equals(SubscriptionWrapper.Instruction.DELETE_KEY_NO_PROPAGATE)) {
                store.delete(subscriptionKey);
            } else {
                store.put(subscriptionKey, newValue);
            }
            final Change<ValueAndTimestamp<SubscriptionWrapper<K>>> change = new Change<>(newValue, oldValue);
            // note: key and newValue are non-nullable
            context().forward(record
                .withKey(new CombinedKey<>(record.key(), record.value().getPrimaryKey()))
                .withValue(change)
                .withTimestamp(newValue.timestamp()));
        }
    };
}
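The null-key guard and the recordMetadata() check above are a generally useful pattern. Here is a minimal sketch of the same guard written against the public Processor API only; the class name and logging are illustrative, not part of the Kafka source above:

import org.apache.kafka.streams.processor.api.ContextualProcessor;
import org.apache.kafka.streams.processor.api.Record;

// A user-level analogue of the null-key guard: drop records without a key,
// forward everything else unchanged. No InternalProcessorContext, no sensor.
public class NullKeyFilter<K, V> extends ContextualProcessor<K, V, K, V> {
    @Override
    public void process(final Record<K, V> record) {
        if (record.key() == null) {
            // recordMetadata() is empty when the record did not originate
            // from an input topic (e.g. it was forwarded by a punctuator).
            context().recordMetadata().ifPresent(m ->
                System.err.printf("Dropping null-key record: topic=%s partition=%d offset=%d%n",
                    m.topic(), m.partition(), m.offset()));
            return;
        }
        context().forward(record);
    }
}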
Use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
The class MeteredSessionStore, method init (deprecated ProcessorContext overload).
@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
    this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext<?, ?>) context : null;
    taskId = context.taskId();
    initStoreSerde(context);
    streamsMetrics = (StreamsMetricsImpl) context.metrics();
    registerMetrics();
    final Sensor restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
    // register and possibly restore the state from the logs
    maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor);
}
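Both init overloads start with the same defensive cast: the internal view is kept only when the runtime actually supplies an InternalProcessorContext. An illustrative helper (not part of the Kafka source) capturing that guard:

import org.apache.kafka.streams.processor.StateStoreContext;
import org.apache.kafka.streams.processor.internals.InternalProcessorContext;

final class ContextGuards {
    // Keep the internal view only when the runtime supplies one; tests or
    // custom runtimes may pass a plain context, and the store must still work.
    static InternalProcessorContext<?, ?> asInternalContext(final StateStoreContext context) {
        return context instanceof InternalProcessorContext
            ? (InternalProcessorContext<?, ?>) context
            : null;
    }
}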