use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
the class ProcessorTopologyTestDriver method process.
/**
* Send an input message with the given key, value and timestamp on the specified topic to the topology, and then commit the message.
*
* @param topicName the name of the topic on which the message is to be sent
* @param key the raw message key
* @param value the raw message value
* @param timestamp the raw message timestamp
*/
private void process(String topicName, byte[] key, byte[] value, long timestamp) {
    TopicPartition tp = partitionsByTopic.get(topicName);
    if (tp != null) {
        // Add the record ...
        long offset = offsetsByTopicPartition.get(tp).incrementAndGet();
        task.addRecords(tp, records(new ConsumerRecord<>(tp.topic(), tp.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value)));
        producer.clear();
        // Process the record ...
        task.process();
        ((InternalProcessorContext) task.context()).setRecordContext(new ProcessorRecordContext(timestamp, offset, tp.partition(), topicName));
        task.commit();
        // Capture all the records sent to the producer ...
        for (ProducerRecord<byte[], byte[]> record : producer.history()) {
            Queue<ProducerRecord<byte[], byte[]>> outputRecords = outputRecordsByTopic.get(record.topic());
            if (outputRecords == null) {
                outputRecords = new LinkedList<>();
                outputRecordsByTopic.put(record.topic(), outputRecords);
            }
            outputRecords.add(record);
            // Forward back into the topology if the produced record is to an internal or a source topic ...
            if (internalTopics.contains(record.topic()) || topology.sourceTopics().contains(record.topic())) {
                process(record.topic(), record.key(), record.value(), record.timestamp());
            }
        }
    } else {
        final TopicPartition global = globalPartitionsByTopic.get(topicName);
        if (global == null) {
            throw new IllegalArgumentException("Unexpected topic: " + topicName);
        }
        final long offset = offsetsByTopicPartition.get(global).incrementAndGet();
        globalStateTask.update(new ConsumerRecord<>(global.topic(), global.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value));
        globalStateTask.flushState();
    }
}
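The get/put pattern used above to group producer history by topic predates Map.computeIfAbsent. Below is a minimal, self-contained sketch of that capture step against a MockProducer; the class name ProducerHistoryCapture and the topic names are illustrative, not part of the driver.

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class ProducerHistoryCapture {
    public static void main(String[] args) {
        // MockProducer keeps every send() in memory instead of contacting a
        // broker, which is exactly what lets the driver inspect producer.history().
        MockProducer<byte[], byte[]> producer = new MockProducer<>(
                true, new ByteArraySerializer(), new ByteArraySerializer());

        producer.send(new ProducerRecord<>("out-a", "k".getBytes(), "v1".getBytes()));
        producer.send(new ProducerRecord<>("out-b", "k".getBytes(), "v2".getBytes()));

        // Group the captured records by topic, as the driver does above,
        // but with computeIfAbsent in place of the explicit null check.
        Map<String, Queue<ProducerRecord<byte[], byte[]>>> byTopic = new HashMap<>();
        for (ProducerRecord<byte[], byte[]> r : producer.history()) {
            byTopic.computeIfAbsent(r.topic(), t -> new ArrayDeque<>()).add(r);
        }
        System.out.println(byTopic.get("out-a").size()); // 1
        System.out.println(byTopic.get("out-b").size()); // 1
    }
}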
use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project apache-kafka-on-k8s by banzaicloud.
the class ProcessorTopologyTestDriver method process.
/**
* Send an input message with the given key, value and timestamp on the specified topic to the topology, and then commit the message.
*
* @param topicName the name of the topic on which the message is to be sent
* @param key the raw message key
* @param value the raw message value
* @param timestamp the raw message timestamp
*/
public void process(final String topicName, final byte[] key, final byte[] value, final long timestamp) {
    final TopicPartition tp = partitionsByTopic.get(topicName);
    if (tp != null) {
        // Add the record ...
        final long offset = offsetsByTopicPartition.get(tp).incrementAndGet();
        task.addRecords(tp, records(new ConsumerRecord<>(tp.topic(), tp.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value)));
        producer.clear();
        // Process the record ...
        task.process();
        ((InternalProcessorContext) task.context()).setRecordContext(new ProcessorRecordContext(timestamp, offset, tp.partition(), topicName));
        task.commit();
        // Capture all the records sent to the producer ...
        for (final ProducerRecord<byte[], byte[]> record : producer.history()) {
            Queue<ProducerRecord<byte[], byte[]>> outputRecords = outputRecordsByTopic.get(record.topic());
            if (outputRecords == null) {
                outputRecords = new LinkedList<>();
                outputRecordsByTopic.put(record.topic(), outputRecords);
            }
            outputRecords.add(record);
            // Forward back into the topology if the produced record is to an internal or a source topic ...
            if (internalTopics.contains(record.topic()) || topology.sourceTopics().contains(record.topic())) {
                process(record.topic(), record.key(), record.value(), record.timestamp());
            }
        }
    } else {
        final TopicPartition global = globalPartitionsByTopic.get(topicName);
        if (global == null) {
            throw new IllegalArgumentException("Unexpected topic: " + topicName);
        }
        final long offset = offsetsByTopicPartition.get(global).incrementAndGet();
        globalStateTask.update(new ConsumerRecord<>(global.topic(), global.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value));
        globalStateTask.flushState();
    }
}
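Both variants hand out offsets from per-partition AtomicLong counters (offsetsByTopicPartition), which the driver pre-populates when it is constructed. A self-contained sketch of that bookkeeping follows; here the counters are created lazily, and the class name OffsetCounters is made up for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.common.TopicPartition;

public class OffsetCounters {
    // One monotonically increasing counter per partition, mirroring
    // offsetsByTopicPartition in the driver above.
    private final Map<TopicPartition, AtomicLong> offsets = new HashMap<>();

    long nextOffset(TopicPartition tp) {
        // incrementAndGet on a fresh AtomicLong yields 1, so the first
        // record on each partition gets offset 1 and later records count up.
        return offsets.computeIfAbsent(tp, p -> new AtomicLong()).incrementAndGet();
    }

    public static void main(String[] args) {
        OffsetCounters counters = new OffsetCounters();
        TopicPartition tp = new TopicPartition("input", 0);
        System.out.println(counters.nextOffset(tp)); // 1
        System.out.println(counters.nextOffset(tp)); // 2
        System.out.println(counters.nextOffset(new TopicPartition("input", 1))); // 1
    }
}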
use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
the class InMemorySessionStore method init.
@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
    final String threadId = Thread.currentThread().getName();
    final String taskName = context.taskId().toString();
    // The provided context is not required to implement InternalProcessorContext;
    // if it doesn't, we can't record this metric.
    if (context instanceof InternalProcessorContext) {
        this.context = (InternalProcessorContext) context;
        final StreamsMetricsImpl metrics = this.context.metrics();
        expiredRecordSensor = TaskMetrics.droppedRecordsSensor(threadId, taskName, metrics);
    } else {
        this.context = null;
        expiredRecordSensor = null;
    }
    if (root != null) {
        final boolean consistencyEnabled = StreamsConfig.InternalConfig.getBoolean(context.appConfigs(), IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, false);
        context.register(root, (RecordBatchingStateRestoreCallback) records -> {
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                put(SessionKeySchema.from(Bytes.wrap(record.key())), record.value());
                ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            }
        });
    }
    open = true;
}
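The instanceof guard is needed because init is declared against the public ProcessorContext type, while the metrics hook is an internal capability that only some implementations provide. Below is a generic, dependency-free sketch of the same guarded-downcast idiom; the class and method names are invented for illustration.

import java.util.Optional;

public final class GuardedCast {
    // Generic form of the guard in init() above: expose an optional
    // capability only when the runtime type actually provides it.
    static <T> Optional<T> asCapability(Object candidate, Class<T> capability) {
        return capability.isInstance(candidate)
                ? Optional.of(capability.cast(candidate))
                : Optional.empty();
    }

    public static void main(String[] args) {
        // StringBuilder implements Appendable; String does not.
        System.out.println(asCapability(new StringBuilder(), Appendable.class).isPresent()); // true
        System.out.println(asCapability("abc", Appendable.class).isPresent());               // false
    }
}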
use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
the class MeteredWindowStore method init.
@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
    this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext<?, ?>) context : null;
    taskId = context.taskId();
    initStoreSerde(context);
    streamsMetrics = (StreamsMetricsImpl) context.metrics();
    registerMetrics();
    final Sensor restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
    // register and possibly restore the state from the logs
    maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor);
}
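maybeMeasureLatency is a static helper on StreamsMetricsImpl: it times the wrapped call and records the elapsed nanoseconds on the sensor, but skips the clock reads entirely when the sensor is not recording. A simplified sketch of that pattern follows, substituting a boolean and a LongConsumer for the real Sensor and Time parameters.

import java.util.function.LongConsumer;

public final class MaybeMeasure {
    // Simplified stand-in for StreamsMetricsImpl.maybeMeasureLatency: only
    // pay for System.nanoTime() when someone is actually listening.
    static void maybeMeasureLatency(Runnable action, boolean shouldRecord, LongConsumer recorder) {
        if (shouldRecord) {
            final long start = System.nanoTime();
            try {
                action.run();
            } finally {
                recorder.accept(System.nanoTime() - start);
            }
        } else {
            action.run();
        }
    }

    public static void main(String[] args) {
        maybeMeasureLatency(
                () -> { /* stand-in for super.init(context, root) */ },
                true,
                nanos -> System.out.println("restore took " + nanos + " ns"));
    }
}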
use of org.apache.kafka.streams.processor.internals.InternalProcessorContext in project kafka by apache.
the class MeteredKeyValueStore method init.
@Override
public void init(final StateStoreContext context, final StateStore root) {
    this.context = context instanceof InternalProcessorContext ? (InternalProcessorContext<?, ?>) context : null;
    taskId = context.taskId();
    initStoreSerde(context);
    streamsMetrics = (StreamsMetricsImpl) context.metrics();
    registerMetrics();
    final Sensor restoreSensor = StateStoreMetrics.restoreSensor(taskId.toString(), metricsScope, name(), streamsMetrics);
    // register and possibly restore the state from the logs
    maybeMeasureLatency(() -> super.init(context, root), time, restoreSensor);
}
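This method is identical to the MeteredWindowStore version above except for the context type: it takes the newer StateStoreContext rather than the deprecated ProcessorContext. One way a store can keep the two overloads in sync is a thin shim from the old signature to the new one; the following is a hypothetical sketch (MyStore is not Kafka code, and the plain cast assumes the runtime context implements both interfaces, as Kafka's internal contexts do).

import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.StateStore;
import org.apache.kafka.streams.processor.StateStoreContext;

// Hypothetical store skeleton: the deprecated overload simply delegates,
// so all initialization logic lives in the StateStoreContext variant.
abstract class MyStore implements StateStore {
    @Deprecated
    @Override
    public void init(final ProcessorContext context, final StateStore root) {
        // Assumes the context also implements StateStoreContext, which holds
        // for the InternalProcessorContext implementations shown above.
        init((StateStoreContext) context, root);
    }

    @Override
    public abstract void init(StateStoreContext context, StateStore root);
}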