Search in sources:

Example 1 with ProcessorRecordContext

Use of org.apache.kafka.streams.processor.internals.ProcessorRecordContext in the apache/kafka project.

From the class CachingKeyValueStoreTest, method setUp:

@Before
public void setUp() throws Exception {
    final String storeName = "store";
    // Plain in-memory store that the caching layer wraps
    underlyingStore = new InMemoryKeyValueStore<>(storeName, Serdes.Bytes(), Serdes.ByteArray());
    cacheFlushListener = new CacheFlushListenerStub<>();
    store = new CachingKeyValueStore<>(underlyingStore, Serdes.String(), Serdes.String());
    store.setFlushListener(cacheFlushListener);
    cache = new ThreadCache("testCache", maxCacheSizeBytes, new MockStreamsMetrics(new Metrics()));
    final MockProcessorContext context = new MockProcessorContext(null, null, null, (RecordCollector) null, cache);
    topic = "topic";
    // Stamp every subsequent write with: timestamp 10, offset 0, partition 0
    context.setRecordContext(new ProcessorRecordContext(10, 0, 0, topic));
    store.init(context, null);
}
Also used: MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics), Metrics (org.apache.kafka.common.metrics.Metrics), ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext), MockProcessorContext (org.apache.kafka.test.MockProcessorContext), Before (org.junit.Before)
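
For reference, the ProcessorRecordContext constructor arguments above follow the order (timestamp, offset, partition, topic). A minimal sketch reading those fields back, assuming the accessors declared on the RecordContext interface this class implements:

// Mirrors the setUp() above: timestamp 10, offset 0, partition 0, topic "topic"
final ProcessorRecordContext ctx = new ProcessorRecordContext(10L, 0L, 0, "topic");
System.out.println(ctx.timestamp()); // 10
System.out.println(ctx.offset());    // 0
System.out.println(ctx.partition()); // 0
System.out.println(ctx.topic());     // "topic"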

Example 2 with ProcessorRecordContext

Use of org.apache.kafka.streams.processor.internals.ProcessorRecordContext in the apache/kafka project.

From the class ProcessorTopologyTestDriver, method process:

/**
     * Send an input message with the given key, value and timestamp on the specified topic to the topology, and then commit the message.
     *
     * @param topicName the name of the topic on which the message is to be sent
     * @param key the raw message key
     * @param value the raw message value
     * @param timestamp the raw message timestamp
     */
private void process(String topicName, byte[] key, byte[] value, long timestamp) {
    TopicPartition tp = partitionsByTopic.get(topicName);
    if (tp != null) {
        // Add the record ...
        long offset = offsetsByTopicPartition.get(tp).incrementAndGet();
        task.addRecords(tp, records(new ConsumerRecord<>(tp.topic(), tp.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value)));
        producer.clear();
        // Process the record ...
        task.process();
        ((InternalProcessorContext) task.context()).setRecordContext(new ProcessorRecordContext(timestamp, offset, tp.partition(), topicName));
        task.commit();
        // Capture all the records sent to the producer ...
        for (ProducerRecord<byte[], byte[]> record : producer.history()) {
            Queue<ProducerRecord<byte[], byte[]>> outputRecords = outputRecordsByTopic.get(record.topic());
            if (outputRecords == null) {
                outputRecords = new LinkedList<>();
                outputRecordsByTopic.put(record.topic(), outputRecords);
            }
            outputRecords.add(record);
            // Forward back into the topology if the produced record is to an internal or a source topic ...
            if (internalTopics.contains(record.topic()) || topology.sourceTopics().contains(record.topic())) {
                process(record.topic(), record.key(), record.value(), record.timestamp());
            }
        }
    } else {
        final TopicPartition global = globalPartitionsByTopic.get(topicName);
        if (global == null) {
            throw new IllegalArgumentException("Unexpected topic: " + topicName);
        }
        final long offset = offsetsByTopicPartition.get(global).incrementAndGet();
        globalStateTask.update(new ConsumerRecord<>(global.topic(), global.partition(), offset, timestamp, TimestampType.CREATE_TIME, 0L, 0, 0, key, value));
        globalStateTask.flushState();
    }
}
Also used: ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext), TopicPartition (org.apache.kafka.common.TopicPartition), InternalProcessorContext (org.apache.kafka.streams.processor.internals.InternalProcessorContext), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)
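
In a test, this private process(...) is reached through the driver's public API, which serializes the key and value first. A minimal usage sketch, assuming this driver version exposes the process and readOutput overloads used here:

// Hypothetical setup; the constructor arguments (config, builder) are assumptions
final ProcessorTopologyTestDriver driver = new ProcessorTopologyTestDriver(config, builder);
// Serializes "key"/"value" and hands the raw bytes to the private process(...) above
driver.process("topic", "key", "value", new StringSerializer(), new StringSerializer());
// Drain one record the topology produced to its output topic (null if none)
final ProducerRecord<byte[], byte[]> output = driver.readOutput("output-topic");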

Example 3 with ProcessorRecordContext

Use of org.apache.kafka.streams.processor.internals.ProcessorRecordContext in the apache/kafka project.

From the class CachingWindowStoreTest, method setUp:

@Before
public void setUp() throws Exception {
    keySchema = new WindowKeySchema();
    // Segmented RocksDB store backing the window store: 30000 ms retention, 3 segments
    underlying = new RocksDBSegmentedBytesStore("test", 30000, 3, keySchema);
    final RocksDBWindowStore<Bytes, byte[]> windowStore = new RocksDBWindowStore<>(underlying, Serdes.Bytes(), Serdes.ByteArray(), false);
    cacheListener = new CachingKeyValueStoreTest.CacheFlushListenerStub<>();
    cachingStore = new CachingWindowStore<>(windowStore, Serdes.String(), Serdes.String(), WINDOW_SIZE);
    cachingStore.setFlushListener(cacheListener);
    cache = new ThreadCache("testCache", MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    topic = "topic";
    final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(), null, null, (RecordCollector) null, cache);
    // Pin event time to DEFAULT_TIMESTAMP for all writes until the context changes
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, topic));
    cachingStore.init(context, cachingStore);
}
Also used: MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics), MockProcessorContext (org.apache.kafka.test.MockProcessorContext), Bytes (org.apache.kafka.common.utils.Bytes), Metrics (org.apache.kafka.common.metrics.Metrics), ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext), Before (org.junit.Before)
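
With the context pinned to DEFAULT_TIMESTAMP, every write lands in the window containing that timestamp. A test can simulate later-arriving records by swapping in a fresh record context before the next write; a minimal sketch, assuming the caching store's put(key, value) stamps entries with the current context timestamp:

// Advance event time by one window before the next write
context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP + WINDOW_SIZE, 1, 0, topic));
cachingStore.put("key", "value"); // stored under the later window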

Example 4 with ProcessorRecordContext

Use of org.apache.kafka.streams.processor.internals.ProcessorRecordContext in the apache/kafka project.

From the class CachingSessionStoreTest, method setUp:

@Before
public void setUp() throws Exception {
    // Segmented RocksDB store backing the session store: 60000 ms retention, 3 segments
    underlying = new RocksDBSegmentedBytesStore("test", 60000, 3, new SessionKeySchema());
    final RocksDBSessionStore<Bytes, byte[]> sessionStore = new RocksDBSessionStore<>(underlying, Serdes.Bytes(), Serdes.ByteArray());
    cachingStore = new CachingSessionStore<>(sessionStore, Serdes.String(), Serdes.Long());
    cache = new ThreadCache("testCache", MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(), null, null, (RecordCollector) null, cache);
    // Pin event time to DEFAULT_TIMESTAMP for all writes
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, "topic"));
    cachingStore.init(context, cachingStore);
}
Also used: Bytes (org.apache.kafka.common.utils.Bytes), MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics), Metrics (org.apache.kafka.common.metrics.Metrics), ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext), MockProcessorContext (org.apache.kafka.test.MockProcessorContext), Before (org.junit.Before)
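
Unlike the window store above, a session store addresses entries by key plus an explicit SessionWindow. A minimal write sketch against the store initialized here, assuming the put(Windowed, value) form of the SessionStore contract:

// A single-record session: window start and end both at DEFAULT_TIMESTAMP
cachingStore.put(new Windowed<>("key", new SessionWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP)), 1L);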

Aggregations

ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext): 4 uses
Metrics (org.apache.kafka.common.metrics.Metrics): 3 uses
MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics): 3 uses
MockProcessorContext (org.apache.kafka.test.MockProcessorContext): 3 uses
Before (org.junit.Before): 3 uses
Bytes (org.apache.kafka.common.utils.Bytes): 2 uses
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 1 use
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 1 use
TopicPartition (org.apache.kafka.common.TopicPartition): 1 use
InternalProcessorContext (org.apache.kafka.streams.processor.internals.InternalProcessorContext): 1 use