Search in sources:

Example 1 with RecordCollector

use of org.apache.kafka.streams.processor.internals.RecordCollector in project apache-kafka-on-k8s by banzaicloud.

From the class CachingKeyValueStoreTest, the method setUp:

@Before
public void setUp() {
    // Plain in-memory byte store that the caching layer will wrap.
    underlyingStore = new InMemoryKeyValueStore<>("store", Serdes.Bytes(), Serdes.ByteArray());
    // Stub listener so the test can observe cache-eviction flushes.
    cacheFlushListener = new CacheFlushListenerStub<>();
    store = new CachingKeyValueStore<>(underlyingStore, Serdes.String(), Serdes.String());
    store.setFlushListener(cacheFlushListener, false);
    // Thread-level cache shared with the mock processor context below.
    cache = new ThreadCache(new LogContext("testCache "), maxCacheSizeBytes, new MockStreamsMetrics(new Metrics()));
    // No state dir, serdes, or record collector needed for these tests.
    context = new InternalMockProcessorContext(null, null, null, (RecordCollector) null, cache);
    topic = "topic";
    // Fixed record context: timestamp 10, offset 0, partition 0.
    context.setRecordContext(new ProcessorRecordContext(10, 0, 0, topic));
    store.init(context, null);
}
Also used : MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics) Metrics(org.apache.kafka.common.metrics.Metrics) RecordCollector(org.apache.kafka.streams.processor.internals.RecordCollector) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) LogContext(org.apache.kafka.common.utils.LogContext) MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics) InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext) Before(org.junit.Before)

Example 2 with RecordCollector

use of org.apache.kafka.streams.processor.internals.RecordCollector in project kafka by apache.

From the class InMemoryTimeOrderedKeyValueBuffer, the method logValue:

/**
 * Writes one buffered entry to the changelog topic. The serialized value is
 * suffixed with the 8-byte buffer time before being sent.
 */
private void logValue(final Bytes key, final BufferKey bufferKey, final BufferValue value) {
    // Reserve Long.BYTES of headroom so the buffer time fits after the value payload.
    final ByteBuffer payload = value.serialize(Long.BYTES);
    payload.putLong(bufferKey.time());
    final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector();
    // timestamp is null: the collector derives it for changelog records.
    collector.send(changelogTopic, key, payload.array(), CHANGELOG_HEADERS, partition, null, KEY_SERIALIZER, VALUE_SERIALIZER);
}
Also used : RecordCollector(org.apache.kafka.streams.processor.internals.RecordCollector) ByteBuffer(java.nio.ByteBuffer)

Example 3 with RecordCollector

use of org.apache.kafka.streams.processor.internals.RecordCollector in project kafka by apache.

From the class TopologyTestDriver, the method setupTask:

@SuppressWarnings("deprecation")
private void setupTask(final StreamsConfig streamsConfig, final StreamsMetricsImpl streamsMetrics, final ThreadCache cache, final TaskConfig taskConfig) {
    if (!partitionsByInputTopic.isEmpty()) {
        consumer.assign(partitionsByInputTopic.values());
        final Map<TopicPartition, Long> startOffsets = new HashMap<>();
        for (final TopicPartition topicPartition : partitionsByInputTopic.values()) {
            startOffsets.put(topicPartition, 0L);
        }
        consumer.updateBeginningOffsets(startOffsets);
        final ProcessorStateManager stateManager = new ProcessorStateManager(TASK_ID, Task.TaskType.ACTIVE, StreamsConfig.EXACTLY_ONCE.equals(streamsConfig.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG)), logContext, stateDirectory, new MockChangelogRegister(), processorTopology.storeToChangelogTopic(), new HashSet<>(partitionsByInputTopic.values()));
        final RecordCollector recordCollector = new RecordCollectorImpl(logContext, TASK_ID, testDriverProducer, streamsConfig.defaultProductionExceptionHandler(), streamsMetrics);
        final InternalProcessorContext context = new ProcessorContextImpl(TASK_ID, streamsConfig, stateManager, streamsMetrics, cache);
        task = new StreamTask(TASK_ID, new HashSet<>(partitionsByInputTopic.values()), processorTopology, consumer, taskConfig, streamsMetrics, stateDirectory, cache, mockWallClockTime, stateManager, recordCollector, context, logContext);
        task.initializeIfNeeded();
        task.completeRestoration(noOpResetter -> {
        });
        task.processorContext().setRecordContext(null);
    } else {
        task = null;
    }
}
Also used : RecordCollector(org.apache.kafka.streams.processor.internals.RecordCollector) HashMap(java.util.HashMap) InternalProcessorContext(org.apache.kafka.streams.processor.internals.InternalProcessorContext) GlobalProcessorContextImpl(org.apache.kafka.streams.processor.internals.GlobalProcessorContextImpl) ProcessorContextImpl(org.apache.kafka.streams.processor.internals.ProcessorContextImpl) RecordCollectorImpl(org.apache.kafka.streams.processor.internals.RecordCollectorImpl) TopicPartition(org.apache.kafka.common.TopicPartition) AtomicLong(java.util.concurrent.atomic.AtomicLong) ProcessorStateManager(org.apache.kafka.streams.processor.internals.ProcessorStateManager) StreamTask(org.apache.kafka.streams.processor.internals.StreamTask) HashSet(java.util.HashSet)

Example 4 with RecordCollector

use of org.apache.kafka.streams.processor.internals.RecordCollector in project apache-kafka-on-k8s by banzaicloud.

From the class CachingWindowStoreTest, the method setUp:

@Before
public void setUp() {
    keySchema = new WindowKeySchema();
    // 30s retention split across 3 RocksDB segments.
    final int retentionMs = 30000;
    final int segmentCount = 3;
    underlying = new RocksDBSegmentedBytesStore("test", retentionMs, segmentCount, keySchema);
    // retainDuplicates = false: one value per key per window.
    final RocksDBWindowStore<Bytes, byte[]> windowStore = new RocksDBWindowStore<>(underlying, Serdes.Bytes(), Serdes.ByteArray(), false, WINDOW_SIZE);
    cacheListener = new CachingKeyValueStoreTest.CacheFlushListenerStub<>();
    cachingStore = new CachingWindowStore<>(windowStore, Serdes.String(), Serdes.String(), WINDOW_SIZE, Segments.segmentInterval(retentionMs, segmentCount));
    cachingStore.setFlushListener(cacheListener, false);
    cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    topic = "topic";
    // RocksDB needs a real state directory, hence the temp dir; no collector required.
    context = new InternalMockProcessorContext(TestUtils.tempDirectory(), null, null, (RecordCollector) null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, topic));
    cachingStore.init(context, cachingStore);
}
Also used : RecordCollector(org.apache.kafka.streams.processor.internals.RecordCollector) LogContext(org.apache.kafka.common.utils.LogContext) MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics) Bytes(org.apache.kafka.common.utils.Bytes) MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics) Metrics(org.apache.kafka.common.metrics.Metrics) ProcessorRecordContext(org.apache.kafka.streams.processor.internals.ProcessorRecordContext) InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext) Before(org.junit.Before)

Example 5 with RecordCollector

use of org.apache.kafka.streams.processor.internals.RecordCollector in project kafka by apache.

From the class StreamThreadStateStoreProviderTest, the method createStreamsTask:

/**
 * Builds a fully wired active StreamTask over a single partition of the test
 * topic, backed by the given client supplier's mock clients.
 */
private StreamTask createStreamsTask(final StreamsConfig streamsConfig, final MockClientSupplier clientSupplier, final ProcessorTopology topology, final TaskId taskId) {
    final LogContext logContext = new LogContext("test-stream-task ");
    final Metrics metrics = new Metrics();
    // The task owns exactly one input partition, chosen from the task id.
    final Set<TopicPartition> partitions = Collections.singleton(new TopicPartition(topicName, taskId.partition()));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(new MockTime(), streamsConfig, logContext, clientSupplier.adminClient, clientSupplier.restoreConsumer, new MockStateRestoreListener());
    final ProcessorStateManager stateManager = new ProcessorStateManager(taskId, Task.TaskType.ACTIVE, StreamsConfigUtils.eosEnabled(streamsConfig), logContext, stateDirectory, changelogReader, topology.storeToChangelogTopic(), partitions);
    final StreamsProducer producer = new StreamsProducer(streamsConfig, "threadId", clientSupplier, new TaskId(0, 0), UUID.randomUUID(), logContext, Time.SYSTEM);
    final RecordCollector recordCollector = new RecordCollectorImpl(logContext, taskId, producer, streamsConfig.defaultProductionExceptionHandler(), new MockStreamsMetrics(metrics));
    final StreamsMetricsImpl streamsMetrics = new MockStreamsMetrics(metrics);
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, streamsConfig, stateManager, streamsMetrics, null);
    // ThreadCache is a nice mock: caching behavior is not under test here.
    return new StreamTask(taskId, partitions, topology, clientSupplier.consumer, new TopologyConfig(null, streamsConfig, new Properties()).getTaskConfig(), streamsMetrics, stateDirectory, EasyMock.createNiceMock(ThreadCache.class), new MockTime(), stateManager, recordCollector, context, logContext);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) RecordCollector(org.apache.kafka.streams.processor.internals.RecordCollector) StreamsProducer(org.apache.kafka.streams.processor.internals.StreamsProducer) InternalProcessorContext(org.apache.kafka.streams.processor.internals.InternalProcessorContext) LogContext(org.apache.kafka.common.utils.LogContext) MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics) MockStateRestoreListener(org.apache.kafka.test.MockStateRestoreListener) ProcessorContextImpl(org.apache.kafka.streams.processor.internals.ProcessorContextImpl) Properties(java.util.Properties) RecordCollectorImpl(org.apache.kafka.streams.processor.internals.RecordCollectorImpl) MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics) Metrics(org.apache.kafka.common.metrics.Metrics) TopicPartition(org.apache.kafka.common.TopicPartition) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) StoreChangelogReader(org.apache.kafka.streams.processor.internals.StoreChangelogReader) ProcessorStateManager(org.apache.kafka.streams.processor.internals.ProcessorStateManager) MockTime(org.apache.kafka.common.utils.MockTime) StreamTask(org.apache.kafka.streams.processor.internals.StreamTask) TopologyConfig(org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig)

Aggregations

RecordCollector (org.apache.kafka.streams.processor.internals.RecordCollector)5 Metrics (org.apache.kafka.common.metrics.Metrics)3 LogContext (org.apache.kafka.common.utils.LogContext)3 MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics)3 TopicPartition (org.apache.kafka.common.TopicPartition)2 InternalProcessorContext (org.apache.kafka.streams.processor.internals.InternalProcessorContext)2 ProcessorContextImpl (org.apache.kafka.streams.processor.internals.ProcessorContextImpl)2 ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext)2 ProcessorStateManager (org.apache.kafka.streams.processor.internals.ProcessorStateManager)2 RecordCollectorImpl (org.apache.kafka.streams.processor.internals.RecordCollectorImpl)2 StreamTask (org.apache.kafka.streams.processor.internals.StreamTask)2 InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext)2 Before (org.junit.Before)2 ByteBuffer (java.nio.ByteBuffer)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 Properties (java.util.Properties)1 AtomicLong (java.util.concurrent.atomic.AtomicLong)1 Bytes (org.apache.kafka.common.utils.Bytes)1 MockTime (org.apache.kafka.common.utils.MockTime)1