use of org.apache.kafka.streams.processor.internals.RecordCollector in project apache-kafka-on-k8s by banzaicloud.
the class CachingKeyValueStoreTest method setUp.
@Before
public void setUp() {
    final String storeName = "store";
    // The inner store holds raw bytes; the caching layer adds the String serdes on top.
    underlyingStore = new InMemoryKeyValueStore<>(storeName, Serdes.Bytes(), Serdes.ByteArray());
    cacheFlushListener = new CacheFlushListenerStub<>();
    store = new CachingKeyValueStore<>(underlyingStore, Serdes.String(), Serdes.String());
    store.setFlushListener(cacheFlushListener, false);
    cache = new ThreadCache(new LogContext("testCache "), maxCacheSizeBytes, new MockStreamsMetrics(new Metrics()));
    // These tests never produce to Kafka, so the RecordCollector is passed as null.
    context = new InternalMockProcessorContext(null, null, null, (RecordCollector) null, cache);
    topic = "topic";
    context.setRecordContext(new ProcessorRecordContext(10, 0, 0, topic));
    store.init(context, null);
}
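A minimal sketch of how this fixture is typically exercised: writes through the caching layer stay in the ThreadCache until a flush pushes them into the underlying store and fires the flush listener. The test name and assertion below are illustrative, and the typed put/get API assumes the older generic CachingKeyValueStore used by this fork; store and underlyingStore are the fields initialized in setUp.

@Test
public void shouldWriteThroughCacheOnFlush() {
    // The write is buffered in the ThreadCache, not yet in underlyingStore.
    store.put("key", "value");
    assertEquals("value", store.get("key"));
    // flush() evicts dirty entries into the underlying store and invokes the flush listener.
    store.flush();
    assertEquals(1, underlyingStore.approximateNumEntries());
}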
use of org.apache.kafka.streams.processor.internals.RecordCollector in project kafka by apache.
the class InMemoryTimeOrderedKeyValueBuffer method logValue.
private void logValue(final Bytes key, final BufferKey bufferKey, final BufferValue value) {
    // Serialize the buffered value, reserving Long.BYTES of headroom at the end for the buffer time.
    final int sizeOfBufferTime = Long.BYTES;
    final ByteBuffer buffer = value.serialize(sizeOfBufferTime);
    buffer.putLong(bufferKey.time());
    final byte[] array = buffer.array();
    // The processor context doubles as a RecordCollector.Supplier; use it to write to the changelog topic.
    ((RecordCollector.Supplier) context).recordCollector().send(changelogTopic, key, array, CHANGELOG_HEADERS, partition, null, KEY_SERIALIZER, VALUE_SERIALIZER);
}
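The changelog value is framed as the serialized BufferValue followed by its 8-byte buffer time: value.serialize(sizeOfBufferTime) leaves Long.BYTES of room at the tail, and putLong fills it. A standalone sketch of that framing pattern using plain java.nio (the class and method names here are illustrative, not the internal Kafka types):

import java.nio.ByteBuffer;

public class TimestampFraming {
    // Frame a payload with an 8-byte timestamp appended at the end,
    // mirroring value.serialize(Long.BYTES) followed by putLong(time).
    static byte[] frame(final byte[] payload, final long time) {
        final ByteBuffer buffer = ByteBuffer.allocate(payload.length + Long.BYTES);
        buffer.put(payload);
        buffer.putLong(time); // the buffer time lands in the reserved tail
        return buffer.array();
    }

    // Recover the timestamp from the tail of a framed record.
    static long time(final byte[] framed) {
        return ByteBuffer.wrap(framed).getLong(framed.length - Long.BYTES);
    }

    public static void main(final String[] args) {
        final byte[] framed = frame("value".getBytes(), 42L);
        System.out.println(time(framed)); // 42
    }
}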
use of org.apache.kafka.streams.processor.internals.RecordCollector in project kafka by apache.
the class TopologyTestDriver method setupTask.
@SuppressWarnings("deprecation")
private void setupTask(final StreamsConfig streamsConfig, final StreamsMetricsImpl streamsMetrics, final ThreadCache cache, final TaskConfig taskConfig) {
    if (!partitionsByInputTopic.isEmpty()) {
        consumer.assign(partitionsByInputTopic.values());
        final Map<TopicPartition, Long> startOffsets = new HashMap<>();
        for (final TopicPartition topicPartition : partitionsByInputTopic.values()) {
            startOffsets.put(topicPartition, 0L);
        }
        consumer.updateBeginningOffsets(startOffsets);
        final ProcessorStateManager stateManager = new ProcessorStateManager(TASK_ID, Task.TaskType.ACTIVE, StreamsConfig.EXACTLY_ONCE.equals(streamsConfig.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG)), logContext, stateDirectory, new MockChangelogRegister(), processorTopology.storeToChangelogTopic(), new HashSet<>(partitionsByInputTopic.values()));
        // The collector writes through the test driver's producer instead of a real Kafka producer.
        final RecordCollector recordCollector = new RecordCollectorImpl(logContext, TASK_ID, testDriverProducer, streamsConfig.defaultProductionExceptionHandler(), streamsMetrics);
        final InternalProcessorContext context = new ProcessorContextImpl(TASK_ID, streamsConfig, stateManager, streamsMetrics, cache);
        task = new StreamTask(TASK_ID, new HashSet<>(partitionsByInputTopic.values()), processorTopology, consumer, taskConfig, streamsMetrics, stateDirectory, cache, mockWallClockTime, stateManager, recordCollector, context, logContext);
        task.initializeIfNeeded();
        task.completeRestoration(noOpResetter -> { });
        task.processorContext().setRecordContext(null);
    } else {
        task = null;
    }
}
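From the outside, the task wired up here is driven through TopologyTestDriver's public API. A hedged sketch of that usage follows; the topology shape, topic names, and String serdes are assumptions for illustration.

// Sketch only: a pass-through topology from "input-topic" to "output-topic".
final Topology topology = new Topology();
topology.addSource("source", "input-topic");
topology.addSink("sink", "output-topic", "source");

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");

try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
    final TestInputTopic<String, String> input =
            driver.createInputTopic("input-topic", new StringSerializer(), new StringSerializer());
    final TestOutputTopic<String, String> output =
            driver.createOutputTopic("output-topic", new StringDeserializer(), new StringDeserializer());
    // pipeInput() is processed synchronously by the StreamTask created in setupTask.
    input.pipeInput("key", "value");
    System.out.println(output.readKeyValue()); // KeyValue(key, value)
}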
use of org.apache.kafka.streams.processor.internals.RecordCollector in project apache-kafka-on-k8s by banzaicloud.
the class CachingWindowStoreTest method setUp.
@Before
public void setUp() {
    keySchema = new WindowKeySchema();
    final int retention = 30000;
    final int numSegments = 3;
    underlying = new RocksDBSegmentedBytesStore("test", retention, numSegments, keySchema);
    final RocksDBWindowStore<Bytes, byte[]> windowStore = new RocksDBWindowStore<>(underlying, Serdes.Bytes(), Serdes.ByteArray(), false, WINDOW_SIZE);
    cacheListener = new CachingKeyValueStoreTest.CacheFlushListenerStub<>();
    cachingStore = new CachingWindowStore<>(windowStore, Serdes.String(), Serdes.String(), WINDOW_SIZE, Segments.segmentInterval(retention, numSegments));
    cachingStore.setFlushListener(cacheListener, false);
    cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics()));
    topic = "topic";
    // Again no RecordCollector is needed, so null is passed to the mock context.
    context = new InternalMockProcessorContext(TestUtils.tempDirectory(), null, null, (RecordCollector) null, cache);
    context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, topic));
    cachingStore.init(context, cachingStore);
}
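As with the key-value variant, writes land in the cache first and fetch() sees them before any flush. A hedged follow-on test sketch (the method name is hypothetical, and the typed put/fetch API assumes the older generic CachingWindowStore in this fork):

@Test
public void shouldFetchPutValueFromCache() {
    // put() stamps the entry with the record-context timestamp set in setUp.
    cachingStore.put("a", "a");
    try (final WindowStoreIterator<String> iterator = cachingStore.fetch("a", DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP)) {
        assertEquals(KeyValue.pair(DEFAULT_TIMESTAMP, "a"), iterator.next());
    }
}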
use of org.apache.kafka.streams.processor.internals.RecordCollector in project kafka by apache.
the class StreamThreadStateStoreProviderTest method createStreamsTask.
private StreamTask createStreamsTask(final StreamsConfig streamsConfig, final MockClientSupplier clientSupplier, final ProcessorTopology topology, final TaskId taskId) {
    final Metrics metrics = new Metrics();
    final LogContext logContext = new LogContext("test-stream-task ");
    final Set<TopicPartition> partitions = Collections.singleton(new TopicPartition(topicName, taskId.partition()));
    final ProcessorStateManager stateManager = new ProcessorStateManager(taskId, Task.TaskType.ACTIVE, StreamsConfigUtils.eosEnabled(streamsConfig), logContext, stateDirectory, new StoreChangelogReader(new MockTime(), streamsConfig, logContext, clientSupplier.adminClient, clientSupplier.restoreConsumer, new MockStateRestoreListener()), topology.storeToChangelogTopic(), partitions);
    // A real RecordCollectorImpl backed by a StreamsProducer from the mock client supplier.
    final RecordCollector recordCollector = new RecordCollectorImpl(logContext, taskId, new StreamsProducer(streamsConfig, "threadId", clientSupplier, new TaskId(0, 0), UUID.randomUUID(), logContext, Time.SYSTEM), streamsConfig.defaultProductionExceptionHandler(), new MockStreamsMetrics(metrics));
    final StreamsMetricsImpl streamsMetrics = new MockStreamsMetrics(metrics);
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, streamsConfig, stateManager, streamsMetrics, null);
    return new StreamTask(taskId, partitions, topology, clientSupplier.consumer, new TopologyConfig(null, streamsConfig, new Properties()).getTaskConfig(), streamsMetrics, stateDirectory, EasyMock.createNiceMock(ThreadCache.class), new MockTime(), stateManager, recordCollector, context, logContext);
}
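Once created, the task still has to be initialized and restored before its stores are queryable. A hedged sketch of the follow-on steps (the store name "kv-store" is an assumption for illustration):

final StreamTask streamsTask = createStreamsTask(streamsConfig, clientSupplier, topology, new TaskId(0, 0));
streamsTask.initializeIfNeeded();                      // registers and opens the task's state stores
streamsTask.completeRestoration(noOpResetter -> { });  // same no-op resetter as in the snippets above
final StateStore store = streamsTask.getStore("kv-store"); // look up a store on the task by name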