Use of org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig in project kafka by apache, from the class StreamTaskTest, method createStatelessTaskWithForwardingTopology:
private StreamTask createStatelessTaskWithForwardingTopology(final SourceNode<Integer, Integer> sourceNode) {
    final ProcessorTopology topology = withSources(asList(sourceNode, processorStreamTime), singletonMap(topic1, sourceNode));
    sourceNode.addChild(processorStreamTime);

    EasyMock.expect(stateManager.changelogPartitions()).andReturn(Collections.emptySet());
    EasyMock.expect(recordCollector.offsets()).andReturn(Collections.emptyMap()).anyTimes();
    EasyMock.replay(stateManager, recordCollector);

    final StreamsConfig config = createConfig();
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, config, stateManager, streamsMetrics, null);

    return new StreamTask(
        taskId, singleton(partition1), topology, consumer,
        new TopologyConfig(null, config, new Properties()).getTaskConfig(),
        new StreamsMetricsImpl(metrics, "test", StreamsConfig.METRICS_LATEST, time),
        stateDirectory, cache, time, stateManager, recordCollector, context, logContext);
}
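In the return statement above, new TopologyConfig(null, config, new Properties()).getTaskConfig() supplies the task-level configuration expected by the StreamTask constructor. Below is a minimal sketch of that construction in isolation, assuming a StreamsConfig built from hypothetical properties (the application id and bootstrap servers are not taken from the test) and assuming getTaskConfig() returns the nested TopologyConfig.TaskConfig type:

    // Hypothetical properties; any valid application id and bootstrap servers would do.
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-task-test-app");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    final StreamsConfig config = new StreamsConfig(props);

    // A null topology name denotes the default (unnamed) topology, and the empty
    // Properties carries no per-topology overrides, so the resulting TaskConfig
    // simply mirrors the task-level settings of the global StreamsConfig.
    final TopologyConfig.TaskConfig taskConfig =
        new TopologyConfig(null, config, new Properties()).getTaskConfig();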
Use of org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig in project kafka by apache, from the class StreamThreadStateStoreProviderTest, method createStreamsTask:
private StreamTask createStreamsTask(final StreamsConfig streamsConfig, final MockClientSupplier clientSupplier, final ProcessorTopology topology, final TaskId taskId) {
    final Metrics metrics = new Metrics();
    final LogContext logContext = new LogContext("test-stream-task ");
    final Set<TopicPartition> partitions = Collections.singleton(new TopicPartition(topicName, taskId.partition()));
    final ProcessorStateManager stateManager = new ProcessorStateManager(
        taskId, Task.TaskType.ACTIVE, StreamsConfigUtils.eosEnabled(streamsConfig), logContext, stateDirectory,
        new StoreChangelogReader(new MockTime(), streamsConfig, logContext, clientSupplier.adminClient, clientSupplier.restoreConsumer, new MockStateRestoreListener()),
        topology.storeToChangelogTopic(), partitions);
    final RecordCollector recordCollector = new RecordCollectorImpl(
        logContext, taskId,
        new StreamsProducer(streamsConfig, "threadId", clientSupplier, new TaskId(0, 0), UUID.randomUUID(), logContext, Time.SYSTEM),
        streamsConfig.defaultProductionExceptionHandler(), new MockStreamsMetrics(metrics));
    final StreamsMetricsImpl streamsMetrics = new MockStreamsMetrics(metrics);
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, streamsConfig, stateManager, streamsMetrics, null);
    return new StreamTask(
        taskId, partitions, topology, clientSupplier.consumer,
        new TopologyConfig(null, streamsConfig, new Properties()).getTaskConfig(),
        streamsMetrics, stateDirectory, EasyMock.createNiceMock(ThreadCache.class), new MockTime(),
        stateManager, recordCollector, context, logContext);
}
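Both tests pass null as the topology name together with an empty Properties, so no per-topology overrides apply. As a hedged sketch only (the topology name and the override values below are hypothetical, not from the test), a named topology could instead supply overrides that getTaskConfig() resolves before falling back to the global StreamsConfig:

    // Hypothetical per-topology overrides using standard task-level StreamsConfig keys.
    final Properties overrides = new Properties();
    overrides.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, "500");
    overrides.put(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, "60000");

    // "provider-test-topology" is a made-up name; with a non-null name the overrides
    // take precedence for this topology, while unset keys fall back to streamsConfig.
    final TopologyConfig namedConfig = new TopologyConfig("provider-test-topology", streamsConfig, overrides);
    final TopologyConfig.TaskConfig overriddenTaskConfig = namedConfig.getTaskConfig();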