Use of org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig in project Kafka by Apache.
The class StreamTaskTest, method shouldReturnOffsetsForRepartitionTopicsForPurging.
@Test
public void shouldReturnOffsetsForRepartitionTopicsForPurging() {
    final TopicPartition repartition = new TopicPartition("repartition", 1);
    final ProcessorTopology topology = withRepartitionTopics(
        asList(source1, source2),
        mkMap(mkEntry(topic1, source1), mkEntry(repartition.topic(), source2)),
        singleton(repartition.topic())
    );
    consumer.assign(asList(partition1, repartition));
    consumer.updateBeginningOffsets(mkMap(mkEntry(repartition, 0L)));
    EasyMock.expect(stateManager.changelogPartitions()).andReturn(Collections.emptySet());
    EasyMock.expect(recordCollector.offsets()).andReturn(emptyMap()).anyTimes();
    EasyMock.replay(stateManager, recordCollector);
    final StreamsConfig config = createConfig();
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, config, stateManager, streamsMetrics, null);
    task = new StreamTask(taskId, mkSet(partition1, repartition), topology, consumer,
        new TopologyConfig(null, config, new Properties()).getTaskConfig(),
        streamsMetrics, stateDirectory, cache, time, stateManager, recordCollector, context, logContext);
    task.initializeIfNeeded();
    task.completeRestoration(noOpResetter -> { });
    task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(partition1, 5L)));
    task.addRecords(repartition, singletonList(getConsumerRecordWithOffsetAsTimestamp(repartition, 10L)));
    assertTrue(task.process(0L));
    assertTrue(task.process(0L));
    task.prepareCommit();
    final Map<TopicPartition, Long> map = task.purgeableOffsets();
    assertThat(map, equalTo(singletonMap(repartition, 11L)));
}
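The expected value of 11 is the offset of the last processed repartition record (10) plus one, i.e. the first offset that still has to be retained; everything before it may be purged. As a minimal sketch of what the runtime can do with this map, assuming an org.apache.kafka.clients.admin.Admin instance named adminClient (the loop and variable names are illustrative, not the actual StreamThread code):

final Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
for (final Map.Entry<TopicPartition, Long> entry : task.purgeableOffsets().entrySet()) {
    // beforeOffset(11L) lets the broker delete everything up to and including offset 10
    recordsToDelete.put(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue()));
}
adminClient.deleteRecords(recordsToDelete);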
Use of org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig in project Kafka by Apache.
The class StreamTaskTest, method createTimeoutTask.
private void createTimeoutTask(final String eosConfig) {
    EasyMock.replay(stateManager);
    final ProcessorTopology topology = withSources(singletonList(timeoutSource), mkMap(mkEntry(topic1, timeoutSource)));
    final StreamsConfig config = createConfig(eosConfig, "0");
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, config, stateManager, streamsMetrics, null);
    task = new StreamTask(taskId, mkSet(partition1), topology, consumer,
        new TopologyConfig(null, config, new Properties()).getTaskConfig(),
        streamsMetrics, stateDirectory, cache, time, stateManager, recordCollector, context, logContext);
}
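A usage sketch for this helper, assuming it is invoked with the processing-guarantee constants from StreamsConfig (the exact call sites are an assumption):

// at-least-once semantics
createTimeoutTask(StreamsConfig.AT_LEAST_ONCE);
// exactly-once semantics
createTimeoutTask(StreamsConfig.EXACTLY_ONCE_V2);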
Use of org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig in project Kafka by Apache.
The class StreamTaskTest, method createStatelessTask.
private StreamTask createStatelessTask(final StreamsConfig config) {
    final ProcessorTopology topology = withSources(
        asList(source1, source2, processorStreamTime, processorSystemTime),
        mkMap(mkEntry(topic1, source1), mkEntry(topic2, source2))
    );
    source1.addChild(processorStreamTime);
    source2.addChild(processorStreamTime);
    source1.addChild(processorSystemTime);
    source2.addChild(processorSystemTime);
    EasyMock.expect(stateManager.changelogPartitions()).andReturn(Collections.emptySet());
    EasyMock.expect(stateManager.changelogOffsets()).andReturn(Collections.emptyMap()).anyTimes();
    EasyMock.expect(recordCollector.offsets()).andReturn(Collections.emptyMap()).anyTimes();
    EasyMock.replay(stateManager, recordCollector);
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, config, stateManager, streamsMetrics, null);
    return new StreamTask(taskId, partitions, topology, consumer,
        new TopologyConfig(null, config, new Properties()).getTaskConfig(),
        new StreamsMetricsImpl(metrics, "test", StreamsConfig.METRICS_LATEST, time),
        stateDirectory, cache, time, stateManager, recordCollector, context, logContext);
}
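A usage sketch, assuming the returned task is driven through the same lifecycle calls that appear elsewhere in this test class:

task = createStatelessTask(createConfig("100"));
task.initializeIfNeeded();
task.completeRestoration(noOpResetter -> { });
task.addRecords(partition1, singletonList(getConsumerRecordWithOffsetAsTimestamp(partition1, 0L)));
assertTrue(task.process(0L));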
Use of org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig in project Kafka by Apache.
The class StreamTaskTest, method shouldThrowTopologyExceptionIfTaskCreatedForUnknownTopic.
@Test
public void shouldThrowTopologyExceptionIfTaskCreatedForUnknownTopic() {
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, createConfig("100"), stateManager, streamsMetrics, null);
    final StreamsMetricsImpl metrics = new StreamsMetricsImpl(this.metrics, "test", StreamsConfig.METRICS_LATEST, time);
    EasyMock.expect(stateManager.changelogPartitions()).andReturn(Collections.emptySet());
    EasyMock.replay(stateManager);
    // The processor topology is missing the topics
    final ProcessorTopology topology = withSources(emptyList(), mkMap());
    final TopologyException exception = assertThrows(TopologyException.class, () -> new StreamTask(
        taskId, partitions, topology, consumer,
        new TopologyConfig(null, createConfig("100"), new Properties()).getTaskConfig(),
        metrics, stateDirectory, cache, time, stateManager, recordCollector, context, logContext));
    assertThat(exception.getMessage(), equalTo(
        "Invalid topology: " +
        "Topic is unknown to the topology. This may happen if different KafkaStreams instances of the same " +
        "application execute different Topologies. Note that Topologies are only identical if all operators " +
        "are added in the same order."));
}
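For contrast, a hedged sketch of the passing case: when the topology's sources do cover the task's input topics, the same constructor call does not throw. This reuses the context and metrics built above and would require the mock to allow another changelogPartitions() call:

final ProcessorTopology knownTopology = withSources(
    asList(source1, source2),
    mkMap(mkEntry(topic1, source1), mkEntry(topic2, source2))
);
final StreamTask validTask = new StreamTask(taskId, partitions, knownTopology, consumer,
    new TopologyConfig(null, createConfig("100"), new Properties()).getTaskConfig(),
    metrics, stateDirectory, cache, time, stateManager, recordCollector, context, logContext);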
Use of org.apache.kafka.streams.processor.internals.namedtopology.TopologyConfig in project Kafka by Apache.
The class StreamTaskTest, method createOptimizedStatefulTask.
private StreamTask createOptimizedStatefulTask(final StreamsConfig config, final Consumer<byte[], byte[]> consumer) {
    final StateStore stateStore = new MockKeyValueStore(storeName, true);
    final ProcessorTopology topology = ProcessorTopologyFactories.with(
        singletonList(source1),
        mkMap(mkEntry(topic1, source1)),
        singletonList(stateStore),
        Collections.singletonMap(storeName, topic1)
    );
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, config, stateManager, streamsMetrics, null);
    return new StreamTask(taskId, mkSet(partition1), topology, consumer,
        new TopologyConfig(null, config, new Properties()).getTaskConfig(),
        streamsMetrics, stateDirectory, cache, time, stateManager, recordCollector, context, logContext);
}
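Every snippet above builds the TopologyConfig with null for the topology name and an empty Properties, so the resulting task config falls back entirely to the application-wide StreamsConfig. A hedged sketch of supplying per-topology overrides instead ("my-topology" is a made-up name, and whether a particular key can be overridden at the topology level is an assumption here, as is the nested TaskConfig type name):

final Properties topologyOverrides = new Properties();
// illustrative override; topology-level support for this key is assumed
topologyOverrides.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, "0");
final TopologyConfig.TaskConfig taskConfig =
    new TopologyConfig("my-topology", config, topologyOverrides).getTaskConfig();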