Use of org.apache.kafka.streams.processor.internals.SourceNode in project kafka by apache.
The class TopologyBuilder, method build.
private ProcessorTopology build(Set<String> nodeGroup) {
    List<ProcessorNode> processorNodes = new ArrayList<>(nodeFactories.size());
    Map<String, ProcessorNode> processorMap = new HashMap<>();
    Map<String, SourceNode> topicSourceMap = new HashMap<>();
    Map<String, SinkNode> topicSinkMap = new HashMap<>();
    Map<String, StateStore> stateStoreMap = new LinkedHashMap<>();
    // create processor nodes in a topological order ("nodeFactories" is already topologically sorted)
    for (NodeFactory factory : nodeFactories.values()) {
        if (nodeGroup == null || nodeGroup.contains(factory.name)) {
            final ProcessorNode node = factory.build();
            processorNodes.add(node);
            processorMap.put(node.name(), node);
            if (factory instanceof ProcessorNodeFactory) {
                for (String parent : ((ProcessorNodeFactory) factory).parents) {
                    ProcessorNode<?, ?> parentNode = processorMap.get(parent);
                    parentNode.addChild(node);
                }
                for (String stateStoreName : ((ProcessorNodeFactory) factory).stateStoreNames) {
                    if (!stateStoreMap.containsKey(stateStoreName)) {
                        StateStore stateStore;
                        if (stateFactories.containsKey(stateStoreName)) {
                            final StateStoreSupplier supplier = stateFactories.get(stateStoreName).supplier;
                            stateStore = supplier.get();
                            // remember the changelog topic if this state store is change-logging enabled
                            if (supplier.loggingEnabled() && !storeToChangelogTopic.containsKey(stateStoreName)) {
                                final String changelogTopic = ProcessorStateManager.storeChangelogTopic(this.applicationId, stateStoreName);
                                storeToChangelogTopic.put(stateStoreName, changelogTopic);
                            }
                        } else {
                            stateStore = globalStateStores.get(stateStoreName);
                        }
                        stateStoreMap.put(stateStoreName, stateStore);
                    }
                }
            } else if (factory instanceof SourceNodeFactory) {
                final SourceNodeFactory sourceNodeFactory = (SourceNodeFactory) factory;
                final List<String> topics = (sourceNodeFactory.pattern != null)
                        ? sourceNodeFactory.getTopics(subscriptionUpdates.getUpdates())
                        : sourceNodeFactory.topics;
                for (String topic : topics) {
                    if (internalTopicNames.contains(topic)) {
                        // prefix the internal topic name with the application id
                        topicSourceMap.put(decorateTopic(topic), (SourceNode) node);
                    } else {
                        topicSourceMap.put(topic, (SourceNode) node);
                    }
                }
            } else if (factory instanceof SinkNodeFactory) {
                final SinkNodeFactory sinkNodeFactory = (SinkNodeFactory) factory;
                for (String parent : sinkNodeFactory.parents) {
                    processorMap.get(parent).addChild(node);
                    if (internalTopicNames.contains(sinkNodeFactory.topic)) {
                        // prefix the internal topic name with the application id
                        topicSinkMap.put(decorateTopic(sinkNodeFactory.topic), (SinkNode) node);
                    } else {
                        topicSinkMap.put(sinkNodeFactory.topic, (SinkNode) node);
                    }
                }
            } else {
                throw new TopologyBuilderException("Unknown definition class: " + factory.getClass().getName());
            }
        }
    }
    return new ProcessorTopology(processorNodes, topicSourceMap, topicSinkMap,
            new ArrayList<>(stateStoreMap.values()), storeToChangelogTopic,
            new ArrayList<>(globalStateStores.values()));
}
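Both topic-naming conventions in build() derive names from the application id. The sketch below mirrors them in standalone form; the class and helper names (TopicNaming, changelogTopicFor, decorateInternalTopic) are hypothetical stand-ins, since the real logic lives in ProcessorStateManager.storeChangelogTopic and the builder's private decorateTopic.

// Standalone sketch of the naming conventions used above (hypothetical helpers).
public final class TopicNaming {

    // Changelog topics follow the convention "<applicationId>-<storeName>-changelog".
    static String changelogTopicFor(String applicationId, String storeName) {
        return applicationId + "-" + storeName + "-changelog";
    }

    // Internal topics (e.g. repartition topics) are prefixed with the application id.
    static String decorateInternalTopic(String applicationId, String topic) {
        return applicationId + "-" + topic;
    }

    public static void main(String[] args) {
        System.out.println(changelogTopicFor("my-app", "counts-store"));
        // prints: my-app-counts-store-changelog
        System.out.println(decorateInternalTopic("my-app", "KSTREAM-MAP-0000000001-repartition"));
        // prints: my-app-KSTREAM-MAP-0000000001-repartition
    }
}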
Use of org.apache.kafka.streams.processor.internals.SourceNode in project apache-kafka-on-k8s by banzaicloud.
The class KStreamImplTest, method shouldUseRecordMetadataTimestampExtractorWhenInternalRepartitioningTopicCreated.
@Test
// TODO: this test should be refactored when we remove KStreamBuilder so that the created Topology contains internal topics as well
public void shouldUseRecordMetadataTimestampExtractorWhenInternalRepartitioningTopicCreated() {
    final KStreamBuilder builder = new KStreamBuilder();
    KStream<String, String> kStream = builder.stream(stringSerde, stringSerde, "topic-1");
    ValueJoiner<String, String, String> valueJoiner = MockValueJoiner.instance(":");
    long windowSize = TimeUnit.MILLISECONDS.convert(1, TimeUnit.DAYS);
    final KStream<String, String> stream = kStream.map(new KeyValueMapper<String, String, KeyValue<? extends String, ? extends String>>() {
        @Override
        public KeyValue<? extends String, ? extends String> apply(String key, String value) {
            return KeyValue.pair(value, value);
        }
    });
    stream.join(kStream, valueJoiner,
            JoinWindows.of(windowSize).until(3 * windowSize),
            Joined.with(Serdes.String(), Serdes.String(), Serdes.String()))
          .to(Serdes.String(), Serdes.String(), "output-topic");
    ProcessorTopology processorTopology = builder.setApplicationId("X").build(null);
    SourceNode originalSourceNode = processorTopology.source("topic-1");
    for (SourceNode sourceNode : processorTopology.sources()) {
        if (sourceNode.name().equals(originalSourceNode.name())) {
            // the user-defined source keeps the default (null) extractor
            assertNull(sourceNode.getTimestampExtractor());
        } else {
            // sources created for internal repartition topics use the record metadata timestamp
            assertThat(sourceNode.getTimestampExtractor(), instanceOf(FailOnInvalidTimestamp.class));
        }
    }
}
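The TODO points at the newer StreamsBuilder API that replaced KStreamBuilder. As a rough sketch of what the refactored topology could look like (assuming a Kafka Streams version where JoinWindows.of(Duration) is available; this is not the project's actual refactoring), the key-changing map followed by a windowed self-join forces a repartition through an internal topic:

// Hedged sketch: a newer-API equivalent of the test's topology.
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;

public class RepartitionJoinSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> source =
                builder.stream("topic-1", Consumed.with(Serdes.String(), Serdes.String()));
        // the key-changing map marks the stream for repartitioning before the join
        KStream<String, String> rekeyed = source.map((key, value) -> KeyValue.pair(value, value));
        rekeyed.join(source, (left, right) -> left + ":" + right, JoinWindows.of(Duration.ofDays(1)))
               .to("output-topic", Produced.with(Serdes.String(), Serdes.String()));
        Topology topology = builder.build();
        // the internal "...-repartition" source nodes get FailOnInvalidTimestamp,
        // i.e. they read the record metadata timestamp written by the upstream producer
        System.out.println(topology.describe());
    }
}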
Use of org.apache.kafka.streams.processor.internals.SourceNode in project apache-kafka-on-k8s by banzaicloud.
The class KStreamBuilderTest, method shouldAddTimestampExtractorToStreamWithKeyValSerdePerSource.
@Test
public void shouldAddTimestampExtractorToStreamWithKeyValSerdePerSource() {
    builder.stream(new MockTimestampExtractor(), null, null, "topic");
    final ProcessorTopology processorTopology = builder.build(null);
    for (final SourceNode sourceNode : processorTopology.sources()) {
        assertThat(sourceNode.getTimestampExtractor(), instanceOf(MockTimestampExtractor.class));
    }
}
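The per-source extractor shown in this test overrides the application-wide default, which is set through configuration. A minimal sketch, assuming a Kafka Streams version (1.0 or later) that exposes DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG:

// Hedged sketch: configuring the global default timestamp extractor.
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

public class ExtractorConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "extractor-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // global default; an extractor passed per source (as in the test) wins over it
        props.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG,
                  WallclockTimestampExtractor.class);
        new StreamsConfig(props);  // validates the configuration
    }
}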