Usage of org.apache.kafka.streams.processor.internals.ProcessorNode in the Apache Kafka project.
From the class TopologyBuilder, method build():
/**
 * Builds a {@code ProcessorTopology} for the given node group.
 *
 * @param nodeGroup the names of the nodes to include; {@code null} means include every node
 * @return the assembled topology (processors, source/sink topic maps, state stores)
 * @throws TopologyBuilderException if a factory of an unknown type is encountered
 */
private ProcessorTopology build(Set<String> nodeGroup) {
    List<ProcessorNode> processorNodes = new ArrayList<>(nodeFactories.size());
    Map<String, ProcessorNode> processorMap = new HashMap<>();
    Map<String, SourceNode> topicSourceMap = new HashMap<>();
    Map<String, SinkNode> topicSinkMap = new HashMap<>();
    // LinkedHashMap so the stores come out in creation order at the end
    Map<String, StateStore> stateStoreMap = new LinkedHashMap<>();

    // create processor nodes in a topological order ("nodeFactories" is already topologically sorted)
    for (NodeFactory factory : nodeFactories.values()) {
        if (nodeGroup == null || nodeGroup.contains(factory.name)) {
            final ProcessorNode node = factory.build();
            processorNodes.add(node);
            processorMap.put(node.name(), node);

            if (factory instanceof ProcessorNodeFactory) {
                // Wire this node to its parents; the topological iteration order
                // guarantees every parent is already present in processorMap.
                for (String parent : ((ProcessorNodeFactory) factory).parents) {
                    ProcessorNode<?, ?> parentNode = processorMap.get(parent);
                    parentNode.addChild(node);
                }
                // Materialize each connected state store exactly once.
                for (String stateStoreName : ((ProcessorNodeFactory) factory).stateStoreNames) {
                    if (!stateStoreMap.containsKey(stateStoreName)) {
                        StateStore stateStore;

                        if (stateFactories.containsKey(stateStoreName)) {
                            final StateStoreSupplier supplier = stateFactories.get(stateStoreName).supplier;
                            stateStore = supplier.get();

                            // remember the changelog topic if this state store is change-logging enabled
                            if (supplier.loggingEnabled() && !storeToChangelogTopic.containsKey(stateStoreName)) {
                                final String changelogTopic = ProcessorStateManager.storeChangelogTopic(this.applicationId, stateStoreName);
                                storeToChangelogTopic.put(stateStoreName, changelogTopic);
                            }
                        } else {
                            // Not built locally — taken from the global stores map.
                            // NOTE(review): assumes the name always resolves there; a miss
                            // would store a null — TODO confirm upstream validation.
                            stateStore = globalStateStores.get(stateStoreName);
                        }

                        stateStoreMap.put(stateStoreName, stateStore);
                    }
                }
            } else if (factory instanceof SourceNodeFactory) {
                final SourceNodeFactory sourceNodeFactory = (SourceNodeFactory) factory;
                // Pattern-subscribed sources resolve to the currently matched topic names;
                // fixed-topic sources use their declared list.
                final List<String> topics = (sourceNodeFactory.pattern != null) ? sourceNodeFactory.getTopics(subscriptionUpdates.getUpdates()) : sourceNodeFactory.topics;
                for (String topic : topics) {
                    if (internalTopicNames.contains(topic)) {
                        // prefix the internal topic name with the application id
                        topicSourceMap.put(decorateTopic(topic), (SourceNode) node);
                    } else {
                        topicSourceMap.put(topic, (SourceNode) node);
                    }
                }
            } else if (factory instanceof SinkNodeFactory) {
                final SinkNodeFactory sinkNodeFactory = (SinkNodeFactory) factory;
                for (String parent : sinkNodeFactory.parents) {
                    processorMap.get(parent).addChild(node);
                    // The sink-topic registration is repeated per parent; the put is
                    // idempotent (same key, same node) so the result is unchanged.
                    if (internalTopicNames.contains(sinkNodeFactory.topic)) {
                        // prefix the internal topic name with the application id
                        topicSinkMap.put(decorateTopic(sinkNodeFactory.topic), (SinkNode) node);
                    } else {
                        topicSinkMap.put(sinkNodeFactory.topic, (SinkNode) node);
                    }
                }
            } else {
                throw new TopologyBuilderException("Unknown definition class: " + factory.getClass().getName());
            }
        }
    }

    return new ProcessorTopology(processorNodes, topicSourceMap, topicSinkMap, new ArrayList<>(stateStoreMap.values()), storeToChangelogTopic, new ArrayList<>(globalStateStores.values()));
}
Usage of org.apache.kafka.streams.processor.internals.ProcessorNode in the Apache Kafka project.
From the class ForwardingCacheFlushListener, method apply():
/**
 * Forwards the flushed entry downstream as a {@link Change} pair, temporarily
 * making this listener's node the current node on the processor context.
 */
@Override
public void apply(final K key, final V newValue, final V oldValue) {
    final ProcessorNode previousNode = context.currentNode();
    context.setCurrentNode(myNode);
    try {
        // Suppress the old value unless the caller opted in to receiving it.
        final V forwardedOldValue = sendOldValues ? oldValue : null;
        context.forward(key, new Change<>(newValue, forwardedOldValue));
    } finally {
        // Always restore whichever node was current before the flush.
        context.setCurrentNode(previousNode);
    }
}
Usage of org.apache.kafka.streams.processor.internals.ProcessorNode in the Apache Kafka project.
From the class KStreamTestDriver, method process():
/**
 * Drives a single key/value record through the topology by forwarding it from
 * the source node registered for {@code topicName}.
 *
 * <p>Records destined for state-store changelog topics are skipped, since those
 * are restored into the stores directly rather than processed.
 *
 * @throws IllegalArgumentException if no source node is registered for the topic
 */
public void process(String topicName, Object key, Object value) {
    // Skip changelog records up front, before any source-node lookup.
    if (topicName.endsWith(ProcessorStateManager.STATE_CHANGELOG_TOPIC_SUFFIX)) {
        return;
    }

    ProcessorNode currNode = topology.source(topicName);
    if (currNode == null && globalTopology != null) {
        currNode = globalTopology.source(topicName);
    }
    if (currNode == null) {
        // Previously an unknown topic fell through with a null current node and
        // failed later with an opaque NPE inside forward(); fail fast instead.
        throw new IllegalArgumentException("Unknown source topic: " + topicName);
    }

    final ProcessorNode prevNode = context.currentNode();
    context.setRecordContext(createRecordContext(context.timestamp()));
    context.setCurrentNode(currNode);
    try {
        context.forward(key, value);
    } finally {
        // Restore the node that was current before this record was injected.
        context.setCurrentNode(prevNode);
    }
}
Usage of org.apache.kafka.streams.processor.internals.ProcessorNode in the Apache Kafka project.
From the class KStreamTestDriver, method allProcessorNames():
/**
 * Returns the names of all processor nodes in the driven topology.
 */
public Set<String> allProcessorNames() {
    final Set<String> names = new HashSet<>();
    for (final ProcessorNode processor : topology.processors()) {
        names.add(processor.name());
    }
    return names;
}
Usage of org.apache.kafka.streams.processor.internals.ProcessorNode in the Apache Kafka project.
From the class KStreamTestDriver, method punctuate():
/**
 * Invokes {@code punctuate(timestamp)} on every processor node that carries a
 * user processor, restoring the previously current node after each call.
 */
public void punctuate(long timestamp) {
    final ProcessorNode previousNode = context.currentNode();
    for (final ProcessorNode node : topology.processors()) {
        if (node.processor() == null) {
            continue; // nodes without a user processor have nothing to punctuate
        }
        context.setRecordContext(createRecordContext(timestamp));
        context.setCurrentNode(node);
        try {
            node.processor().punctuate(timestamp);
        } finally {
            context.setCurrentNode(previousNode);
        }
    }
}
Aggregations