Use of org.apache.kafka.streams.errors.TopologyBuilderException in project kafka by apache.
The class TopologyBuilder, method build.
private ProcessorTopology build(Set<String> nodeGroup) {
    List<ProcessorNode> processorNodes = new ArrayList<>(nodeFactories.size());
    Map<String, ProcessorNode> processorMap = new HashMap<>();
    Map<String, SourceNode> topicSourceMap = new HashMap<>();
    Map<String, SinkNode> topicSinkMap = new HashMap<>();
    Map<String, StateStore> stateStoreMap = new LinkedHashMap<>();
    // create processor nodes in a topological order ("nodeFactories" is already topologically sorted)
    for (NodeFactory factory : nodeFactories.values()) {
        if (nodeGroup == null || nodeGroup.contains(factory.name)) {
            final ProcessorNode node = factory.build();
            processorNodes.add(node);
            processorMap.put(node.name(), node);
            if (factory instanceof ProcessorNodeFactory) {
                for (String parent : ((ProcessorNodeFactory) factory).parents) {
                    ProcessorNode<?, ?> parentNode = processorMap.get(parent);
                    parentNode.addChild(node);
                }
                for (String stateStoreName : ((ProcessorNodeFactory) factory).stateStoreNames) {
                    if (!stateStoreMap.containsKey(stateStoreName)) {
                        StateStore stateStore;
                        if (stateFactories.containsKey(stateStoreName)) {
                            final StateStoreSupplier supplier = stateFactories.get(stateStoreName).supplier;
                            stateStore = supplier.get();
                            // remember the changelog topic if this state store is change-logging enabled
                            if (supplier.loggingEnabled() && !storeToChangelogTopic.containsKey(stateStoreName)) {
                                final String changelogTopic = ProcessorStateManager.storeChangelogTopic(this.applicationId, stateStoreName);
                                storeToChangelogTopic.put(stateStoreName, changelogTopic);
                            }
                        } else {
                            stateStore = globalStateStores.get(stateStoreName);
                        }
                        stateStoreMap.put(stateStoreName, stateStore);
                    }
                }
            } else if (factory instanceof SourceNodeFactory) {
                final SourceNodeFactory sourceNodeFactory = (SourceNodeFactory) factory;
                final List<String> topics = (sourceNodeFactory.pattern != null)
                    ? sourceNodeFactory.getTopics(subscriptionUpdates.getUpdates())
                    : sourceNodeFactory.topics;
                for (String topic : topics) {
                    if (internalTopicNames.contains(topic)) {
                        // prefix the internal topic name with the application id
                        topicSourceMap.put(decorateTopic(topic), (SourceNode) node);
                    } else {
                        topicSourceMap.put(topic, (SourceNode) node);
                    }
                }
            } else if (factory instanceof SinkNodeFactory) {
                final SinkNodeFactory sinkNodeFactory = (SinkNodeFactory) factory;
                for (String parent : sinkNodeFactory.parents) {
                    processorMap.get(parent).addChild(node);
                    if (internalTopicNames.contains(sinkNodeFactory.topic)) {
                        // prefix the internal topic name with the application id
                        topicSinkMap.put(decorateTopic(sinkNodeFactory.topic), (SinkNode) node);
                    } else {
                        topicSinkMap.put(sinkNodeFactory.topic, (SinkNode) node);
                    }
                }
            } else {
                throw new TopologyBuilderException("Unknown definition class: " + factory.getClass().getName());
            }
        }
    }
    return new ProcessorTopology(processorNodes, topicSourceMap, topicSinkMap,
        new ArrayList<>(stateStoreMap.values()), storeToChangelogTopic,
        new ArrayList<>(globalStateStores.values()));
}
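The changelog topic name computed above follows a fixed convention. The sketch below is a hypothetical re-implementation of that convention for illustration, assuming the standard "<applicationId>-<storeName>-changelog" pattern produced by ProcessorStateManager.storeChangelogTopic; it is not the Kafka implementation itself.
// Hypothetical stand-in for ProcessorStateManager.storeChangelogTopic, for illustration only.
public final class ChangelogNaming {
    private static final String STATE_CHANGELOG_TOPIC_SUFFIX = "-changelog";

    public static String storeChangelogTopic(String applicationId, String storeName) {
        // e.g. applicationId "word-count" and store "counts" yield "word-count-counts-changelog"
        return applicationId + "-" + storeName + STATE_CHANGELOG_TOPIC_SUFFIX;
    }
}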
Use of org.apache.kafka.streams.errors.TopologyBuilderException in project kafka by apache.
The class KStreamImpl, method writeAsText.
/**
* @throws TopologyBuilderException if file is not found
*/
@Override
public void writeAsText(String filePath, String streamName, Serde<K> keySerde, Serde<V> valSerde) {
    Objects.requireNonNull(filePath, "filePath can't be null");
    if (filePath.trim().isEmpty()) {
        throw new TopologyBuilderException("filePath can't be an empty string");
    }
    String name = topology.newName(PRINTING_NAME);
    streamName = (streamName == null) ? this.name : streamName;
    try {
        PrintStream printStream = new PrintStream(new FileOutputStream(filePath));
        topology.addProcessor(name, new KeyValuePrinter<>(printStream, keySerde, valSerde, streamName), this.name);
    } catch (FileNotFoundException e) {
        String message = "Unable to write stream to file at [" + filePath + "] " + e.getMessage();
        throw new TopologyBuilderException(message);
    }
}
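A minimal usage sketch for the overload above; the topic name, file path, and serdes are illustrative assumptions, and the KStreamBuilder entry point from the same API generation is assumed.
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;

public class WriteAsTextExample {
    public static void main(String[] args) {
        KStreamBuilder builder = new KStreamBuilder();
        // Read a stream of string key-value pairs from an (assumed) input topic.
        KStream<String, String> stream = builder.stream(Serdes.String(), Serdes.String(), "input-topic");
        // Dump records to a local file for debugging; throws TopologyBuilderException
        // if the file cannot be created (e.g. the parent directory does not exist).
        stream.writeAsText("/tmp/stream-debug.txt", "debug-stream", Serdes.String(), Serdes.String());
    }
}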
Use of org.apache.kafka.streams.errors.TopologyBuilderException in project kafka by apache.
The class TopologyBuilderTest, method shouldThroughOnUnassignedStateStoreAccess.
@Test(expected = TopologyBuilderException.class)
public void shouldThroughOnUnassignedStateStoreAccess() {
    final String sourceNodeName = "source";
    final String goodNodeName = "goodGuy";
    final String badNodeName = "badGuy";
    final Properties config = new Properties();
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "host:1");
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    final StreamsConfig streamsConfig = new StreamsConfig(config);
    try {
        final TopologyBuilder builder = new TopologyBuilder();
        builder.addSource(sourceNodeName, "topic")
               .addProcessor(goodNodeName, new LocalMockProcessorSupplier(), sourceNodeName)
               .addStateStore(Stores.create(LocalMockProcessorSupplier.STORE_NAME)
                                    .withStringKeys()
                                    .withStringValues()
                                    .inMemory()
                                    .build(), goodNodeName)
               .addProcessor(badNodeName, new LocalMockProcessorSupplier(), sourceNodeName);
        final ProcessorTopologyTestDriver driver = new ProcessorTopologyTestDriver(streamsConfig, builder);
        driver.process("topic", null, null);
    } catch (final StreamsException e) {
        final Throwable cause = e.getCause();
        if (cause != null
                && cause instanceof TopologyBuilderException
                && cause.getMessage().equals("Invalid topology building: Processor " + badNodeName
                    + " has no access to StateStore " + LocalMockProcessorSupplier.STORE_NAME)) {
            throw (TopologyBuilderException) cause;
        } else {
            throw new RuntimeException("Did expect different exception. Did catch:", e);
        }
    }
}
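The test passes because the driver surfaces the builder error wrapped in a StreamsException. To avoid the error instead, the processor simply needs access to the store when it is registered; a minimal sketch reusing the test's own builder calls:
// Either grant access when the store is added (addStateStore accepts multiple processor names) ...
builder.addStateStore(Stores.create(LocalMockProcessorSupplier.STORE_NAME).withStringKeys().withStringValues().inMemory().build(),
    goodNodeName, badNodeName);
// ... or connect the processor to the store after the fact.
builder.connectProcessorAndStateStores(badNodeName, LocalMockProcessorSupplier.STORE_NAME);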
Use of org.apache.kafka.streams.errors.TopologyBuilderException in project kafka by apache.
The class TopologyBuilder, method addSink.
/**
 * Add a new sink that forwards records from upstream parent processor and/or source nodes to the named Kafka topic.
 * The sink will use the specified key and value serializers, and the supplied partitioner.
 *
 * @param name the unique name of the sink
 * @param topic the name of the Kafka topic to which this sink should write its records
 * @param keySerializer the {@link Serializer key serializer} used when writing records to the topic; may be null if the sink
 *                      should use the {@link org.apache.kafka.streams.StreamsConfig#KEY_SERDE_CLASS_CONFIG default key serializer}
 *                      specified in the {@link org.apache.kafka.streams.StreamsConfig stream configuration}
 * @param valSerializer the {@link Serializer value serializer} used when writing records to the topic; may be null if the sink
 *                      should use the {@link org.apache.kafka.streams.StreamsConfig#VALUE_SERDE_CLASS_CONFIG default value serializer}
 *                      specified in the {@link org.apache.kafka.streams.StreamsConfig stream configuration}
 * @param partitioner the function that should be used to determine the partition for each record processed by the sink
 * @param parentNames the names of one or more source or processor nodes whose output records this sink should consume
 *                    and write to its topic
 * @return this builder instance so methods can be chained together; never null
 * @see #addSink(String, String, String...)
 * @see #addSink(String, String, StreamPartitioner, String...)
 * @see #addSink(String, String, Serializer, Serializer, String...)
 * @throws TopologyBuilderException if a parent processor has not been added yet, or if this sink's name equals one of its parents' names
 */
public final synchronized <K, V> TopologyBuilder addSink(String name, String topic, Serializer<K> keySerializer,
                                                         Serializer<V> valSerializer,
                                                         StreamPartitioner<? super K, ? super V> partitioner,
                                                         String... parentNames) {
    Objects.requireNonNull(name, "name must not be null");
    Objects.requireNonNull(topic, "topic must not be null");
    if (nodeFactories.containsKey(name))
        throw new TopologyBuilderException("Processor " + name + " is already added.");
    if (parentNames != null) {
        for (String parent : parentNames) {
            if (parent.equals(name)) {
                throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself.");
            }
            if (!nodeFactories.containsKey(parent)) {
                throw new TopologyBuilderException("Parent processor " + parent + " is not added yet.");
            }
        }
    }
    nodeFactories.put(name, new SinkNodeFactory<>(name, parentNames, topic, keySerializer, valSerializer, partitioner));
    nodeToSinkTopic.put(name, topic);
    nodeGrouper.add(name);
    nodeGrouper.unite(name, parentNames);
    return this;
}
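A minimal sketch of wiring this overload into a topology; the topic names and the inline uppercasing processor are illustrative assumptions, and passing null for the partitioner falls back to default partitioning.
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.TopologyBuilder;

public class AddSinkExample {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.addSource("source", "input-topic")
               .addProcessor("transform", () -> new AbstractProcessor<String, String>() {
                   @Override
                   public void process(String key, String value) {
                       context().forward(key, value.toUpperCase());
                   }
               }, "source")
               // The sink must name an existing parent; adding it before "transform",
               // or naming the sink as its own parent, would throw TopologyBuilderException.
               .addSink("sink", "output-topic", new StringSerializer(), new StringSerializer(), null, "transform");
    }
}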
Use of org.apache.kafka.streams.errors.TopologyBuilderException in project kafka by apache.
The class TopologyBuilder, method connectProcessorAndStateStore.
private void connectProcessorAndStateStore(String processorName, String stateStoreName) {
    if (!stateFactories.containsKey(stateStoreName))
        throw new TopologyBuilderException("StateStore " + stateStoreName + " is not added yet.");
    if (!nodeFactories.containsKey(processorName))
        throw new TopologyBuilderException("Processor " + processorName + " is not added yet.");
    StateStoreFactory stateStoreFactory = stateFactories.get(stateStoreName);
    Iterator<String> iter = stateStoreFactory.users.iterator();
    if (iter.hasNext()) {
        String user = iter.next();
        nodeGrouper.unite(user, processorName);
    }
    stateStoreFactory.users.add(processorName);
    NodeFactory nodeFactory = nodeFactories.get(processorName);
    if (nodeFactory instanceof ProcessorNodeFactory) {
        ProcessorNodeFactory processorNodeFactory = (ProcessorNodeFactory) nodeFactory;
        processorNodeFactory.addStateStore(stateStoreName);
        connectStateStoreNameToSourceTopics(stateStoreName, processorNodeFactory);
    } else {
        throw new TopologyBuilderException("cannot connect a state store " + stateStoreName + " to a source node or a sink node.");
    }
}
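Application code reaches this check through the public connectProcessorAndStateStores variant. Below is a minimal sketch under assumed names: the topics, the "counts" store, and CountProcessor (a stand-in Processor implementation) are illustrative, not part of Kafka; the Stores fluent calls mirror the test shown earlier.
import org.apache.kafka.streams.processor.TopologyBuilder;
import org.apache.kafka.streams.state.Stores;

TopologyBuilder builder = new TopologyBuilder();
builder.addSource("source", "input-topic");
// CountProcessor is a hypothetical Processor implementation, not part of Kafka.
builder.addProcessor("counter", CountProcessor::new, "source");
builder.addStateStore(Stores.create("counts").withStringKeys().withStringValues().inMemory().build());
// Associates the store with the processor and unites them into one node group;
// throws TopologyBuilderException if "counts" or "counter" has not been added yet,
// or if "counter" named a source or sink node instead of a processor.
builder.connectProcessorAndStateStores("counter", "counts");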