Use of org.apache.kafka.streams.errors.TopologyBuilderException in the Apache Kafka project.
From the class TopologyBuilder, method addProcessor:
/**
 * Add a new processor node that receives and processes records output by one or more parent source or
 * processor nodes.
 * Any new record output by this processor will be forwarded to its child processor or sink nodes.
 * @param name the unique name of the processor node
 * @param supplier the supplier used to obtain this node's {@link Processor} instance
 * @param parentNames the name of one or more source or processor nodes whose output records this processor should receive
 * and process; may be null or empty for a processor with no parents
 * @return this builder instance so methods can be chained together; never null
 * @throws TopologyBuilderException if parent processor is not added yet, or if this processor's name is equal to the parent's name
 */
public final synchronized TopologyBuilder addProcessor(String name, ProcessorSupplier supplier, String... parentNames) {
    Objects.requireNonNull(name, "name must not be null");
    Objects.requireNonNull(supplier, "supplier must not be null");
    if (nodeFactories.containsKey(name))
        throw new TopologyBuilderException("Processor " + name + " is already added.");
    if (parentNames != null) {
        for (String parent : parentNames) {
            // A null parent would otherwise surface as a bare NPE from parent.equals(name) below.
            Objects.requireNonNull(parent, "parent names must not be null");
            if (parent.equals(name)) {
                throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself.");
            }
            if (!nodeFactories.containsKey(parent)) {
                throw new TopologyBuilderException("Parent processor " + parent + " is not added yet.");
            }
        }
    }
    nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier));
    nodeGrouper.add(name);
    // BUG FIX: the validation above deliberately tolerates parentNames == null, but the original
    // code then passed the null varargs array straight into unite(). Only unite when there are parents.
    if (parentNames != null) {
        nodeGrouper.unite(name, parentNames);
    }
    return this;
}
Use of org.apache.kafka.streams.errors.TopologyBuilderException in the Apache Kafka project.
From the class TopologyBuilder, method addSource:
/**
 * Add a new source that consumes the named topics and forwards the records to child processor and/or sink nodes.
 * The source will use the specified key and value deserializers.
 *
 * @param offsetReset the auto offset reset policy to use for this stream if no committed offsets found; acceptable values are earliest or latest.
 * @param name the unique name of the source used to reference this node when
 * {@link #addProcessor(String, ProcessorSupplier, String...) adding processor children}.
 * @param keyDeserializer the {@link Deserializer key deserializer} used when consuming records; may be null if the source
 * should use the {@link org.apache.kafka.streams.StreamsConfig#KEY_SERDE_CLASS_CONFIG default key deserializer} specified in the
 * {@link org.apache.kafka.streams.StreamsConfig stream configuration}
 * @param valDeserializer the {@link Deserializer value deserializer} used when consuming records; may be null if the source
 * should use the {@link org.apache.kafka.streams.StreamsConfig#VALUE_SERDE_CLASS_CONFIG default value deserializer} specified in the
 * {@link org.apache.kafka.streams.StreamsConfig stream configuration}
 * @param topics the name of one or more Kafka topics that this source is to consume
 * @return this builder instance so methods can be chained together; never null
 * @throws TopologyBuilderException if processor is already added or if topics have already been registered by another source
 */
public final synchronized TopologyBuilder addSource(AutoOffsetReset offsetReset, String name, Deserializer keyDeserializer, Deserializer valDeserializer, String... topics) {
    // BUG FIX: the original dereferenced topics.length before any null check, so a null varargs
    // array produced a bare NPE instead of a descriptive message like every other argument here.
    Objects.requireNonNull(topics, "topics must not be null");
    if (topics.length == 0) {
        throw new TopologyBuilderException("You must provide at least one topic");
    }
    Objects.requireNonNull(name, "name must not be null");
    if (nodeFactories.containsKey(name))
        throw new TopologyBuilderException("Processor " + name + " is already added.");
    for (String topic : topics) {
        Objects.requireNonNull(topic, "topic names cannot be null");
        validateTopicNotAlreadyRegistered(topic);
        // Record the reset policy (if any) before registering the topic as a known source topic.
        maybeAddToResetList(earliestResetTopics, latestResetTopics, offsetReset, topic);
        sourceTopicNames.add(topic);
    }
    nodeFactories.put(name, new SourceNodeFactory(name, topics, null, keyDeserializer, valDeserializer));
    nodeToSourceTopics.put(name, Arrays.asList(topics));
    nodeGrouper.add(name);
    return this;
}
Use of org.apache.kafka.streams.errors.TopologyBuilderException in the Apache Kafka project.
From the class TopologyBuilder, method connectProcessors:
/**
 * Connects a list of processors so that they belong to the same node group.
 *
 * NOTE this function is not needed by developers working with the processor APIs; it is only used
 * by the high-level DSL parsing functionalities.
 *
 * @param processorNames the names of the processors to connect
 * @return this builder instance so methods can be chained together; never null
 * @throws TopologyBuilderException if less than two processors are specified, or if one of the processors is not added yet
 */
public final synchronized TopologyBuilder connectProcessors(String... processorNames) {
    if (processorNames.length < 2) {
        throw new TopologyBuilderException("At least two processors need to participate in the connection.");
    }
    // Validate every participant up front so the grouper is never mutated on a bad input.
    for (final String candidate : processorNames) {
        if (!nodeFactories.containsKey(candidate)) {
            throw new TopologyBuilderException("Processor " + candidate + " is not added yet.");
        }
    }
    final String anchor = processorNames[0];
    final String[] remainder = Arrays.copyOfRange(processorNames, 1, processorNames.length);
    nodeGrouper.unite(anchor, remainder);
    return this;
}
Use of org.apache.kafka.streams.errors.TopologyBuilderException in the Apache Kafka project.
From the class KTableImpl, method writeAsText:
/**
 * Writes the records of this KTable as text to the given file, via a printer processor
 * appended to the topology.
 *
 * @throws TopologyBuilderException if the file path is blank or the file cannot be opened
 */
@Override
public void writeAsText(String filePath, String streamName, Serde<K> keySerde, Serde<V> valSerde) {
    Objects.requireNonNull(filePath, "filePath can't be null");
    if (filePath.trim().isEmpty()) {
        throw new TopologyBuilderException("filePath can't be an empty string");
    }
    final String printerName = topology.newName(PRINTING_NAME);
    // Fall back to this table's own name when no explicit stream label was provided.
    final String label = (streamName == null) ? this.name : streamName;
    try {
        // The stream is intentionally left open: the printer processor owns it for the topology's lifetime.
        final PrintStream output = new PrintStream(new FileOutputStream(filePath));
        topology.addProcessor(printerName, new KeyValuePrinter<>(output, keySerde, valSerde, label), this.name);
    } catch (FileNotFoundException e) {
        // NOTE(review): only the message of the caught exception is preserved; the cause is dropped.
        String message = "Unable to write stream to file at [" + filePath + "] " + e.getMessage();
        throw new TopologyBuilderException(message);
    }
}
Aggregations