Use of org.apache.metron.parsers.bolt.WriterBolt in project metron by apache:
the class ParserTopologyBuilder, method createErrorBolt.
/**
* Create a bolt that handles error messages.
*
* @param zookeeperUrl Kafka zookeeper URL
* @param brokerUrl Kafka Broker URL
* @param sensorType Type of sensor that is being consumed.
* @param securityProtocol Security protocol used (if any)
* @param configs The parser configurations, including the global config
* @param parserConfig The parser configuration for the sensor
* @return A Storm bolt that handles error messages.
*/
private static WriterBolt createErrorBolt(String zookeeperUrl,
                                          Optional<String> brokerUrl,
                                          String sensorType,
                                          Optional<String> securityProtocol,
                                          ParserConfigurations configs,
                                          SensorParserConfig parserConfig) {
    // create the writer - if not configured, use a sensible default (a Kafka writer
    // publishing to the topic named by the global 'parser.error.topic' property)
    AbstractWriter writer = parserConfig.getErrorWriterClassName() == null
            ? createKafkaWriter(brokerUrl, zookeeperUrl, securityProtocol)
                  .withTopic((String) configs.getGlobalConfig().get("parser.error.topic"))
                  .withConfigPrefix("error")
            : ReflectionUtils.createInstance(parserConfig.getWriterClassName());
    writer.configure(sensorType, new ParserWriterConfiguration(configs));
    // create a writer handler
    WriterHandler writerHandler = createWriterHandler(writer);
    return new WriterBolt(writerHandler, configs, sensorType).withErrorType(Constants.ErrorType.PARSER_ERROR);
}
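A minimal sketch of how the branch above is driven by the sensor's parser config, assuming the standard setter exists on SensorParserConfig; the writer class name is hypothetical. Note that the reflective branch above instantiates the class returned by getWriterClassName().

import org.apache.metron.common.configuration.SensorParserConfig;

public class ErrorWriterConfigSketch {
    public static void main(String[] args) {
        SensorParserConfig parserConfig = new SensorParserConfig();

        // errorWriterClassName left null: createErrorBolt builds the default Kafka writer,
        // publishing errors to the topic named by the global 'parser.error.topic' property.

        // Naming a class switches createErrorBolt to the reflective branch (which, as shown
        // above, instantiates the class returned by getWriterClassName()).
        parserConfig.setErrorWriterClassName("org.example.MyErrorWriter"); // hypothetical writer class
    }
}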
Use of org.apache.metron.parsers.bolt.WriterBolt in project metron by apache:
the class ParserTopologyBuilder, method build.
/**
* Builds a Storm topology that parses telemetry data received from an external sensor.
*
* @param zookeeperUrl Zookeeper URL
* @param brokerUrl Kafka Broker URL
* @param sensorType Type of sensor
* @param spoutParallelismSupplier Supplier for the parallelism hint for the spout
* @param spoutNumTasksSupplier Supplier for the number of tasks for the spout
* @param parserParallelismSupplier Supplier for the parallelism hint for the parser bolt
* @param parserNumTasksSupplier Supplier for the number of tasks for the parser bolt
* @param errorWriterParallelismSupplier Supplier for the parallelism hint for the bolt that handles errors
* @param errorWriterNumTasksSupplier Supplier for the number of tasks for the bolt that handles errors
* @param kafkaSpoutConfigSupplier Supplier for the configuration options for the kafka spout
* @param securityProtocolSupplier Supplier for the security protocol
* @param outputTopic The output kafka topic
* @param stormConfigSupplier Supplier for the storm config
* @return A Storm topology that parses telemetry data received from an external sensor
* @throws Exception If the topology cannot be built, e.g. if the sensor's parser configuration cannot be retrieved from Zookeeper
*/
public static ParserTopology build(String zookeeperUrl,
                                   Optional<String> brokerUrl,
                                   String sensorType,
                                   ValueSupplier<Integer> spoutParallelismSupplier,
                                   ValueSupplier<Integer> spoutNumTasksSupplier,
                                   ValueSupplier<Integer> parserParallelismSupplier,
                                   ValueSupplier<Integer> parserNumTasksSupplier,
                                   ValueSupplier<Integer> errorWriterParallelismSupplier,
                                   ValueSupplier<Integer> errorWriterNumTasksSupplier,
                                   ValueSupplier<Map> kafkaSpoutConfigSupplier,
                                   ValueSupplier<String> securityProtocolSupplier,
                                   Optional<String> outputTopic,
                                   ValueSupplier<Config> stormConfigSupplier) throws Exception {
    // fetch configuration from zookeeper
    ParserConfigurations configs = new ParserConfigurations();
    SensorParserConfig parserConfig = getSensorParserConfig(zookeeperUrl, sensorType, configs);
    int spoutParallelism = spoutParallelismSupplier.get(parserConfig, Integer.class);
    int spoutNumTasks = spoutNumTasksSupplier.get(parserConfig, Integer.class);
    int parserParallelism = parserParallelismSupplier.get(parserConfig, Integer.class);
    int parserNumTasks = parserNumTasksSupplier.get(parserConfig, Integer.class);
    int errorWriterParallelism = errorWriterParallelismSupplier.get(parserConfig, Integer.class);
    int errorWriterNumTasks = errorWriterNumTasksSupplier.get(parserConfig, Integer.class);
    Map<String, Object> kafkaSpoutConfig = kafkaSpoutConfigSupplier.get(parserConfig, Map.class);
    Optional<String> securityProtocol = Optional.ofNullable(securityProtocolSupplier.get(parserConfig, String.class));

    // create the spout
    TopologyBuilder builder = new TopologyBuilder();
    KafkaSpout kafkaSpout = createKafkaSpout(zookeeperUrl, sensorType, securityProtocol, Optional.ofNullable(kafkaSpoutConfig), parserConfig);
    builder.setSpout("kafkaSpout", kafkaSpout, spoutParallelism).setNumTasks(spoutNumTasks);

    // create the parser bolt
    ParserBolt parserBolt = createParserBolt(zookeeperUrl, brokerUrl, sensorType, securityProtocol, configs, parserConfig, outputTopic);
    builder.setBolt("parserBolt", parserBolt, parserParallelism)
           .setNumTasks(parserNumTasks)
           .localOrShuffleGrouping("kafkaSpout");

    // create the error bolt, if needed
    if (errorWriterNumTasks > 0) {
        WriterBolt errorBolt = createErrorBolt(zookeeperUrl, brokerUrl, sensorType, securityProtocol, configs, parserConfig);
        builder.setBolt("errorMessageWriter", errorBolt, errorWriterParallelism)
               .setNumTasks(errorWriterNumTasks)
               .localOrShuffleGrouping("parserBolt", Constants.ERROR_STREAM);
    }
    return new ParserTopology(builder, stormConfigSupplier.get(parserConfig, Config.class));
}
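A minimal sketch of invoking build and submitting the result, assuming ValueSupplier is a functional interface matching the two-argument get(parserConfig, clazz) calls above; the Metron package paths, the getBuilder()/getTopologyConfig() accessors on ParserTopology, and the host and sensor values are assumptions for illustration.

import java.util.HashMap;
import java.util.Optional;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
// Metron imports (package paths assumed)
import org.apache.metron.parsers.topology.ParserTopologyBuilder;
import org.apache.metron.parsers.topology.ParserTopologyBuilder.ParserTopology;

public class BroParserTopologySketch {
    public static void main(String[] args) throws Exception {
        // Fixed illustration values; real deployments derive these from the CLI or the sensor config.
        ParserTopology topology = ParserTopologyBuilder.build(
                "node1:2181",                              // Zookeeper URL
                Optional.of("node1:6667"),                 // Kafka broker URL
                "bro",                                     // sensor type
                (parserConfig, clazz) -> 1,                // spout parallelism
                (parserConfig, clazz) -> 1,                // spout num tasks
                (parserConfig, clazz) -> 1,                // parser parallelism
                (parserConfig, clazz) -> 1,                // parser num tasks
                (parserConfig, clazz) -> 1,                // error writer parallelism
                (parserConfig, clazz) -> 1,                // error writer num tasks (0 would skip the error bolt)
                (parserConfig, clazz) -> new HashMap<>(),  // extra Kafka spout config
                (parserConfig, clazz) -> null,             // security protocol (none)
                Optional.empty(),                          // output topic (use the default)
                (parserConfig, clazz) -> new Config());    // Storm config

        // Submit locally; getBuilder()/getTopologyConfig() accessor names are assumptions.
        new LocalCluster().submitTopology("bro_parser",
                topology.getTopologyConfig(),
                topology.getBuilder().createTopology());
    }
}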