Use of org.apache.storm.kafka.spout.KafkaSpout in project open-kilda by telstra.
The class AbstractTopology, method createHealthCheckHandler.
/**
* Creates health-check handler spout and bolts.
*
* @param builder topology builder
* @param prefix component id
*/
protected void createHealthCheckHandler(TopologyBuilder builder, String prefix) {
checkAndCreateTopic(Topic.HEALTH_CHECK);
KafkaSpout healthCheckKafkaSpout = createKafkaSpout(Topic.HEALTH_CHECK, prefix);
builder.setSpout(prefix + "HealthCheckKafkaSpout", healthCheckKafkaSpout, 1);
HealthCheckBolt healthCheckBolt = new HealthCheckBolt(prefix);
builder.setBolt(prefix + "HealthCheckBolt", healthCheckBolt, 1).shuffleGrouping(prefix + "HealthCheckKafkaSpout");
KafkaBolt healthCheckKafkaBolt = createKafkaBolt(Topic.HEALTH_CHECK);
builder.setBolt(prefix + "HealthCheckKafkaBolt", healthCheckKafkaBolt, 1).shuffleGrouping(prefix + "HealthCheckBolt", Topic.HEALTH_CHECK);
}
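The createKafkaSpout and createKafkaBolt factory methods used above belong to AbstractTopology and are not part of this excerpt. A minimal sketch of what such helpers could look like with the storm-kafka-client API (1.2-style builder) follows; the bootstrapServers field, the String key/value types, and the producer settings are assumptions for illustration, not the project's actual implementation.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.storm.kafka.bolt.KafkaBolt;
import org.apache.storm.kafka.bolt.mapper.FieldNameBasedTupleToKafkaMapper;
import org.apache.storm.kafka.bolt.selector.DefaultTopicSelector;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;

protected KafkaSpout<String, String> createKafkaSpout(String topic, String spoutId) {
    // bootstrapServers: assumed field holding the Kafka connection string (not shown in this excerpt).
    // One consumer group per spout id, so independent spouts track their own offsets.
    KafkaSpoutConfig<String, String> spoutConfig = KafkaSpoutConfig.builder(bootstrapServers, topic)
            .setProp(ConsumerConfig.GROUP_ID_CONFIG, spoutId)
            .build();
    return new KafkaSpout<>(spoutConfig);
}

protected KafkaBolt<String, String> createKafkaBolt(String topic) {
    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    return new KafkaBolt<String, String>()
            .withProducerProperties(producerProps)
            .withTopicSelector(new DefaultTopicSelector(topic))
            .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<>());
}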
Use of org.apache.storm.kafka.spout.KafkaSpout in project open-kilda by telstra.
The class AbstractTopology, method createCtrlBranch.
protected void createCtrlBranch(TopologyBuilder builder, List<CtrlBoltRef> targets) throws StreamNameCollisionException {
checkAndCreateTopic(config.getKafkaCtrlTopic());
KafkaSpout kafkaSpout;
kafkaSpout = createKafkaSpout(config.getKafkaCtrlTopic(), SPOUT_ID_CTRL);
builder.setSpout(SPOUT_ID_CTRL, kafkaSpout);
RouteBolt route = new RouteBolt(getTopologyName());
builder.setBolt(BOLT_ID_CTRL_ROUTE, route).shuffleGrouping(SPOUT_ID_CTRL);
KafkaBolt kafkaBolt = createKafkaBolt(config.getKafkaCtrlTopic());
BoltDeclarer outputSetup = builder.setBolt(BOLT_ID_CTRL_OUTPUT, kafkaBolt).shuffleGrouping(BOLT_ID_CTRL_ROUTE, route.STREAM_ID_ERROR);
for (CtrlBoltRef ref : targets) {
String boltId = ref.getBoltId();
ref.getDeclarer().allGrouping(BOLT_ID_CTRL_ROUTE, route.registerEndpoint(boltId));
outputSetup.shuffleGrouping(boltId, ref.getBolt().getCtrlStreamId());
}
}
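createCtrlBranch relies on each bolt registered through CtrlBoltRef declaring a dedicated ctrl output stream (returned by getCtrlStreamId()), which the loop wires into the ctrl Kafka output bolt, while the RouteBolt output is broadcast to every registered bolt via allGrouping. A rough sketch of a bolt that declares such a stream follows; the class, stream name, and field names are illustrative assumptions, not taken from the open-kilda sources.

import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;

public class ExampleCtrlAwareBolt extends BaseRichBolt {
    // Hypothetical ctrl stream id; the real bolts expose theirs via getCtrlStreamId().
    static final String STREAM_ID_CTRL = "ctrl.response";

    private OutputCollector collector;

    public String getCtrlStreamId() {
        return STREAM_ID_CTRL;
    }

    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple input) {
        // A real bolt would answer ctrl requests on STREAM_ID_CTRL here; this sketch only acks.
        collector.ack(input);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("message"));                               // regular data stream
        declarer.declareStream(STREAM_ID_CTRL, new Fields("key", "message"));  // ctrl stream wired by createCtrlBranch()
    }
}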
Use of org.apache.storm.kafka.spout.KafkaSpout in project open-kilda by telstra.
The class CacheTopology, method createTopology.
/**
* {@inheritDoc}
*/
@Override
public StormTopology createTopology() throws NameCollisionException {
logger.info("Creating Topology: {}", topologyName);
initKafkaTopics();
Integer parallelism = config.getParallelism();
TopologyBuilder builder = new TopologyBuilder();
List<CtrlBoltRef> ctrlTargets = new ArrayList<>();
BoltDeclarer boltSetup;
KafkaSpout kafkaSpout;
/*
* Receives cache from storage.
*/
kafkaSpout = createKafkaSpout(config.getKafkaTopoCacheTopic(), SPOUT_ID_COMMON);
builder.setSpout(SPOUT_ID_COMMON, kafkaSpout, parallelism);
// (carmine) - as part of 0.8 refactor, merged inputs to one topic, so this isn't necessary
// /*
// * Receives cache updates from WFM topology.
// */
// kafkaSpout = createKafkaSpout(config.getKafkaTopoCacheTopic(), SPOUT_ID_TOPOLOGY);
// builder.setSpout(SPOUT_ID_TOPOLOGY, kafkaSpout, parallelism);
/*
* Stores network cache.
*/
CacheBolt cacheBolt = new CacheBolt(config.getDiscoveryTimeout());
boltSetup = builder.setBolt(BOLT_ID_CACHE, cacheBolt, parallelism).shuffleGrouping(SPOUT_ID_COMMON);
ctrlTargets.add(new CtrlBoltRef(BOLT_ID_CACHE, cacheBolt, boltSetup));
KafkaBolt kafkaBolt;
/*
* Sends network events to storage.
*/
kafkaBolt = createKafkaBolt(config.getKafkaTopoEngTopic());
builder.setBolt(BOLT_ID_COMMON_OUTPUT, kafkaBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.TPE.toString());
/*
* Sends cache dump and reroute requests to WFM topology.
*/
kafkaBolt = createKafkaBolt(config.getKafkaFlowTopic());
builder.setBolt(BOLT_ID_TOPOLOGY_OUTPUT, kafkaBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.WFM_DUMP.toString());
/*
* Sends requests for ISL to OFE topology.
*/
// FIXME(surabjin): two KafkaBolt instances with the same topic (see previous bolt)
KafkaBolt oFEKafkaBolt = createKafkaBolt(config.getKafkaFlowTopic());
builder.setBolt(BOLT_ID_OFE, oFEKafkaBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.OFE.toString());
createCtrlBranch(builder, ctrlTargets);
createHealthCheckHandler(builder, ServiceType.CACHE_TOPOLOGY.getId());
return builder.createTopology();
}
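For context, a topology built this way is normally handed to Storm for execution. A rough sketch of a launcher follows; the no-argument constructor, topology name, and worker count are assumptions for illustration, since the actual open-kilda launch wiring is not part of this excerpt.

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;

public final class CacheTopologyLauncher {
    public static void main(String[] args) throws Exception {
        CacheTopology cacheTopology = new CacheTopology();   // assumption: the real constructor may take configuration
        Config stormConfig = new Config();
        stormConfig.setNumWorkers(1);                         // a single worker is enough for a development deployment
        StormSubmitter.submitTopology("cache-topology", stormConfig, cacheTopology.createTopology());
    }
}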
Use of org.apache.storm.kafka.spout.KafkaSpout in project open-kilda by telstra.
The class FlowTopology, method createTopology.
@Override
public StormTopology createTopology() throws StreamNameCollisionException {
logger.info("Creating Topology: {}", topologyName);
TopologyBuilder builder = new TopologyBuilder();
List<CtrlBoltRef> ctrlTargets = new ArrayList<>();
BoltDeclarer boltSetup;
Integer parallelism = config.getParallelism();
/*
* Spout receives all Northbound requests.
*/
KafkaSpout northboundKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.NORTHBOUND_KAFKA_SPOUT.toString());
builder.setSpout(ComponentType.NORTHBOUND_KAFKA_SPOUT.toString(), northboundKafkaSpout, parallelism);
/*
* Bolt splits requests on streams.
* It groups requests by flow-id.
*/
SplitterBolt splitterBolt = new SplitterBolt();
builder.setBolt(ComponentType.SPLITTER_BOLT.toString(), splitterBolt, parallelism).shuffleGrouping(ComponentType.NORTHBOUND_KAFKA_SPOUT.toString());
/*
* Bolt handles flow CRUD operations.
* It groups requests by flow-id.
*/
CrudBolt crudBolt = new CrudBolt(pathComputerAuth);
// The return value is discarded; this call only exercises Java serialization of pathComputerAuth.
ComponentObject.serialized_java(org.apache.storm.utils.Utils.javaSerialize(pathComputerAuth));
boltSetup = builder.setBolt(ComponentType.CRUD_BOLT.toString(), crudBolt, parallelism)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.CREATE.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.READ.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.UPDATE.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.DELETE.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.PUSH.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.UNPUSH.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.PATH.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.RESTORE.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.REROUTE.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.CACHE_SYNC.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId)
        .fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId);
ctrlTargets.add(new CtrlBoltRef(ComponentType.CRUD_BOLT.toString(), crudBolt, boltSetup));
/*
* Bolt sends cache updates.
*/
KafkaBolt cacheKafkaBolt = createKafkaBolt(config.getKafkaTopoCacheTopic());
builder.setBolt(ComponentType.CACHE_KAFKA_BOLT.toString(), cacheKafkaBolt, parallelism)
        .shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.CREATE.toString())
        .shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.UPDATE.toString())
        .shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.DELETE.toString())
        .shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.STATUS.toString());
/*
* Spout receives Topology Engine response
*/
KafkaSpout topologyKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString());
builder.setSpout(ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString(), topologyKafkaSpout, parallelism);
/*
* Bolt processes Topology Engine responses, groups by flow-id field
*/
TopologyEngineBolt topologyEngineBolt = new TopologyEngineBolt();
builder.setBolt(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), topologyEngineBolt, parallelism).shuffleGrouping(ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString());
/*
* Bolt sends Speaker requests
*/
KafkaBolt speakerKafkaBolt = createKafkaBolt(config.getKafkaSpeakerTopic());
builder.setBolt(ComponentType.SPEAKER_KAFKA_BOLT.toString(), speakerKafkaBolt, parallelism)
        .shuffleGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.CREATE.toString())
        .shuffleGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.DELETE.toString());
/*
* Spout receives Speaker responses
*/
KafkaSpout speakerKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.SPEAKER_KAFKA_SPOUT.toString());
builder.setSpout(ComponentType.SPEAKER_KAFKA_SPOUT.toString(), speakerKafkaSpout, parallelism);
/*
* Bolt processes Speaker responses, groups by flow-id field
*/
SpeakerBolt speakerBolt = new SpeakerBolt();
builder.setBolt(ComponentType.SPEAKER_BOLT.toString(), speakerBolt, parallelism).shuffleGrouping(ComponentType.SPEAKER_KAFKA_SPOUT.toString());
/*
* Transaction bolt.
*/
TransactionBolt transactionBolt = new TransactionBolt();
boltSetup = builder.setBolt(ComponentType.TRANSACTION_BOLT.toString(), transactionBolt, parallelism)
        .fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.CREATE.toString(), fieldSwitchId)
        .fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.DELETE.toString(), fieldSwitchId)
        .fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.CREATE.toString(), fieldSwitchId)
        .fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.DELETE.toString(), fieldSwitchId);
ctrlTargets.add(new CtrlBoltRef(ComponentType.TRANSACTION_BOLT.toString(), transactionBolt, boltSetup));
/*
* Error processing bolt
*/
ErrorBolt errorProcessingBolt = new ErrorBolt();
builder.setBolt(ComponentType.ERROR_BOLT.toString(), errorProcessingBolt, parallelism)
        .shuffleGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.ERROR.toString())
        .shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.ERROR.toString());
/*
* Bolt forms Northbound responses
*/
NorthboundReplyBolt northboundReplyBolt = new NorthboundReplyBolt();
builder.setBolt(ComponentType.NORTHBOUND_REPLY_BOLT.toString(), northboundReplyBolt, parallelism)
        .shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.RESPONSE.toString())
        .shuffleGrouping(ComponentType.ERROR_BOLT.toString(), StreamType.RESPONSE.toString());
/*
* Bolt sends Northbound responses
*/
KafkaBolt northboundKafkaBolt = createKafkaBolt(config.getKafkaNorthboundTopic());
builder.setBolt(ComponentType.NORTHBOUND_KAFKA_BOLT.toString(), northboundKafkaBolt, parallelism)
        .shuffleGrouping(ComponentType.NORTHBOUND_REPLY_BOLT.toString(), StreamType.RESPONSE.toString());
createCtrlBranch(builder, ctrlTargets);
createHealthCheckHandler(builder, ServiceType.FLOW_TOPOLOGY.getId());
return builder.createTopology();
}
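The fieldFlowId and fieldSwitchId values used in the fieldsGrouping calls above are declared elsewhere in FlowTopology and are presumably Fields instances keyed by the flow and switch identifiers. A small sketch of the idea follows, with assumed field names that may differ from the project's actual ones.

import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;

class FlowFieldExample {
    // Assumed declarations; the actual field names in FlowTopology may differ.
    static final Fields FIELD_FLOW_ID = new Fields("flow-id");
    static final Fields FIELD_SWITCH_ID = new Fields("switch-id");

    // With fieldsGrouping on FIELD_FLOW_ID, all tuples carrying the same flow id
    // land on the same bolt task, so per-flow CRUD state stays consistent even
    // when the bolt runs with parallelism greater than one.
    static String flowIdOf(Tuple input) {
        return input.getStringByField("flow-id");
    }
}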
Use of org.apache.storm.kafka.spout.KafkaSpout in project open-kilda by telstra.
The class OpenTSDBTopology, method createTopology.
@Override
public StormTopology createTopology() {
LOGGER.info("Creating OpenTSDB topology");
TopologyBuilder tb = new TopologyBuilder();
checkAndCreateTopic(topic);
KafkaSpout kafkaSpout = createKafkaSpout(topic, spoutId);
tb.setSpout(spoutId, kafkaSpout, config.getOpenTsdbNumSpouts());
tb.setBolt(parseBoltId, new DatapointParseBolt(), config.getGetDatapointParseBoltExecutors())
        .setNumTasks(config.getGetDatapointParseBoltWorkers())
        .shuffleGrouping(spoutId);
tb.setBolt(boltId, new OpenTSDBFilterBolt(), config.getOpenTsdbFilterBoltExecutors()).fieldsGrouping(parseBoltId, new Fields("hash"));
OpenTsdbClient.Builder tsdbBuilder = OpenTsdbClient.newBuilder(config.getOpenTsDBHosts()).sync(config.getOpenTsdbTimeout()).returnDetails();
OpenTsdbBolt openTsdbBolt = new OpenTsdbBolt(tsdbBuilder, Collections.singletonList(TupleOpenTsdbDatapointMapper.DEFAULT_MAPPER));
openTsdbBolt.withBatchSize(config.getOpenTsdbBatchSize()).withFlushInterval(config.getOpenTsdbFlushInterval());
// .failTupleForFailedMetrics();
tb.setBolt("opentsdb", openTsdbBolt, config.getOpenTsdbBoltExecutors()).setNumTasks(config.getOpenTsdbBoltWorkers()).shuffleGrouping(boltId);
return tb.createTopology();
}
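A topology like this one is often exercised locally before being deployed to a cluster. The sketch below shows one way to run the OpenTSDB topology in a LocalCluster; the no-argument constructor, topology name, and run duration are assumptions for illustration, not the project's actual launcher.

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;

public final class OpenTsdbTopologyLocalRunner {
    public static void main(String[] args) throws Exception {
        OpenTSDBTopology topology = new OpenTSDBTopology();   // assumption: the real constructor may take configuration
        Config stormConfig = new Config();
        stormConfig.setDebug(false);

        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("opentsdb-topology", stormConfig, topology.createTopology());
        Thread.sleep(60_000);                                  // let the topology consume and flush for a minute
        cluster.killTopology("opentsdb-topology");
        cluster.shutdown();
    }
}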