Use of org.openkilda.wfm.CtrlBoltRef in project open-kilda by telstra:
class AbstractTopology, method createCtrlBranch.
/**
 * Wires the control ("ctrl") branch into the topology: a spout consuming the ctrl
 * Kafka topic, a routing bolt that dispatches ctrl requests to registered endpoint
 * bolts, and an output Kafka bolt that publishes responses back to the ctrl topic.
 *
 * @param builder topology builder to register the ctrl components with
 * @param targets bolts that participate in the ctrl protocol; each one is
 *                subscribed (allGrouping) to its route endpoint and its ctrl
 *                response stream is fed into the output bolt
 * @throws StreamNameCollisionException if an endpoint name is already registered
 *         on the route bolt
 */
protected void createCtrlBranch(TopologyBuilder builder, List<CtrlBoltRef> targets) throws StreamNameCollisionException {
    checkAndCreateTopic(config.getKafkaCtrlTopic());

    KafkaSpout kafkaSpout = createKafkaSpout(config.getKafkaCtrlTopic(), SPOUT_ID_CTRL);
    builder.setSpout(SPOUT_ID_CTRL, kafkaSpout);

    RouteBolt route = new RouteBolt(getTopologyName());
    builder.setBolt(BOLT_ID_CTRL_ROUTE, route).shuffleGrouping(SPOUT_ID_CTRL);

    KafkaBolt kafkaBolt = createKafkaBolt(config.getKafkaCtrlTopic());
    // STREAM_ID_ERROR is a static constant — access it via the class, not the
    // `route` instance (was `route.STREAM_ID_ERROR`).
    BoltDeclarer outputSetup = builder.setBolt(BOLT_ID_CTRL_OUTPUT, kafkaBolt)
            .shuffleGrouping(BOLT_ID_CTRL_ROUTE, RouteBolt.STREAM_ID_ERROR);

    for (CtrlBoltRef ref : targets) {
        String boltId = ref.getBoltId();
        // Each target listens on its own route endpoint and sends its ctrl
        // responses into the shared output bolt.
        ref.getDeclarer().allGrouping(BOLT_ID_CTRL_ROUTE, route.registerEndpoint(boltId));
        outputSetup.shuffleGrouping(boltId, ref.getBolt().getCtrlStreamId());
    }
}
Use of org.openkilda.wfm.CtrlBoltRef in project open-kilda by telstra:
class CacheTopology, method createTopology.
/**
* {@inheritDoc}
*/
@Override
public StormTopology createTopology() throws NameCollisionException {
logger.info("Creating Topology: {}", topologyName);
initKafkaTopics();
Integer parallelism = config.getParallelism();
TopologyBuilder builder = new TopologyBuilder();
List<CtrlBoltRef> ctrlTargets = new ArrayList<>();
BoltDeclarer boltSetup;
KafkaSpout kafkaSpout;
/*
* Receives cache from storage.
*/
kafkaSpout = createKafkaSpout(config.getKafkaTopoCacheTopic(), SPOUT_ID_COMMON);
builder.setSpout(SPOUT_ID_COMMON, kafkaSpout, parallelism);
// (carmine) - as part of 0.8 refactor, merged inputs to one topic, so this isn't neccessary
// /*
// * Receives cache updates from WFM topology.
// */
// kafkaSpout = createKafkaSpout(config.getKafkaTopoCacheTopic(), SPOUT_ID_TOPOLOGY);
// builder.setSpout(SPOUT_ID_TOPOLOGY, kafkaSpout, parallelism);
/*
* Stores network cache.
*/
CacheBolt cacheBolt = new CacheBolt(config.getDiscoveryTimeout());
boltSetup = builder.setBolt(BOLT_ID_CACHE, cacheBolt, parallelism).shuffleGrouping(SPOUT_ID_COMMON);
ctrlTargets.add(new CtrlBoltRef(BOLT_ID_CACHE, cacheBolt, boltSetup));
KafkaBolt kafkaBolt;
/*
* Sends network events to storage.
*/
kafkaBolt = createKafkaBolt(config.getKafkaTopoEngTopic());
builder.setBolt(BOLT_ID_COMMON_OUTPUT, kafkaBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.TPE.toString());
/*
* Sends cache dump and reroute requests to WFM topology.
*/
kafkaBolt = createKafkaBolt(config.getKafkaFlowTopic());
builder.setBolt(BOLT_ID_TOPOLOGY_OUTPUT, kafkaBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.WFM_DUMP.toString());
/*
* Sends requests for ISL to OFE topology.
*/
// FIXME(surabjin): 2 kafka bold with same topic (see previous bolt)
KafkaBolt oFEKafkaBolt = createKafkaBolt(config.getKafkaFlowTopic());
builder.setBolt(BOLT_ID_OFE, oFEKafkaBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.OFE.toString());
createCtrlBranch(builder, ctrlTargets);
createHealthCheckHandler(builder, ServiceType.CACHE_TOPOLOGY.getId());
return builder.createTopology();
}
Use of org.openkilda.wfm.CtrlBoltRef in project open-kilda by telstra:
class FlowTopology, method createTopology.
/**
 * Assembles the flow CRUD topology: Northbound requests are split per operation,
 * handled by the CRUD bolt (grouped by flow-id), pushed through the transaction
 * bolt to the speaker, and responses/errors are routed back to Northbound.
 * The CRUD and transaction bolts are additionally registered as ctrl endpoints.
 *
 * @return the assembled Storm topology
 * @throws StreamNameCollisionException if two ctrl endpoints collide on a stream name
 */
@Override
public StormTopology createTopology() throws StreamNameCollisionException {
logger.info("Creating Topology: {}", topologyName);
TopologyBuilder builder = new TopologyBuilder();
List<CtrlBoltRef> ctrlTargets = new ArrayList<>();
BoltDeclarer boltSetup;
Integer parallelism = config.getParallelism();
/*
 * Spout receives all Northbound requests.
 */
KafkaSpout northboundKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.NORTHBOUND_KAFKA_SPOUT.toString());
builder.setSpout(ComponentType.NORTHBOUND_KAFKA_SPOUT.toString(), northboundKafkaSpout, parallelism);
/*
 * Bolt splits requests on streams.
 * It groups requests by flow-id.
 */
SplitterBolt splitterBolt = new SplitterBolt();
builder.setBolt(ComponentType.SPLITTER_BOLT.toString(), splitterBolt, parallelism).shuffleGrouping(ComponentType.NORTHBOUND_KAFKA_SPOUT.toString());
/*
 * Bolt handles flow CRUD operations.
 * It groups requests by flow-id so all operations on one flow land on the
 * same executor (also consumes status streams from the transaction, speaker
 * and topology-engine bolts, keyed the same way).
 */
CrudBolt crudBolt = new CrudBolt(pathComputerAuth);
// NOTE(review): the return value of serialized_java(...) is discarded — this
// looks like a leftover Kryo/Java-serializability check for pathComputerAuth;
// confirm whether this statement can be removed.
ComponentObject.serialized_java(org.apache.storm.utils.Utils.javaSerialize(pathComputerAuth));
boltSetup = builder.setBolt(ComponentType.CRUD_BOLT.toString(), crudBolt, parallelism).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.CREATE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.READ.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.UPDATE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.DELETE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.PUSH.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.UNPUSH.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.PATH.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.RESTORE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.REROUTE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.CACHE_SYNC.toString(), fieldFlowId).fieldsGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId).fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId);
ctrlTargets.add(new CtrlBoltRef(ComponentType.CRUD_BOLT.toString(), crudBolt, boltSetup));
/*
 * Bolt sends cache updates.
 */
KafkaBolt cacheKafkaBolt = createKafkaBolt(config.getKafkaTopoCacheTopic());
builder.setBolt(ComponentType.CACHE_KAFKA_BOLT.toString(), cacheKafkaBolt, parallelism).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.CREATE.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.UPDATE.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.DELETE.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.STATUS.toString());
/*
 * Spout receives Topology Engine response
 */
KafkaSpout topologyKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString());
builder.setSpout(ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString(), topologyKafkaSpout, parallelism);
/*
 * Bolt processes Topology Engine responses, groups by flow-id field
 */
TopologyEngineBolt topologyEngineBolt = new TopologyEngineBolt();
builder.setBolt(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), topologyEngineBolt, parallelism).shuffleGrouping(ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString());
/*
 * Bolt sends Speaker requests
 */
KafkaBolt speakerKafkaBolt = createKafkaBolt(config.getKafkaSpeakerTopic());
builder.setBolt(ComponentType.SPEAKER_KAFKA_BOLT.toString(), speakerKafkaBolt, parallelism).shuffleGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.CREATE.toString()).shuffleGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.DELETE.toString());
/*
 * Spout receives Speaker responses
 */
KafkaSpout speakerKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.SPEAKER_KAFKA_SPOUT.toString());
builder.setSpout(ComponentType.SPEAKER_KAFKA_SPOUT.toString(), speakerKafkaSpout, parallelism);
/*
 * Bolt processes Speaker responses, groups by flow-id field
 */
SpeakerBolt speakerBolt = new SpeakerBolt();
builder.setBolt(ComponentType.SPEAKER_BOLT.toString(), speakerBolt, parallelism).shuffleGrouping(ComponentType.SPEAKER_KAFKA_SPOUT.toString());
/*
 * Transaction bolt: tracks in-flight create/delete rule installation per
 * switch (grouped by switch-id); also a ctrl endpoint.
 */
TransactionBolt transactionBolt = new TransactionBolt();
boltSetup = builder.setBolt(ComponentType.TRANSACTION_BOLT.toString(), transactionBolt, parallelism).fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.CREATE.toString(), fieldSwitchId).fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.DELETE.toString(), fieldSwitchId).fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.CREATE.toString(), fieldSwitchId).fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.DELETE.toString(), fieldSwitchId);
ctrlTargets.add(new CtrlBoltRef(ComponentType.TRANSACTION_BOLT.toString(), transactionBolt, boltSetup));
/*
 * Error processing bolt
 */
ErrorBolt errorProcessingBolt = new ErrorBolt();
builder.setBolt(ComponentType.ERROR_BOLT.toString(), errorProcessingBolt, parallelism).shuffleGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.ERROR.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.ERROR.toString());
/*
 * Bolt forms Northbound responses
 */
NorthboundReplyBolt northboundReplyBolt = new NorthboundReplyBolt();
builder.setBolt(ComponentType.NORTHBOUND_REPLY_BOLT.toString(), northboundReplyBolt, parallelism).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.RESPONSE.toString()).shuffleGrouping(ComponentType.ERROR_BOLT.toString(), StreamType.RESPONSE.toString());
/*
 * Bolt sends Northbound responses
 */
KafkaBolt northboundKafkaBolt = createKafkaBolt(config.getKafkaNorthboundTopic());
builder.setBolt(ComponentType.NORTHBOUND_KAFKA_BOLT.toString(), northboundKafkaBolt, parallelism).shuffleGrouping(ComponentType.NORTHBOUND_REPLY_BOLT.toString(), StreamType.RESPONSE.toString());
createCtrlBranch(builder, ctrlTargets);
createHealthCheckHandler(builder, ServiceType.FLOW_TOPOLOGY.getId());
return builder.createTopology();
}
Use of org.openkilda.wfm.CtrlBoltRef in project open-kilda by telstra:
class OFEventWFMTopology, method createTopology.
/**
 * Builds the network discovery (WFM event) topology. The best place to look
 * for detailed design information regarding this topology's interactions is
 * docs/design/usecase/network-discovery.md.
 *
 * At a high level, it receives input from the speaker and sends output to the
 * topology engine.
 *
 * @return the assembled Storm topology
 * @throws StreamNameCollisionException if a ctrl endpoint stream name collides
 */
public StormTopology createTopology() throws StreamNameCollisionException {
    logger.debug("Building Topology - " + this.getClass().getSimpleName());

    String kafkaTopoDiscoTopic = config.getKafkaTopoDiscoTopic();
    String kafkaTopoEngTopic = config.getKafkaTopoEngTopic();
    String kafkaSpeakerTopic = config.getKafkaSpeakerTopic();

    checkAndCreateTopic(kafkaTopoDiscoTopic);
    checkAndCreateTopic(kafkaTopoEngTopic);
    // The speaker topic is written to by a KafkaBolt below but was never
    // ensured to exist, unlike the other two topics — create it as well.
    checkAndCreateTopic(kafkaSpeakerTopic);

    TopologyBuilder builder = new TopologyBuilder();
    List<CtrlBoltRef> ctrlTargets = new ArrayList<>();

    String spoutName = SPOUT_ID_INPUT;
    String boltName = BOLT_ID;

    // Input: discovery/speaker events from the topo.disco topic.
    builder.setSpout(spoutName, createKafkaSpout(kafkaTopoDiscoTopic, spoutName));

    IStatefulBolt bolt = new OFELinkBolt(config);
    // TODO: resolve the comments below; are there any state issues?
    // NB: with shuffleGrouping, we can't maintain state .. would need to parse first
    // just to pull out switchID.
    // (crimi) - not sure I agree here .. state can be maintained, albeit distributed.
    BoltDeclarer bd = builder.setBolt(boltName, bolt, config.getParallelism())
            .shuffleGrouping(spoutName);

    // Output bolts and their streams are named after the destination topics.
    builder.setBolt(kafkaTopoEngTopic, createKafkaBolt(kafkaTopoEngTopic), config.getParallelism())
            .shuffleGrouping(boltName, kafkaTopoEngTopic);
    builder.setBolt(kafkaSpeakerTopic, createKafkaBolt(kafkaSpeakerTopic), config.getParallelism())
            .shuffleGrouping(boltName, kafkaSpeakerTopic);

    // TODO: verify this ctrlTarget after refactoring.
    ctrlTargets.add(new CtrlBoltRef(boltName, (ICtrlBolt) bolt, bd));
    createCtrlBranch(builder, ctrlTargets);
    // TODO: verify WFM_TOPOLOGY health check
    createHealthCheckHandler(builder, ServiceType.WFM_TOPOLOGY.getId());

    return builder.createTopology();
}
Aggregations