Search in sources:

Example 1 with BoltDeclarer

Use of org.apache.storm.topology.BoltDeclarer in project storm by apache.

The class TransactionalTopologyBuilder, method buildTopologyBuilder.

public TopologyBuilder buildTopologyBuilder() {
    // Builds the storm topology for this transactional spout: a coordinator spout
    // driving a batch-emitter bolt, plus every user bolt wrapped in CoordinatedBolt
    // so the batch/commit streams implement the transaction protocol.
    String coordinator = _spoutId + "/coordinator";
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
    for (Map<String, Object> conf : _spoutConfs) {
        declarer.addConfigurations(conf);
    }
    declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    BoltDeclarer emitterDeclarer = builder.setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism).allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID).addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    if (_spout instanceof ICommitterTransactionalSpout) {
        // Committer spouts additionally listen for commit signals from the coordinator.
        emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
    }
    // Iterate entries directly rather than keySet() + get(): avoids a second map lookup per bolt.
    for (Map.Entry<String, Component> entry : _bolts.entrySet()) {
        String id = entry.getKey();
        Component component = entry.getValue();
        // Every upstream bolt subscription participates in batch-completion coordination.
        Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
        for (String c : componentBoltSubscriptions(component)) {
            coordinatedArgs.put(c, SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
        if (component.committer) {
            idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
        BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            input.addConfigurations(conf);
        }
        // Coordination tuples are delivered via direct grouping from each upstream component.
        for (String c : componentBoltSubscriptions(component)) {
            input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
        }
        for (InputDeclaration d : component.declarations) {
            d.declare(input);
        }
        if (component.committer) {
            input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
    }
    return builder;
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) IdStreamSpec(org.apache.storm.coordination.CoordinatedBolt.IdStreamSpec) SourceArgs(org.apache.storm.coordination.CoordinatedBolt.SourceArgs) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer) HashMap(java.util.HashMap) Map(java.util.Map) CoordinatedBolt(org.apache.storm.coordination.CoordinatedBolt)

Example 2 with BoltDeclarer

Use of org.apache.storm.topology.BoltDeclarer in project storm by apache.

The class TestResourceAwareScheduler, method TestMultipleSpoutsAndCyclicTopologies.

/**
 * Verifies that the resource-aware scheduler handles multiple spouts and a
 * cyclic bolt graph (bolt-1 -> bolt-2 -> bolt-3 -> bolt-1).
 */
@Test
public void TestMultipleSpoutsAndCyclicTopologies() {
    // Topology under test: two spouts feeding a three-bolt cycle.
    TopologyBuilder topologyBuilder = new TopologyBuilder();
    SpoutDeclarer spoutOne = topologyBuilder.setSpout("spout-1", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    SpoutDeclarer spoutTwo = topologyBuilder.setSpout("spout-2", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    BoltDeclarer boltOne = topologyBuilder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("spout-1").shuffleGrouping("bolt-3");
    BoltDeclarer boltTwo = topologyBuilder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-1");
    BoltDeclarer boltThree = topologyBuilder.setBolt("bolt-3", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-2").shuffleGrouping("spout-2");
    // Cluster fixture: 25 single-port supervisors, each with 100% CPU and 1000 MB memory.
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<String, Number>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(25, 1, resourceMap);
    // Scheduler configuration: default RAS strategies layered over the default config.
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology stormTopology = topologyBuilder.createTopology();
    TopologyDetails topologyDetails = new TopologyDetails("topo-1", config, stormTopology, 0, genExecsAndComps(stormTopology), 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
    Map<String, TopologyDetails> topologyMap = new HashMap<String, TopologyDetails>();
    topologyMap.put(topologyDetails.getId(), topologyDetails);
    Topologies topologies = new Topologies(topologyMap);
    ResourceAwareScheduler scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config);
    scheduler.schedule(topologies, cluster);
    // An assignment must exist and cover all 25 executors.
    Assert.assertTrue("Topo scheduled?", cluster.getAssignmentById(topologyDetails.getId()) != null);
    Assert.assertEquals("Topo all executors scheduled?", 25, cluster.getAssignmentById(topologyDetails.getId()).getExecutorToSlot().size());
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer) Test(org.junit.Test)

Example 3 with BoltDeclarer

Use of org.apache.storm.topology.BoltDeclarer in project open-kilda by telstra.

The class AbstractTopology, method createCtrlBranch.

/**
 * Attaches the ctrl (control) branch to the topology: a kafka spout reading the
 * ctrl topic, a routing bolt, and a kafka output bolt writing back to the same
 * ctrl topic. Every bolt in {@code targets} is subscribed to its routed ctrl
 * stream and its ctrl responses are fed into the output bolt.
 *
 * @param builder topology builder to register the ctrl components on
 * @param targets bolts participating in the ctrl protocol
 * @throws StreamNameCollisionException if a stream name is already taken
 */
protected void createCtrlBranch(TopologyBuilder builder, List<CtrlBoltRef> targets) throws StreamNameCollisionException {
    checkAndCreateTopic(config.getKafkaCtrlTopic());
    KafkaSpout kafkaSpout;
    kafkaSpout = createKafkaSpout(config.getKafkaCtrlTopic(), SPOUT_ID_CTRL);
    builder.setSpout(SPOUT_ID_CTRL, kafkaSpout);
    RouteBolt route = new RouteBolt(getTopologyName());
    builder.setBolt(BOLT_ID_CTRL_ROUTE, route).shuffleGrouping(SPOUT_ID_CTRL);
    // Output bolt publishes ctrl responses (and routing errors) back to the ctrl topic.
    KafkaBolt kafkaBolt = createKafkaBolt(config.getKafkaCtrlTopic());
    // NOTE(review): STREAM_ID_ERROR is accessed through the instance; if it is a
    // static field, RouteBolt.STREAM_ID_ERROR would be the clearer reference — confirm.
    BoltDeclarer outputSetup = builder.setBolt(BOLT_ID_CTRL_OUTPUT, kafkaBolt).shuffleGrouping(BOLT_ID_CTRL_ROUTE, route.STREAM_ID_ERROR);
    for (CtrlBoltRef ref : targets) {
        String boltId = ref.getBoltId();
        // Deliver ctrl traffic addressed to this bolt, and collect its ctrl replies.
        ref.getDeclarer().allGrouping(BOLT_ID_CTRL_ROUTE, route.registerEndpoint(boltId));
        outputSetup.shuffleGrouping(boltId, ref.getBolt().getCtrlStreamId());
    }
}
Also used : CtrlBoltRef(org.openkilda.wfm.CtrlBoltRef) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) RouteBolt(org.openkilda.wfm.ctrl.RouteBolt) KafkaSpout(org.apache.storm.kafka.spout.KafkaSpout) KafkaBolt(org.apache.storm.kafka.bolt.KafkaBolt)

Example 4 with BoltDeclarer

Use of org.apache.storm.topology.BoltDeclarer in project open-kilda by telstra.

The class CacheTopology, method createTopology.

/**
 * {@inheritDoc}
 */
@Override
public StormTopology createTopology() throws NameCollisionException {
    logger.info("Creating Topology: {}", topologyName);
    initKafkaTopics();
    Integer parallelism = config.getParallelism();
    TopologyBuilder builder = new TopologyBuilder();
    List<CtrlBoltRef> ctrlTargets = new ArrayList<>();

    // Spout: receives cache from storage.
    KafkaSpout commonSpout = createKafkaSpout(config.getKafkaTopoCacheTopic(), SPOUT_ID_COMMON);
    builder.setSpout(SPOUT_ID_COMMON, commonSpout, parallelism);
    // (carmine) - as part of the 0.8 refactor, inputs were merged into one topic,
    // so a separate SPOUT_ID_TOPOLOGY spout for WFM cache updates is no longer necessary.

    // Bolt: stores the network cache; also registered as a ctrl target.
    CacheBolt cacheBolt = new CacheBolt(config.getDiscoveryTimeout());
    BoltDeclarer cacheDeclarer = builder.setBolt(BOLT_ID_CACHE, cacheBolt, parallelism).shuffleGrouping(SPOUT_ID_COMMON);
    ctrlTargets.add(new CtrlBoltRef(BOLT_ID_CACHE, cacheBolt, cacheDeclarer));

    // Bolt: sends network events to storage.
    KafkaBolt storageOutputBolt = createKafkaBolt(config.getKafkaTopoEngTopic());
    builder.setBolt(BOLT_ID_COMMON_OUTPUT, storageOutputBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.TPE.toString());

    // Bolt: sends cache dump and reroute requests to the WFM topology.
    KafkaBolt wfmOutputBolt = createKafkaBolt(config.getKafkaFlowTopic());
    builder.setBolt(BOLT_ID_TOPOLOGY_OUTPUT, wfmOutputBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.WFM_DUMP.toString());

    // Bolt: sends requests for ISL to the OFE topology.
    // FIXME(surabjin): two kafka bolts write to the same topic (see previous bolt)
    KafkaBolt ofeOutputBolt = createKafkaBolt(config.getKafkaFlowTopic());
    builder.setBolt(BOLT_ID_OFE, ofeOutputBolt, parallelism).shuffleGrouping(BOLT_ID_CACHE, StreamType.OFE.toString());

    createCtrlBranch(builder, ctrlTargets);
    createHealthCheckHandler(builder, ServiceType.CACHE_TOPOLOGY.getId());
    return builder.createTopology();
}
Also used : CtrlBoltRef(org.openkilda.wfm.CtrlBoltRef) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) ArrayList(java.util.ArrayList) KafkaSpout(org.apache.storm.kafka.spout.KafkaSpout) KafkaBolt(org.apache.storm.kafka.bolt.KafkaBolt)

Example 5 with BoltDeclarer

Use of org.apache.storm.topology.BoltDeclarer in project open-kilda by telstra.

The class FlowTopology, method createTopology.

/**
 * Builds the flow topology: northbound requests are split per operation type,
 * handled by the CRUD bolt, coordinated with the topology engine and the
 * speaker through the transaction bolt, and replies are pushed back to
 * northbound over kafka. Ctrl and health-check branches are attached last.
 */
@Override
public StormTopology createTopology() throws StreamNameCollisionException {
    logger.info("Creating Topology: {}", topologyName);
    TopologyBuilder builder = new TopologyBuilder();
    List<CtrlBoltRef> ctrlTargets = new ArrayList<>();
    BoltDeclarer boltSetup;
    Integer parallelism = config.getParallelism();
    /*
         * Spout receives all Northbound requests.
         */
    KafkaSpout northboundKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.NORTHBOUND_KAFKA_SPOUT.toString());
    builder.setSpout(ComponentType.NORTHBOUND_KAFKA_SPOUT.toString(), northboundKafkaSpout, parallelism);
    /*
         * Bolt splits requests on streams.
         * It groups requests by flow-id.
         */
    SplitterBolt splitterBolt = new SplitterBolt();
    builder.setBolt(ComponentType.SPLITTER_BOLT.toString(), splitterBolt, parallelism).shuffleGrouping(ComponentType.NORTHBOUND_KAFKA_SPOUT.toString());
    /*
         * Bolt handles flow CRUD operations.
         * It groups requests by flow-id.
         */
    CrudBolt crudBolt = new CrudBolt(pathComputerAuth);
    // NOTE(review): the return value of serialized_java(...) is discarded; this call
    // looks like a serializability check of pathComputerAuth — confirm the intent.
    ComponentObject.serialized_java(org.apache.storm.utils.Utils.javaSerialize(pathComputerAuth));
    // One fieldsGrouping per operation stream (plus status streams from the
    // transaction/speaker/topology-engine bolts), all keyed by flow-id.
    boltSetup = builder.setBolt(ComponentType.CRUD_BOLT.toString(), crudBolt, parallelism).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.CREATE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.READ.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.UPDATE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.DELETE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.PUSH.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.UNPUSH.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.PATH.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.RESTORE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.REROUTE.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.CACHE_SYNC.toString(), fieldFlowId).fieldsGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId).fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId).fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.STATUS.toString(), fieldFlowId);
    ctrlTargets.add(new CtrlBoltRef(ComponentType.CRUD_BOLT.toString(), crudBolt, boltSetup));
    /*
         * Bolt sends cache updates.
         */
    KafkaBolt cacheKafkaBolt = createKafkaBolt(config.getKafkaTopoCacheTopic());
    builder.setBolt(ComponentType.CACHE_KAFKA_BOLT.toString(), cacheKafkaBolt, parallelism).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.CREATE.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.UPDATE.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.DELETE.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.STATUS.toString());
    /*
         * Spout receives Topology Engine response
         */
    KafkaSpout topologyKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString());
    builder.setSpout(ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString(), topologyKafkaSpout, parallelism);
    /*
         * Bolt processes Topology Engine responses, groups by flow-id field
         */
    TopologyEngineBolt topologyEngineBolt = new TopologyEngineBolt();
    builder.setBolt(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), topologyEngineBolt, parallelism).shuffleGrouping(ComponentType.TOPOLOGY_ENGINE_KAFKA_SPOUT.toString());
    /*
         * Bolt sends Speaker requests
         */
    KafkaBolt speakerKafkaBolt = createKafkaBolt(config.getKafkaSpeakerTopic());
    builder.setBolt(ComponentType.SPEAKER_KAFKA_BOLT.toString(), speakerKafkaBolt, parallelism).shuffleGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.CREATE.toString()).shuffleGrouping(ComponentType.TRANSACTION_BOLT.toString(), StreamType.DELETE.toString());
    /*
         * Spout receives Speaker responses
         */
    KafkaSpout speakerKafkaSpout = createKafkaSpout(config.getKafkaFlowTopic(), ComponentType.SPEAKER_KAFKA_SPOUT.toString());
    builder.setSpout(ComponentType.SPEAKER_KAFKA_SPOUT.toString(), speakerKafkaSpout, parallelism);
    /*
         * Bolt processes Speaker responses, groups by flow-id field
         */
    SpeakerBolt speakerBolt = new SpeakerBolt();
    builder.setBolt(ComponentType.SPEAKER_BOLT.toString(), speakerBolt, parallelism).shuffleGrouping(ComponentType.SPEAKER_KAFKA_SPOUT.toString());
    /*
         * Transaction bolt.
         */
    TransactionBolt transactionBolt = new TransactionBolt();
    // Transaction coordination is keyed by switch-id rather than flow-id.
    boltSetup = builder.setBolt(ComponentType.TRANSACTION_BOLT.toString(), transactionBolt, parallelism).fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.CREATE.toString(), fieldSwitchId).fieldsGrouping(ComponentType.TOPOLOGY_ENGINE_BOLT.toString(), StreamType.DELETE.toString(), fieldSwitchId).fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.CREATE.toString(), fieldSwitchId).fieldsGrouping(ComponentType.SPEAKER_BOLT.toString(), StreamType.DELETE.toString(), fieldSwitchId);
    ctrlTargets.add(new CtrlBoltRef(ComponentType.TRANSACTION_BOLT.toString(), transactionBolt, boltSetup));
    /*
         * Error processing bolt
         */
    ErrorBolt errorProcessingBolt = new ErrorBolt();
    builder.setBolt(ComponentType.ERROR_BOLT.toString(), errorProcessingBolt, parallelism).shuffleGrouping(ComponentType.SPLITTER_BOLT.toString(), StreamType.ERROR.toString()).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.ERROR.toString());
    /*
         * Bolt forms Northbound responses
         */
    NorthboundReplyBolt northboundReplyBolt = new NorthboundReplyBolt();
    builder.setBolt(ComponentType.NORTHBOUND_REPLY_BOLT.toString(), northboundReplyBolt, parallelism).shuffleGrouping(ComponentType.CRUD_BOLT.toString(), StreamType.RESPONSE.toString()).shuffleGrouping(ComponentType.ERROR_BOLT.toString(), StreamType.RESPONSE.toString());
    /*
         * Bolt sends Northbound responses
         */
    KafkaBolt northboundKafkaBolt = createKafkaBolt(config.getKafkaNorthboundTopic());
    builder.setBolt(ComponentType.NORTHBOUND_KAFKA_BOLT.toString(), northboundKafkaBolt, parallelism).shuffleGrouping(ComponentType.NORTHBOUND_REPLY_BOLT.toString(), StreamType.RESPONSE.toString());
    createCtrlBranch(builder, ctrlTargets);
    createHealthCheckHandler(builder, ServiceType.FLOW_TOPOLOGY.getId());
    return builder.createTopology();
}
Also used : TransactionBolt(org.openkilda.wfm.topology.flow.bolts.TransactionBolt) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) SpeakerBolt(org.openkilda.wfm.topology.flow.bolts.SpeakerBolt) ArrayList(java.util.ArrayList) TopologyEngineBolt(org.openkilda.wfm.topology.flow.bolts.TopologyEngineBolt) CrudBolt(org.openkilda.wfm.topology.flow.bolts.CrudBolt) SplitterBolt(org.openkilda.wfm.topology.flow.bolts.SplitterBolt) ErrorBolt(org.openkilda.wfm.topology.flow.bolts.ErrorBolt) CtrlBoltRef(org.openkilda.wfm.CtrlBoltRef) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) KafkaSpout(org.apache.storm.kafka.spout.KafkaSpout) KafkaBolt(org.apache.storm.kafka.bolt.KafkaBolt) NorthboundReplyBolt(org.openkilda.wfm.topology.flow.bolts.NorthboundReplyBolt)

Aggregations

BoltDeclarer (org.apache.storm.topology.BoltDeclarer)34 TopologyBuilder (org.apache.storm.topology.TopologyBuilder)20 HashMap (java.util.HashMap)13 SpoutDeclarer (org.apache.storm.topology.SpoutDeclarer)10 ArrayList (java.util.ArrayList)7 IRichBolt (org.apache.storm.topology.IRichBolt)6 Map (java.util.Map)5 IBasicBolt (org.apache.storm.topology.IBasicBolt)5 Config (org.apache.storm.Config)4 SharedMemory (org.apache.storm.generated.SharedMemory)4 KafkaSpout (org.apache.storm.kafka.spout.KafkaSpout)4 Fields (org.apache.storm.tuple.Fields)4 List (java.util.List)3 SourceArgs (org.apache.storm.coordination.CoordinatedBolt.SourceArgs)3 StormTopology (org.apache.storm.generated.StormTopology)3 KafkaBolt (org.apache.storm.kafka.bolt.KafkaBolt)3 CtrlBoltRef (org.openkilda.wfm.CtrlBoltRef)3 HashSet (java.util.HashSet)2 LinkedHashMap (java.util.LinkedHashMap)2 Set (java.util.Set)2