Example 1 with SpoutDeclarer

Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.

From the class TransactionalTopologyBuilder, the method buildTopologyBuilder:

public TopologyBuilder buildTopologyBuilder() {
    String coordinator = _spoutId + "/coordinator";
    TopologyBuilder builder = new TopologyBuilder();
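    // The coordinator spout starts each transaction and signals the batch and commit phases on dedicated streams.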
    SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
    for (Map<String, Object> conf : _spoutConfs) {
        declarer.addConfigurations(conf);
    }
    declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
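    // The spout's batch emitter runs as a CoordinatedBolt driven by the coordinator's batch stream.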
    BoltDeclarer emitterDeclarer = builder.setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism).allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID).addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    if (_spout instanceof ICommitterTransactionalSpout) {
        emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
    }
    for (String id : _bolts.keySet()) {
        Component component = _bolts.get(id);
        Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
        for (String c : componentBoltSubscriptions(component)) {
            coordinatedArgs.put(c, SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
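        // Committer components additionally listen to the coordinator's commit stream.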
        if (component.committer) {
            idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
        BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            input.addConfigurations(conf);
        }
        for (String c : componentBoltSubscriptions(component)) {
            input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
        }
        for (InputDeclaration d : component.declarations) {
            d.declare(input);
        }
        if (component.committer) {
            input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
    }
    return builder;
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) IdStreamSpec(org.apache.storm.coordination.CoordinatedBolt.IdStreamSpec) SourceArgs(org.apache.storm.coordination.CoordinatedBolt.SourceArgs) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer) HashMap(java.util.HashMap) Map(java.util.Map) CoordinatedBolt(org.apache.storm.coordination.CoordinatedBolt)
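The same per-component configuration calls can be used on any SpoutDeclarer returned by TopologyBuilder.setSpout. Below is a minimal, self-contained sketch of that pattern, assuming Storm 2.x method signatures; the NoopSpout class and the configuration keys are hypothetical placeholders, not taken from the example above.

import java.util.HashMap;
import java.util.Map;

import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.SpoutDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichSpout;

public class SpoutDeclarerConfigSketch {

    // Hypothetical no-op spout, used only so there is something to register.
    static class NoopSpout extends BaseRichSpout {
        @Override
        public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
        }

        @Override
        public void nextTuple() {
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }

    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        SpoutDeclarer declarer = builder.setSpout("my-spout", new NoopSpout(), 2);

        // Merge a whole map of component-level settings, as the _spoutConfs loop does above.
        Map<String, Object> extraConf = new HashMap<>();
        extraConf.put("my.custom.setting", "some-value");
        declarer.addConfigurations(extraConf);

        // Set a single component-level key, as done for Config.TOPOLOGY_TRANSACTIONAL_ID above.
        declarer.addConfiguration("my.other.setting", 42);

        builder.createTopology();
    }
}

addConfigurations merges a whole map into the component's configuration, while addConfiguration sets a single key, exactly as buildTopologyBuilder does for the coordinator spout above.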

Example 2 with SpoutDeclarer

Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.

From the class TestResourceAwareScheduler, the method TestMultipleSpoutsAndCyclicTopologies:

/**
 * Test multiple spouts and cyclic topologies
 */
@Test
public void TestMultipleSpoutsAndCyclicTopologies() {
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer s1 = builder.setSpout("spout-1", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    SpoutDeclarer s2 = builder.setSpout("spout-2", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    BoltDeclarer b1 = builder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("spout-1").shuffleGrouping("bolt-3");
    BoltDeclarer b2 = builder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-1");
    BoltDeclarer b3 = builder.setBolt("bolt-3", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-2").shuffleGrouping("spout-2");
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<String, Number>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(25, 1, resourceMap);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology stormTopology = builder.createTopology();
    TopologyDetails topo = new TopologyDetails("topo-1", config, stormTopology, 0, genExecsAndComps(stormTopology), 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
    Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
    topoMap.put(topo.getId(), topo);
    Topologies topologies = new Topologies(topoMap);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    Assert.assertTrue("Topo scheduled?", cluster.getAssignmentById(topo.getId()) != null);
    Assert.assertEquals("Topo all executors scheduled?", 25, cluster.getAssignmentById(topo.getId()).getExecutorToSlot().size());
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer) Test(org.junit.Test)

Example 3 with SpoutDeclarer

Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.

From the class FluxBuilder, the method buildSpouts:

private static void buildSpouts(ExecutionContext context, TopologyBuilder builder) throws ClassNotFoundException, NoSuchMethodException, InvocationTargetException, InstantiationException, IllegalAccessException, NoSuchFieldException {
    for (SpoutDef sd : context.getTopologyDef().getSpouts()) {
        IRichSpout spout = buildSpout(sd, context);
        SpoutDeclarer declarer = builder.setSpout(sd.getId(), spout, sd.getParallelism());
        if (sd.getOnHeapMemoryLoad() > -1) {
            if (sd.getOffHeapMemoryLoad() > -1) {
                declarer.setMemoryLoad(sd.getOnHeapMemoryLoad(), sd.getOffHeapMemoryLoad());
            } else {
                declarer.setMemoryLoad(sd.getOnHeapMemoryLoad());
            }
        }
        if (sd.getCpuLoad() > -1) {
            declarer.setCPULoad(sd.getCpuLoad());
        }
        if (sd.getNumTasks() > -1) {
            declarer.setNumTasks(sd.getNumTasks());
        }
        context.addSpout(sd.getId(), spout);
    }
}
Also used : IRichSpout(org.apache.storm.topology.IRichSpout) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer) SpoutDef(org.apache.storm.flux.model.SpoutDef)
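The resource hints that buildSpouts applies (setMemoryLoad, setCPULoad, setNumTasks) can also be set directly when wiring a topology by hand. A small sketch of that; the component id and all load values are hypothetical placeholders, not values read from a SpoutDef.

import org.apache.storm.topology.IRichSpout;
import org.apache.storm.topology.SpoutDeclarer;
import org.apache.storm.topology.TopologyBuilder;

public class SpoutResourceSketch {

    // Declares a spout and applies the same resource hints that buildSpouts reads from a SpoutDef.
    public static SpoutDeclarer declareWithResources(TopologyBuilder builder, IRichSpout spout) {
        SpoutDeclarer declarer = builder.setSpout("flux-style-spout", spout, 1);
        // on-heap and off-heap memory in MB, cf. sd.getOnHeapMemoryLoad()/sd.getOffHeapMemoryLoad()
        declarer.setMemoryLoad(128.0, 64.0);
        // CPU as a percentage of one core, cf. sd.getCpuLoad()
        declarer.setCPULoad(20.0);
        // total task count across all executors, cf. sd.getNumTasks()
        declarer.setNumTasks(2);
        return declarer;
    }
}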

Example 4 with SpoutDeclarer

Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.

From the class TestUtilsForResourceAwareScheduler, the method topologyBuilder:

public static TopologyBuilder topologyBuilder(int numSpout, int numBolt, int spoutParallelism, int boltParallelism) {
    LOG.debug("buildTopology with -> numSpout: " + numSpout + " spoutParallelism: " + spoutParallelism + " numBolt: " + numBolt + " boltParallelism: " + boltParallelism);
    TopologyBuilder builder = new TopologyBuilder();
    for (int i = 0; i < numSpout; i++) {
        SpoutDeclarer s1 = builder.setSpout("spout-" + i, new TestSpout(), spoutParallelism);
    }
    int j = 0;
    for (int i = 0; i < numBolt; i++) {
        if (j >= numSpout) {
            j = 0;
        }
        BoltDeclarer b1 = builder.setBolt("bolt-" + i, new TestBolt(), boltParallelism).shuffleGrouping("spout-" + j);
        j++;
    }
    return builder;
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer)
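Using the helper is a one-liner; a short hypothetical usage sketch follows (the argument values 2, 4, 5, 5 are arbitrary, and the print only confirms the expected component counts).

import org.apache.storm.generated.StormTopology;
import org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler;
import org.apache.storm.topology.TopologyBuilder;

public class TopologyBuilderHelperSketch {

    public static void main(String[] args) {
        // Two spouts feeding four bolts with five executors each; the helper above wires
        // each "bolt-i" to a spout round-robin via shuffleGrouping.
        TopologyBuilder builder = TestUtilsForResourceAwareScheduler.topologyBuilder(2, 4, 5, 5);
        StormTopology topology = builder.createTopology();
        System.out.println(topology.get_spouts().size() + " spouts, " + topology.get_bolts().size() + " bolts");
    }
}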

Example 5 with SpoutDeclarer

Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.

From the class TestRebalance, the method testRebalanceTopologyResourcesAndConfigs:

@Test
public void testRebalanceTopologyResourcesAndConfigs() throws Exception {
    LOG.info("Starting local cluster...");
    Config conf = new Config();
    conf.put(DaemonConfig.STORM_SCHEDULER, ResourceAwareScheduler.class.getName());
    conf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, DefaultSchedulingPriorityStrategy.class.getName());
    conf.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, getDefaultResourceAwareStrategyClass().getName());
    conf.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 10.0);
    conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 10.0);
    conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 100.0);
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    Map<String, Double> resourcesMap = new HashMap<>();
    resourcesMap.put("gpu.count", 5.0);
    conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_MAP, resourcesMap);
    try (ILocalCluster cluster = new LocalCluster.Builder().withDaemonConf(conf).build()) {
        TopologyBuilder builder = new TopologyBuilder();
        SpoutDeclarer s1 = builder.setSpout("spout-1", new TestUtilsForResourceAwareScheduler.TestSpout(), 2);
        BoltDeclarer b1 = builder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), 2).shuffleGrouping("spout-1");
        BoltDeclarer b2 = builder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), 2).shuffleGrouping("bolt-1");
        StormTopology stormTopology = builder.createTopology();
        LOG.info("submitting topologies....");
        String topoName = "topo1";
        cluster.submitTopology(topoName, new HashMap<>(), stormTopology);
        waitTopologyScheduled(topoName, cluster, 20);
        RebalanceOptions opts = new RebalanceOptions();
        Map<String, Map<String, Double>> resources = new HashMap<String, Map<String, Double>>();
        resources.put("spout-1", new HashMap<String, Double>());
        resources.get("spout-1").put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 120.0);
        resources.get("spout-1").put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 25.0);
        resources.get("spout-1").put("gpu.count", 5.0);
        opts.set_topology_resources_overrides(resources);
        opts.set_wait_secs(0);
        JSONObject jsonObject = new JSONObject();
        jsonObject.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 768.0);
        opts.set_topology_conf_overrides(jsonObject.toJSONString());
        LOG.info("rebalancing....");
        cluster.rebalance("topo1", opts);
        waitTopologyScheduled(topoName, cluster, 10);
        boolean topologyUpdated = false;
        JSONParser parser = new JSONParser();
        for (int i = 0; i < 5; i++) {
            Utils.sleep(SLEEP_TIME_BETWEEN_RETRY);
            String confRaw = cluster.getTopologyConf(topoNameToId(topoName, cluster));
            JSONObject readConf = (JSONObject) parser.parse(confRaw);
            if (768.0 == (double) readConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB)) {
                topologyUpdated = true;
                break;
            }
        }
        StormTopology readStormTopology = cluster.getTopology(topoNameToId(topoName, cluster));
        String componentConfRaw = readStormTopology.get_spouts().get("spout-1").get_common().get_json_conf();
        JSONObject readTopologyConf = (JSONObject) parser.parse(componentConfRaw);
        Map<String, Double> componentResources = (Map<String, Double>) readTopologyConf.get(Config.TOPOLOGY_COMPONENT_RESOURCES_MAP);
        assertTrue("Topology has been updated", topologyUpdated);
        assertEquals("Updated CPU correct", 25.0, componentResources.get(Constants.COMMON_CPU_RESOURCE_NAME), 0.001);
        assertEquals("Updated Memory correct", 120.0, componentResources.get(Constants.COMMON_ONHEAP_MEMORY_RESOURCE_NAME), 0.001);
        assertEquals("Updated Generic resource correct", 5.0, componentResources.get("gpu.count"), 0.001);
    }
}
Also used : HashMap(java.util.HashMap) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) StormTopology(org.apache.storm.generated.StormTopology) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) DefaultSchedulingPriorityStrategy(org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) JSONObject(org.json.simple.JSONObject) BoltDeclarer(org.apache.storm.topology.BoltDeclarer) JSONParser(org.json.simple.parser.JSONParser) SpoutDeclarer(org.apache.storm.topology.SpoutDeclarer) HashMap(java.util.HashMap) Map(java.util.Map) RebalanceOptions(org.apache.storm.generated.RebalanceOptions) Test(org.junit.Test)

Aggregations

SpoutDeclarer (org.apache.storm.topology.SpoutDeclarer): 12 usages
BoltDeclarer (org.apache.storm.topology.BoltDeclarer): 10 usages
TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 10 usages
HashMap (java.util.HashMap): 6 usages
Map (java.util.Map): 3 usages
Config (org.apache.storm.Config): 3 usages
IRichSpout (org.apache.storm.topology.IRichSpout): 3 usages
ArrayList (java.util.ArrayList): 2 usages
HashSet (java.util.HashSet): 2 usages
LinkedHashMap (java.util.LinkedHashMap): 2 usages
Set (java.util.Set): 2 usages
GlobalStreamId (org.apache.storm.generated.GlobalStreamId): 2 usages
SharedMemory (org.apache.storm.generated.SharedMemory): 2 usages
StormTopology (org.apache.storm.generated.StormTopology): 2 usages
ITridentSpout (org.apache.storm.trident.spout.ITridentSpout): 2 usages
Test (org.junit.Test): 2 usages
LinkedHashSet (java.util.LinkedHashSet): 1 usage
List (java.util.List): 1 usage
CoordinatedBolt (org.apache.storm.coordination.CoordinatedBolt): 1 usage
IdStreamSpec (org.apache.storm.coordination.CoordinatedBolt.IdStreamSpec): 1 usage