
Example 1 with TopologyBuilder

Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.

From class TransactionalTopologyBuilder, method buildTopologyBuilder:

public TopologyBuilder buildTopologyBuilder() {
    String coordinator = _spoutId + "/coordinator";
    TopologyBuilder builder = new TopologyBuilder();
    // The coordinator spout only emits transaction batch/commit signals; the user's
    // transactional spout runs inside the CoordinatedBolt declared just below.
    SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
    for (Map<String, Object> conf : _spoutConfs) {
        declarer.addConfigurations(conf);
    }
    declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    // The batch emitter wraps the spout's batch executor in a CoordinatedBolt, listens to
    // the coordinator's batch stream, and carries the same transactional id.
    BoltDeclarer emitterDeclarer = builder
            .setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism)
            .allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID)
            .addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    if (_spout instanceof ICommitterTransactionalSpout) {
        emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
    }
    // Every user bolt is wrapped in a CoordinatedBolt so batch completion can be tracked
    // across all of its upstream subscriptions.
    for (String id : _bolts.keySet()) {
        Component component = _bolts.get(id);
        Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
        for (String c : componentBoltSubscriptions(component)) {
            coordinatedArgs.put(c, SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
        if (component.committer) {
            // Committer bolts also watch the coordinator's commit stream (wired below via allGrouping).
            idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
        BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            input.addConfigurations(conf);
        }
        for (String c : componentBoltSubscriptions(component)) {
            input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
        }
        for (InputDeclaration d : component.declarations) {
            d.declare(input);
        }
        if (component.committer) {
            input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
    }
    return builder;
}
Also used: TopologyBuilder (org.apache.storm.topology.TopologyBuilder), HashMap (java.util.HashMap), IdStreamSpec (org.apache.storm.coordination.CoordinatedBolt.IdStreamSpec), SourceArgs (org.apache.storm.coordination.CoordinatedBolt.SourceArgs), BoltDeclarer (org.apache.storm.topology.BoltDeclarer), SpoutDeclarer (org.apache.storm.topology.SpoutDeclarer), Map (java.util.Map), CoordinatedBolt (org.apache.storm.coordination.CoordinatedBolt)
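
The builder returned above still has to be turned into a StormTopology and handed to a cluster. A minimal sketch of that final step, assuming a fully wired TransactionalTopologyBuilder is supplied by the caller (the class and method names below are illustrative, not taken from the source above):

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.transactional.TransactionalTopologyBuilder;

public class TransactionalSubmitSketch {
    // "transactionalBuilder" is assumed to be constructed and wired elsewhere
    // (spout, bolts, groupings); this sketch only shows the build-and-submit step.
    public static void submit(String name, TransactionalTopologyBuilder transactionalBuilder) throws Exception {
        StormTopology topology = transactionalBuilder.buildTopologyBuilder().createTopology();
        Config conf = new Config();
        conf.setNumWorkers(2);
        // For local testing, LocalCluster#submitTopology takes the same arguments.
        StormSubmitter.submitTopology(name, conf, topology);
    }
}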

Example 2 with TopologyBuilder

Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.

From class TestResourceAwareScheduler, method TestMultipleSpoutsAndCyclicTopologies:

/**
 * Test multiple spouts and cyclic topologies.
 */
@Test
public void TestMultipleSpoutsAndCyclicTopologies() {
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer s1 = builder.setSpout("spout-1", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    SpoutDeclarer s2 = builder.setSpout("spout-2", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    BoltDeclarer b1 = builder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("spout-1").shuffleGrouping("bolt-3");
    BoltDeclarer b2 = builder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-1");
    BoltDeclarer b3 = builder.setBolt("bolt-3", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-2").shuffleGrouping("spout-2");
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<String, Number>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(25, 1, resourceMap);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology stormTopology = builder.createTopology();
    TopologyDetails topo = new TopologyDetails("topo-1", config, stormTopology, 0, genExecsAndComps(stormTopology), 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
    Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
    topoMap.put(topo.getId(), topo);
    Topologies topologies = new Topologies(topoMap);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    Assert.assertTrue("Topo scheduled?", cluster.getAssignmentById(topo.getId()) != null);
    Assert.assertEquals("Topo all executors scheduled?", 25, cluster.getAssignmentById(topo.getId()).getExecutorToSlot().size());
}
Also used: TopologyBuilder (org.apache.storm.topology.TopologyBuilder), HashMap (java.util.HashMap), Config (org.apache.storm.Config), StormTopology (org.apache.storm.generated.StormTopology), SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl), Topologies (org.apache.storm.scheduler.Topologies), SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails), Cluster (org.apache.storm.scheduler.Cluster), INimbus (org.apache.storm.scheduler.INimbus), TopologyDetails (org.apache.storm.scheduler.TopologyDetails), BoltDeclarer (org.apache.storm.topology.BoltDeclarer), SpoutDeclarer (org.apache.storm.topology.SpoutDeclarer), Test (org.junit.Test)
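
Stripped of the scheduler scaffolding, the cyclic wiring in that test is plain TopologyBuilder usage. A minimal, self-contained sketch of the same loop, using TestWordSpout and a hypothetical NoopBolt (not part of the test above):

import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;

public class CyclicWiringSketch {

    // Hypothetical pass-through bolt; it only exists to give the cycle something to connect.
    public static class NoopBolt extends BaseBasicBolt {
        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            // intentionally drops every tuple
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("word"));
        }
    }

    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout-1", new TestWordSpout(), 1);
        // bolt-1 -> bolt-2 -> bolt-3 -> bolt-1 forms the cycle the test exercises
        builder.setBolt("bolt-1", new NoopBolt(), 1)
               .shuffleGrouping("spout-1")
               .shuffleGrouping("bolt-3");
        builder.setBolt("bolt-2", new NoopBolt(), 1).shuffleGrouping("bolt-1");
        builder.setBolt("bolt-3", new NoopBolt(), 1).shuffleGrouping("bolt-2");
        // createTopology() accepts the cycle; it is the scheduler's job to place all executors
        builder.createTopology();
    }
}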

Example 3 with TopologyBuilder

Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.

From class ConstSpoutIdBoltNullBoltTopo, method getTopology:

public static StormTopology getTopology(Map conf) {
    // 1 -  Setup Spout   --------
    ConstSpout spout = new ConstSpout("some data").withOutputFields("str");
    // 2 -  Setup IdBolt & DevNullBolt   --------
    IdBolt bolt1 = new IdBolt();
    DevNullBolt bolt2 = new DevNullBolt();
    // 3 - Setup Topology  --------
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, Helper.getInt(conf, SPOUT_COUNT, 1));
    builder.setBolt(BOLT1_ID, bolt1, Helper.getInt(conf, BOLT1_COUNT, 1)).localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(BOLT2_ID, bolt2, Helper.getInt(conf, BOLT2_COUNT, 1)).localOrShuffleGrouping(BOLT1_ID);
    return builder.createTopology();
}
Also used: IdBolt (org.apache.storm.perf.bolt.IdBolt), ConstSpout (org.apache.storm.perf.spout.ConstSpout), TopologyBuilder (org.apache.storm.topology.TopologyBuilder), DevNullBolt (org.apache.storm.perf.bolt.DevNullBolt)
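
Component parallelism is read from the supplied conf via Helper.getInt, so callers tune this topology purely through that map. A minimal sketch of such a call, assuming the SPOUT_COUNT/BOLT1_COUNT/BOLT2_COUNT keys referenced above are publicly accessible on ConstSpoutIdBoltNullBoltTopo (an assumption for illustration; the package is inferred from the bolt/spout imports above):

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.perf.ConstSpoutIdBoltNullBoltTopo;

public class PerfTopoConfSketch {
    public static StormTopology build() {
        Map<String, Object> conf = new HashMap<>();
        // Assumed-public keys; they are the same constants Helper.getInt reads above.
        conf.put(ConstSpoutIdBoltNullBoltTopo.SPOUT_COUNT, 2);  // two ConstSpout executors
        conf.put(ConstSpoutIdBoltNullBoltTopo.BOLT1_COUNT, 4);  // four IdBolt executors
        conf.put(ConstSpoutIdBoltNullBoltTopo.BOLT2_COUNT, 4);  // four DevNullBolt executors
        return ConstSpoutIdBoltNullBoltTopo.getTopology(conf);
    }
}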

Example 4 with TopologyBuilder

Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.

From class ThroughputVsLatency, method main:

public static void main(String[] args) throws Exception {
    long ratePerSecond = 500;
    if (args != null && args.length > 0) {
        ratePerSecond = Long.valueOf(args[0]);
    }
    int parallelism = 4;
    if (args != null && args.length > 1) {
        parallelism = Integer.valueOf(args[1]);
    }
    int numMins = 5;
    if (args != null && args.length > 2) {
        numMins = Integer.valueOf(args[2]);
    }
    String name = "wc-test";
    if (args != null && args.length > 3) {
        name = args[3];
    }
    Config conf = new Config();
    HttpForwardingMetricsServer metricServer = new HttpForwardingMetricsServer(conf) {

        @Override
        public void handle(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
            String worker = taskInfo.srcWorkerHost + ":" + taskInfo.srcWorkerPort;
            for (DataPoint dp : dataPoints) {
                if ("comp-lat-histo".equals(dp.name) && dp.value instanceof Histogram) {
                    synchronized (_histo) {
                        _histo.add((Histogram) dp.value);
                    }
                } else if ("CPU".equals(dp.name) && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object sys = m.get("sys-ms");
                    if (sys instanceof Number) {
                        _systemCPU.getAndAdd(((Number) sys).longValue());
                    }
                    Object user = m.get("user-ms");
                    if (user instanceof Number) {
                        _userCPU.getAndAdd(((Number) user).longValue());
                    }
                } else if (dp.name.startsWith("GC/") && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object count = m.get("count");
                    if (count instanceof Number) {
                        _gcCount.getAndAdd(((Number) count).longValue());
                    }
                    Object time = m.get("timeMs");
                    if (time instanceof Number) {
                        _gcMs.getAndAdd(((Number) time).longValue());
                    }
                } else if (dp.name.startsWith("memory/") && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object val = m.get("usedBytes");
                    if (val instanceof Number) {
                        MemMeasure mm = _memoryBytes.get(worker);
                        if (mm == null) {
                            mm = new MemMeasure();
                            MemMeasure tmp = _memoryBytes.putIfAbsent(worker, mm);
                            mm = tmp == null ? mm : tmp;
                        }
                        mm.update(((Number) val).longValue());
                    }
                }
            }
        }
    };
    metricServer.serve();
    String url = metricServer.getUrl();
    C cluster = new C(conf);
    conf.setNumWorkers(parallelism);
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
    conf.registerMetricsConsumer(org.apache.storm.metric.HttpForwardingMetricsConsumer.class, url, 1);
    Map<String, String> workerMetrics = new HashMap<String, String>();
    if (!cluster.isLocal()) {
        //sigar uses JNI and does not work in local mode
        workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
    }
    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
    conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
    conf.put(Config.TOPOLOGY_WORKER_GC_CHILDOPTS, "-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:NewSize=128m -XX:CMSInitiatingOccupancyFraction=70 -XX:-CMSConcurrentMTEnabled");
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
    TopologyBuilder builder = new TopologyBuilder();
    int numEach = 4 * parallelism;
    builder.setSpout("spout", new FastRandomSentenceSpout(ratePerSecond / numEach), numEach);
    builder.setBolt("split", new SplitSentence(), numEach).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), numEach).fieldsGrouping("split", new Fields("word"));
    try {
        cluster.submitTopology(name, conf, builder.createTopology());
        for (int i = 0; i < numMins * 2; i++) {
            Thread.sleep(30 * 1000);
            printMetrics(cluster, name);
        }
    } finally {
        kill(cluster, name);
    }
    System.exit(0);
}
Also used: HttpForwardingMetricsServer (org.apache.storm.metric.HttpForwardingMetricsServer), Histogram (org.HdrHistogram.Histogram), HashMap (java.util.HashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), TopologyBuilder (org.apache.storm.topology.TopologyBuilder), Config (org.apache.storm.Config), DataPoint (org.apache.storm.metric.api.IMetricsConsumer.DataPoint), TaskInfo (org.apache.storm.metric.api.IMetricsConsumer.TaskInfo), Fields (org.apache.storm.tuple.Fields), Collection (java.util.Collection), Map (java.util.Map)
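
The core of the metrics handler is folding every received "comp-lat-histo" histogram into one shared HdrHistogram. A minimal sketch of that aggregation pattern in isolation (the bounds and names are illustrative assumptions, not taken from the source above):

import org.HdrHistogram.Histogram;

public class LatencyAggregationSketch {
    // One hour of nanoseconds as the highest trackable value, 3 significant digits;
    // the exact bounds are an assumption for this sketch.
    private static final Histogram TOTAL = new Histogram(3_600_000_000_000L, 3);

    // Called once per received histogram data point, like the _histo.add(...) call above.
    public static synchronized void accumulate(Histogram sample) {
        TOTAL.add(sample);
    }

    public static synchronized void report() {
        System.out.printf("p50=%dns p99=%dns max=%dns%n",
            TOTAL.getValueAtPercentile(50.0),
            TOTAL.getValueAtPercentile(99.0),
            TOTAL.getMaxValue());
    }
}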

Example 5 with TopologyBuilder

Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.

From class SingleTopicKafkaSpoutConfiguration, method getTopologyKafkaSpout:

public static StormTopology getTopologyKafkaSpout(int port) {
    final TopologyBuilder tp = new TopologyBuilder();
    tp.setSpout("kafka_spout", new KafkaSpout<>(getKafkaSpoutConfig(port)), 1);
    tp.setBolt("kafka_bolt", new KafkaSpoutTestBolt()).shuffleGrouping("kafka_spout", STREAM);
    return tp.createTopology();
}
Also used: TopologyBuilder (org.apache.storm.topology.TopologyBuilder), KafkaSpoutTestBolt (org.apache.storm.kafka.spout.test.KafkaSpoutTestBolt)
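
Note the two-argument shuffleGrouping: the bolt subscribes to the named stream STREAM rather than the spout's default stream. A minimal sketch of the two sides of such a named-stream subscription, with a hypothetical stream name "records" and hypothetical component ids (SomeConsumerBolt is illustrative):

// Declaring side, inside a spout's or bolt's declareOutputFields(OutputFieldsDeclarer declarer):
declarer.declareStream("records", new Fields("key", "value"));

// Subscribing side, when wiring the topology:
builder.setBolt("consumer", new SomeConsumerBolt(), 1)
       .shuffleGrouping("producer", "records");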

Aggregations

TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 266
Config (org.apache.storm.Config): 141
Fields (org.apache.storm.tuple.Fields): 76
StormTopology (org.apache.storm.generated.StormTopology): 47
HashMap (java.util.HashMap): 41
LocalCluster (org.apache.storm.LocalCluster): 39
TestWordSpout (org.apache.storm.testing.TestWordSpout): 34
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 26
Test (org.junit.Test): 26
Test (org.junit.jupiter.api.Test): 26
Cluster (org.apache.storm.scheduler.Cluster): 25
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 25
Topologies (org.apache.storm.scheduler.Topologies): 25
Values (org.apache.storm.tuple.Values): 25
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 24
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 24
Map (java.util.Map): 23
INimbus (org.apache.storm.scheduler.INimbus): 23
StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 22
ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics): 22