Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.
The class TestUtilsForResourceAwareScheduler, method buildTopology.
public static StormTopology buildTopology(int numSpout, int numBolt, int spoutParallelism, int boltParallelism) {
    LOG.debug("buildTopology with -> numSpout: " + numSpout + " spoutParallelism: " + spoutParallelism
              + " numBolt: " + numBolt + " boltParallelism: " + boltParallelism);
    TopologyBuilder builder = new TopologyBuilder();
    for (int i = 0; i < numSpout; i++) {
        SpoutDeclarer s1 = builder.setSpout("spout-" + i, new TestSpout(), spoutParallelism);
    }
    // Connect each bolt to a spout, cycling through the spouts round-robin.
    int j = 0;
    for (int i = 0; i < numBolt; i++) {
        if (j >= numSpout) {
            j = 0;
        }
        BoltDeclarer b1 = builder.setBolt("bolt-" + i, new TestBolt(), boltParallelism).shuffleGrouping("spout-" + j);
        j++;
    }
    return builder.createTopology();
}
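For context, here is a minimal sketch of how this helper might be called from a test. The argument values and assertions are illustrative only and are not taken from the Storm test suite; assertEquals is assumed to be statically imported from JUnit.
// Illustrative only: build a topology with 1 spout feeding 2 bolts, parallelism 4 each.
StormTopology topology = TestUtilsForResourceAwareScheduler.buildTopology(1, 2, 4, 4);
// StormTopology is Thrift-generated, so the declared components can be inspected through the generated getters.
assertEquals(1, topology.get_spouts_size());
assertEquals(2, topology.get_bolts_size());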
Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.
The class GenLoad, method parseAndSubmit.
private static String parseAndSubmit(TopologyLoadConf tlc, String url) throws IOException, InvalidTopologyException, AuthorizationException, AlreadyAliveException {
    // First we need some configs.
    Config conf = new Config();
    if (tlc.topoConf != null) {
        conf.putAll(tlc.topoConf);
    }
    // On newer versions an unset acker count resolves to 0, so default to one acker executor per worker.
    Object ackers = conf.get(Config.TOPOLOGY_ACKER_EXECUTORS);
    Object workers = conf.get(Config.TOPOLOGY_WORKERS);
    if (ackers == null || ((Number) ackers).intValue() <= 0) {
        if (workers == null) {
            workers = 1;
        }
        conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, workers);
    }
    conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
    conf.registerMetricsConsumer(HttpForwardingMetricsConsumer.class, url, 1);
    Map<String, String> workerMetrics = new HashMap<>();
    if (!NimbusClient.isLocalOverride()) {
        // Sigar uses JNI and does not work in local mode.
        workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
    }
    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
    conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
    // Let's build the topology.
    TopologyBuilder builder = new TopologyBuilder();
    for (LoadCompConf spoutConf : tlc.spouts) {
        System.out.println("ADDING SPOUT " + spoutConf.id);
        SpoutDeclarer sd = builder.setSpout(spoutConf.id, new LoadSpout(spoutConf), spoutConf.parallelism);
        if (spoutConf.memoryLoad > 0) {
            sd.setMemoryLoad(spoutConf.memoryLoad);
        }
        if (spoutConf.cpuLoad > 0) {
            sd.setCPULoad(spoutConf.cpuLoad);
        }
    }
    Map<String, BoltDeclarer> boltDeclarers = new HashMap<>();
    Map<String, LoadBolt> bolts = new HashMap<>();
    if (tlc.bolts != null) {
        for (LoadCompConf boltConf : tlc.bolts) {
            System.out.println("ADDING BOLT " + boltConf.id);
            LoadBolt lb = new LoadBolt(boltConf);
            bolts.put(boltConf.id, lb);
            BoltDeclarer bd = builder.setBolt(boltConf.id, lb, boltConf.parallelism);
            if (boltConf.memoryLoad > 0) {
                bd.setMemoryLoad(boltConf.memoryLoad);
            }
            if (boltConf.cpuLoad > 0) {
                bd.setCPULoad(boltConf.cpuLoad);
            }
            boltDeclarers.put(boltConf.id, bd);
        }
    }
    if (tlc.streams != null) {
        for (InputStream in : tlc.streams) {
            BoltDeclarer declarer = boltDeclarers.get(in.toComponent);
            if (declarer == null) {
                throw new IllegalArgumentException("to bolt " + in.toComponent + " does not exist");
            }
            LoadBolt lb = bolts.get(in.toComponent);
            lb.add(in);
            in.groupingType.assign(declarer, in);
        }
    }
    String topoName = tlc.name + "-" + uniquifier++;
    StormSubmitter.submitTopology(topoName, conf, builder.createTopology());
    return topoName;
}
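The wiring step above delegates to in.groupingType.assign(declarer, in), which ends up calling one of the standard grouping methods on the BoltDeclarer. As a rough, hypothetical illustration (the component and stream names are invented), a shuffle-grouped stream boils down to something like:
// Hypothetical equivalent of assigning a shuffle grouping for a stream named "default" coming from "spout-0".
declarer.shuffleGrouping("spout-0", "default");
// A fields grouping would instead partition tuples by key:
// declarer.fieldsGrouping("spout-0", "default", new Fields("key"));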
Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.
The class TestUtilsForBlacklistScheduler, method buildTopology.
public static StormTopology buildTopology(int numSpout, int numBolt, int spoutParallelism, int boltParallelism) {
    LOG.debug("buildTopology with -> numSpout: " + numSpout + " spoutParallelism: " + spoutParallelism
              + " numBolt: " + numBolt + " boltParallelism: " + boltParallelism);
    TopologyBuilder builder = new TopologyBuilder();
    for (int i = 0; i < numSpout; i++) {
        SpoutDeclarer s1 = builder.setSpout("spout-" + i, new TestSpout(), spoutParallelism);
    }
    int j = 0;
    for (int i = 0; i < numBolt; i++) {
        if (j >= numSpout) {
            j = 0;
        }
        BoltDeclarer b1 = builder.setBolt("bolt-" + i, new TestBolt(), boltParallelism).shuffleGrouping("spout-" + j);
        // Advance to the next spout; without this increment every bolt would read from spout-0
        // and the reset above would be dead code.
        j++;
    }
    return builder.createTopology();
}
Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.
The class ResourceAwareExampleTopology, method main.
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    // A topology can set resources in terms of CPU and memory for each component.
    // These calls can be chained, as with the CPU requirement here.
    SpoutDeclarer spout = builder.setSpout("word", new TestWordSpout(), 10).setCPULoad(20);
    // Or done separately, as with setting the on-heap and off-heap memory requirement.
    spout.setMemoryLoad(64, 16);
    // On-heap memory is used to size the heap of the worker's Java process; off-heap memory covers
    // things like JNI allocations or the subprocesses behind ShellBolt/ShellSpout. The 16 MB of
    // off-heap here is only an example, since this topology does not use it.
    // Sometimes a bolt or spout has memory shared between its instances. This is typically a cache,
    // but it could be anything, such as a static database memory-mapped into the process. Shared
    // memory can be declared separately and added to the bolts and spouts that use it, or created
    // inline with the add call if only one component uses it.
    SharedOnHeap exclaimCache = new SharedOnHeap(100, "exclaim-cache");
    SharedOffHeapWithinNode notImplementedButJustAnExample = new SharedOffHeapWithinNode(500, "not-implemented-node-level-cache");
    // If CPU or memory is not set, the values stored in topology.component.resources.onheap.memory.mb,
    // topology.component.resources.offheap.memory.mb and topology.component.cpu.pcore.percent
    // are used instead.
    builder.setBolt("exclaim1", new ExclamationBolt(), 3).shuffleGrouping("word").addSharedMemory(exclaimCache);
    builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1").setMemoryLoad(100).addSharedMemory(exclaimCache).addSharedMemory(notImplementedButJustAnExample);
    Config conf = new Config();
    conf.setDebug(true);
    // Under RAS the number of workers is determined by the scheduler, and the worker count in the conf is ignored.
    // conf.setNumWorkers(3);
    // Instead the scheduler lets you set the maximum heap size for any worker.
    conf.setTopologyWorkerMaxHeapSize(1024.0);
    // The scheduler generally tries to pack executors into workers until the max heap size is reached,
    // though this varies with the scheduling strategy selected. The goal is to balance the maximum GC
    // pause time (which grows with larger heaps) against the better performance of not having to
    // serialize/deserialize tuples between workers.
    // A topology's priority starts at 0 (the highest priority) and decreases in importance as the
    // number increases. The recommended range is 0-29, but there is no hard limit.
    // If the cluster does not have enough resources, the priority, combined with how far over its
    // guarantees a user is, decides which topologies run and which do not.
    conf.setTopologyPriority(29);
    // Set the default resource-aware strategy to use with the MultitenantResourceAwareBridgeScheduler.
    conf.setTopologyStrategy("org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy");
    String topoName = "test";
    if (args != null && args.length > 0) {
        topoName = args[0];
    }
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, builder.createTopology());
}
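As the comment above notes, components that never call setMemoryLoad or setCPULoad fall back to topology-level defaults. A small sketch of setting those defaults through Config, with purely illustrative numbers:
Config defaults = new Config();
// Fallback per-component resources, used when a declarer sets nothing explicitly.
defaults.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 128.0);
defaults.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 0.0);
defaults.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 10.0);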
Use of org.apache.storm.topology.SpoutDeclarer in project storm by apache.
The class Thrift, method buildTopology.
public static StormTopology buildTopology(Map<String, SpoutDetails> spoutMap, Map<String, BoltDetails> boltMap) {
    TopologyBuilder builder = new TopologyBuilder();
    for (Entry<String, SpoutDetails> entry : spoutMap.entrySet()) {
        String spoutId = entry.getKey();
        SpoutDetails spec = entry.getValue();
        SpoutDeclarer spoutDeclarer = builder.setSpout(spoutId, spec.getSpout(), spec.getParallelism());
        spoutDeclarer.addConfigurations(spec.getConf());
    }
    for (Entry<String, BoltDetails> entry : boltMap.entrySet()) {
        String boltId = entry.getKey();
        BoltDetails spec = entry.getValue();
        // Pick the setBolt overload that matches the bolt's interface.
        BoltDeclarer boltDeclarer;
        if (spec.getBolt() instanceof IRichBolt) {
            boltDeclarer = builder.setBolt(boltId, (IRichBolt) spec.getBolt(), spec.getParallelism());
        } else {
            boltDeclarer = builder.setBolt(boltId, (IBasicBolt) spec.getBolt(), spec.getParallelism());
        }
        boltDeclarer.addConfigurations(spec.getConf());
        addInputs(boltDeclarer, spec.getInputs());
    }
    return builder.createTopology();
}
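The instanceof branch above simply picks between TopologyBuilder's two setBolt overloads, as in this minimal sketch (MyRichBolt and MyBasicBolt are hypothetical placeholder classes):
TopologyBuilder builder = new TopologyBuilder();
builder.setBolt("rich", new MyRichBolt(), 2);   // setBolt(String, IRichBolt, Number)
builder.setBolt("basic", new MyBasicBolt(), 2); // setBolt(String, IBasicBolt, Number)
An IBasicBolt is wrapped by the builder so that anchoring and acking are handled automatically, which is why the two overloads exist.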