Use of backtype.storm.Config in project heron by twitter.
The class MultiStageAckingTopology, method main.
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Please specify the name of the topology");
  }
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new AckingTestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(true), 2).shuffleGrouping("word");
  builder.setBolt("exclaim2", new ExclamationBolt(false), 2).shuffleGrouping("exclaim1");
  Config conf = new Config();
  conf.setDebug(true);
  // Put an arbitrarily large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);
  // To enable acking, call setEnableAcking(true)
  conf.setEnableAcking(true);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  conf.setNumStmgrs(1);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
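The ExclamationBolt used above is defined elsewhere in the example. The sketch below is a minimal, hypothetical version, assuming the boolean constructor argument controls whether the bolt emits downstream (the wiring suggests exclaim2, the last stage, only acks):

import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

// Hypothetical sketch: not the project's actual ExclamationBolt.
public class ExclamationBolt extends BaseRichBolt {
  private static final long serialVersionUID = 1L;
  private final boolean emit;  // assumption: true = forward tuples downstream
  private OutputCollector collector;

  public ExclamationBolt(boolean emit) {
    this.emit = emit;
  }

  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector outputCollector) {
    this.collector = outputCollector;
  }

  @Override
  public void execute(Tuple tuple) {
    if (emit) {
      // Anchor the emitted tuple to the input so the ack chain stays intact across stages.
      collector.emit(tuple, new Values(tuple.getString(0) + "!!!"));
    }
    // Ack the input so the spout's pending count goes down (acking is enabled above).
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word"));
  }
}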
Use of backtype.storm.Config in project heron by twitter.
The class WordCountTopology, method main.
/**
* Main method
*/
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
  if (args.length < 1) {
    throw new RuntimeException("Specify topology name");
  }
  int parallelism = 1;
  if (args.length > 1) {
    parallelism = Integer.parseInt(args[1]);
  }
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new WordSpout(), parallelism);
  builder.setBolt("consumer", new ConsumerBolt(), parallelism)
      .fieldsGrouping("word", new Fields("word"));
  Config conf = new Config();
  conf.setNumStmgrs(parallelism);
  /*
  Set config here
  */
  conf.setComponentRam("word", ByteAmount.fromGigabytes(2));
  conf.setComponentRam("consumer", ByteAmount.fromGigabytes(3));
  conf.setContainerCpuRequested(6);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
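WordSpout and ConsumerBolt are defined elsewhere in the project. A minimal, hypothetical counting bolt compatible with the fieldsGrouping above might look like this, assuming the spout declares a single "word" field:

import java.util.HashMap;
import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

// Hypothetical sketch: not the project's actual ConsumerBolt.
public class ConsumerBolt extends BaseRichBolt {
  private static final long serialVersionUID = 1L;
  private OutputCollector collector;
  private Map<String, Integer> counts;

  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector outputCollector) {
    this.collector = outputCollector;
    this.counts = new HashMap<>();
  }

  @Override
  public void execute(Tuple tuple) {
    // fieldsGrouping("word", new Fields("word")) routes every occurrence of a given
    // word to the same ConsumerBolt instance, so a local map is enough for counting.
    String word = tuple.getStringByField("word");
    counts.merge(word, 1, Integer::sum);
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // Terminal bolt: nothing emitted downstream.
  }
}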
Use of backtype.storm.Config in project heron by twitter.
The class AckingTopology, method main.
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    throw new RuntimeException("Specify topology name");
  }
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new AckingTestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(), 2).shuffleGrouping("word");
  Config conf = new Config();
  conf.setDebug(true);
  // Put an arbitrarily large number here if you don't want to slow the topology down
  conf.setMaxSpoutPending(1000 * 1000 * 1000);
  // To enable acking, call setEnableAcking(true)
  conf.setEnableAcking(true);
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  // Set the number of stream managers
  conf.setNumStmgrs(1);
  StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
}
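Acking only tracks tuples that the spout emits with a message ID. The sketch below is a hypothetical version of such a spout (not the project's actual AckingTestWordSpout), showing where the ack and fail callbacks land:

import java.util.Map;
import java.util.Random;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

// Hypothetical sketch: not the project's actual AckingTestWordSpout.
public class AckingTestWordSpout extends BaseRichSpout {
  private static final long serialVersionUID = 1L;
  private static final String[] WORDS = {"nathan", "mike", "jackson", "golda", "bertels"};
  private SpoutOutputCollector collector;
  private Random rand;
  private long nextId;

  @Override
  public void open(Map conf, TopologyContext context, SpoutOutputCollector outputCollector) {
    this.collector = outputCollector;
    this.rand = new Random();
  }

  @Override
  public void nextTuple() {
    String word = WORDS[rand.nextInt(WORDS.length)];
    // Passing a message ID makes the tuple tracked; it counts against
    // setMaxSpoutPending until it is acked or failed downstream.
    collector.emit(new Values(word), nextId++);
  }

  @Override
  public void ack(Object msgId) {
    // Tuple fully processed downstream; nothing to replay.
  }

  @Override
  public void fail(Object msgId) {
    // A real spout would re-emit the tuple identified by msgId here.
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word"));
  }
}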
Use of backtype.storm.Config in project heron by twitter.
The class ComponentJVMOptionsTopology, method main.
public static void main(String[] args) throws Exception {
  TopologyBuilder builder = new TopologyBuilder();
  builder.setSpout("word", new TestWordSpout(), 2);
  builder.setBolt("exclaim1", new ExclamationBolt(), 2).shuffleGrouping("word");
  Config conf = new Config();
  conf.setDebug(true);
  conf.setMaxSpoutPending(10);
  conf.setComponentRam("word", ByteAmount.fromMegabytes(500));
  conf.setComponentRam("exclaim1", ByteAmount.fromGigabytes(1));
  // TOPOLOGY_WORKER_CHILDOPTS applies globally to every component
  conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-XX:+HeapDumpOnOutOfMemoryError");
  // For each component, the global options and, if present, the component-specific
  // options are appended, with the component-specific options taking precedence.
  conf.setComponentJvmOptions("word", "-XX:NewSize=300m");
  conf.setComponentJvmOptions("exclaim1", "-XX:NewSize=800m");
  if (args != null && args.length > 0) {
    conf.setNumStmgrs(1);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
  } else {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    cluster.killTopology("test");
    cluster.shutdown();
  }
}
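To confirm that the per-component JVM options and RAM settings actually reached a component's JVM, one option is to log the JVM's own view of them at startup. The bolt below is purely illustrative (not part of the example above) and uses only standard JDK APIs:

import java.lang.management.ManagementFactory;
import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

// Hypothetical diagnostic bolt: logs the JVM flags its process was started with.
public class JvmOptionsLoggingBolt extends BaseRichBolt {
  private static final long serialVersionUID = 1L;
  private OutputCollector collector;

  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector outputCollector) {
    this.collector = outputCollector;
    // Shows whether -XX:NewSize and -XX:+HeapDumpOnOutOfMemoryError were applied.
    System.out.println("JVM args: " + ManagementFactory.getRuntimeMXBean().getInputArguments());
    // Roughly reflects the heap granted as a result of setComponentRam.
    System.out.println("Max heap: " + Runtime.getRuntime().maxMemory() + " bytes");
  }

  @Override
  public void execute(Tuple tuple) {
    collector.ack(tuple);
  }

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // No output fields.
  }
}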
Use of backtype.storm.Config in project storm-hbase by jrkinley.
The class HBaseTridentAggregateTopology, method main.
/**
* @param args
* @throws InterruptedException
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public static void main(String[] args) throws InterruptedException {
  List<Object> v0 = HBaseCountersBatchTopology.values.get(0).get(0);
  List<Object> v1 = HBaseCountersBatchTopology.values.get(0).get(1);
  List<Object> v2 = HBaseCountersBatchTopology.values.get(0).get(2);
  List<Object> v3 = HBaseCountersBatchTopology.values.get(0).get(3);
  List<Object> v4 = HBaseCountersBatchTopology.values.get(0).get(4);
  List<Object> v5 = HBaseCountersBatchTopology.values.get(1).get(0);
  List<Object> v6 = HBaseCountersBatchTopology.values.get(1).get(1);
  List<Object> v7 = HBaseCountersBatchTopology.values.get(1).get(2);
  List<Object> v8 = HBaseCountersBatchTopology.values.get(2).get(0);
  List<Object> v9 = HBaseCountersBatchTopology.values.get(2).get(1);
  List<Object> v10 = HBaseCountersBatchTopology.values.get(2).get(2);
  HBaseCountersBatchTopology.values.values();
  FixedBatchSpout spout = new FixedBatchSpout(new Fields("shortid", "url", "user", "date"), 3,
      v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10);
  spout.setCycle(false);
  TridentConfig config = new TridentConfig("shorturl", "shortid");
  config.setBatch(false);
  StateFactory state = HBaseAggregateState.transactional(config);
  TridentTopology topology = new TridentTopology();
  topology.newStream("spout", spout)
      .each(new Fields("shortid", "date"), new DatePartitionFunction(), new Fields("cf", "cq"))
      .project(new Fields("shortid", "cf", "cq"))
      .groupBy(new Fields("shortid", "cf", "cq"))
      .persistentAggregate(state, new Count(), new Fields("count"));
  Config conf = new Config();
  LocalCluster cluster = new LocalCluster();
  cluster.submitTopology("hbase-trident-aggregate", conf, topology.build());
  Utils.sleep(5000);
  cluster.shutdown();
}
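DatePartitionFunction comes from the storm-hbase project and is not shown here. A hypothetical Trident function in the same spirit, assuming it turns the date into an HBase column family/qualifier pair (the emitted values below are illustrative):

import backtype.storm.tuple.Values;
import storm.trident.operation.BaseFunction;
import storm.trident.operation.TridentCollector;
import storm.trident.tuple.TridentTuple;

// Hypothetical sketch: not the project's actual DatePartitionFunction.
public class DatePartitionFunction extends BaseFunction {
  private static final long serialVersionUID = 1L;

  @Override
  public void execute(TridentTuple tuple, TridentCollector collector) {
    // Input fields declared upstream: ("shortid", "date").
    Object date = tuple.getValueByField("date");
    // Illustrative split: column family "data", column qualifier derived from the date,
    // so persistentAggregate keeps one counter per (shortid, cf, cq) group.
    collector.emit(new Values("data", String.valueOf(date)));
  }
}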