Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The main method of the class LookupWordCount.
public static void main(String[] args) throws Exception {
    Config config = new Config();
    String host = TEST_REDIS_HOST;
    int port = TEST_REDIS_PORT;
    if (args.length >= 2) {
        host = args[0];
        port = Integer.parseInt(args[1]);
    }
    JedisPoolConfig poolConfig = new JedisPoolConfig.Builder().setHost(host).setPort(port).build();
    WordSpout spout = new WordSpout();
    RedisLookupMapper lookupMapper = setupLookupMapper();
    RedisLookupBolt lookupBolt = new RedisLookupBolt(poolConfig, lookupMapper);
    PrintWordTotalCountBolt printBolt = new PrintWordTotalCountBolt();
    // wordSpout -> lookupBolt -> printBolt
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(WORD_SPOUT, spout, 1);
    builder.setBolt(LOOKUP_BOLT, lookupBolt, 1).shuffleGrouping(WORD_SPOUT);
    builder.setBolt(PRINT_BOLT, printBolt, 1).shuffleGrouping(LOOKUP_BOLT);
    if (args.length == 2) {
        try (LocalCluster cluster = new LocalCluster();
             LocalTopology topo = cluster.submitTopology("test", config, builder.createTopology())) {
            Thread.sleep(30000);
        }
        System.exit(0);
    } else if (args.length == 3) {
        StormSubmitter.submitTopology(args[2], config, builder.createTopology());
    } else {
        System.out.println("Usage: LookupWordCount <redis host> <redis port> (topology name)");
    }
}
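The snippet above calls setupLookupMapper() without showing it. A minimal sketch of what it might return, assuming the spout emits a "word" field and the looked-up count should be emitted as (wordName, count); the output field names are illustrative, not necessarily the project's actual WordCountRedisLookupMapper:

private static RedisLookupMapper setupLookupMapper() {
    return new RedisLookupMapper() {
        @Override
        public List<Values> toTuple(ITuple input, Object value) {
            // pair the looked-up Redis value with the word that produced it
            String word = input.getStringByField("word");
            return Collections.singletonList(new Values(word, value));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("wordName", "count"));
        }

        @Override
        public RedisDataTypeDescription getDataTypeDescription() {
            // counts are assumed to be stored under plain STRING keys
            return new RedisDataTypeDescription(RedisDataTypeDescription.RedisDataType.STRING);
        }

        @Override
        public String getKeyFromTuple(ITuple tuple) {
            return tuple.getStringByField("word");
        }

        @Override
        public String getValueFromTuple(ITuple tuple) {
            // not used for lookups
            return null;
        }
    };
}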
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The main method of the class HdfsSpoutTopology.
/** Reads text file content from sourceDir using HdfsSpout and moves fully consumed files into archiveDir. */
public static void main(String[] args) throws Exception {
    // 0 - validate args
    if (args.length < 7) {
        System.err.println("Please check command line arguments.");
        System.err.println("Usage :");
        System.err.println(HdfsSpoutTopology.class.toString() + " topologyName hdfsUri fileFormat sourceDir archiveDir badDir spoutCount");
        System.err.println(" topologyName - topology name.");
        System.err.println(" hdfsUri - hdfs name node URI.");
        System.err.println(" fileFormat - set to 'TEXT' for reading text files or 'SEQ' for sequence files.");
        System.err.println(" sourceDir - read files from this HDFS dir using HdfsSpout.");
        System.err.println(" archiveDir - after a file in sourceDir is read completely, it is moved to this HDFS location.");
        System.err.println(" badDir - files that cannot be read properly will be moved to this HDFS location.");
        System.err.println(" spoutCount - number of spout instances.");
        System.err.println();
        System.exit(-1);
    }
    // 1 - parse cmd line args
    String topologyName = args[0];
    String hdfsUri = args[1];
    String fileFormat = args[2];
    String sourceDir = args[3];
    String archiveDir = args[4];
    String badDir = args[5];
    int spoutNum = Integer.parseInt(args[6]);
    // 2 - create and configure spout and bolt
    ConstBolt bolt = new ConstBolt();
    HdfsSpout spout = new HdfsSpout().withOutputFields(TextFileReader.defaultFields)
            .setReaderType(fileFormat)
            .setHdfsUri(hdfsUri)
            .setSourceDir(sourceDir)
            .setArchiveDir(archiveDir)
            .setBadFilesDir(badDir);
    // 3 - create and configure topology
    Config conf = new Config();
    conf.setNumWorkers(1);
    conf.setNumAckers(1);
    conf.setMaxTaskParallelism(1);
    conf.setDebug(true);
    conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, spoutNum);
    builder.setBolt(BOLT_ID, bolt, 1).shuffleGrouping(SPOUT_ID);
    // 4 - submit topology, wait, and terminate it
    Map clusterConf = Utils.readStormConfig();
    StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, builder.createTopology());
    Nimbus.Client client = NimbusClient.getConfiguredClient(clusterConf).getClient();
    // 5 - print metrics every 30 sec, kill topology after 20 min
    for (int i = 0; i < 40; i++) {
        Thread.sleep(30 * 1000);
        printMetrics(client, topologyName);
    }
    kill(client, topologyName);
}
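The kill(client, topologyName) helper is not shown in this excerpt. A plausible sketch using the Nimbus thrift client API (the zero-second wait is an assumption):

private static void kill(Nimbus.Client client, String topologyName) throws Exception {
    // ask Nimbus to kill the topology without the usual shutdown delay
    KillOptions opts = new KillOptions();
    opts.set_wait_secs(0);
    client.killTopologyWithOpts(topologyName, opts);
}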
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The main method of the class SlidingTupleTsTopology.
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    BaseWindowedBolt bolt = new SlidingWindowSumBolt()
            .withWindow(new Duration(5, TimeUnit.SECONDS), new Duration(3, TimeUnit.SECONDS))
            .withTimestampField("ts")
            .withLag(new Duration(5, TimeUnit.SECONDS));
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum", bolt, 1).shuffleGrouping("integer");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("slidingsum");
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(1);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        try (LocalCluster cluster = new LocalCluster();
             LocalTopology topo = cluster.submitTopology("test", conf, builder.createTopology())) {
            Utils.sleep(40000);
        }
    }
}
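withTimestampField("ts") makes the window bolt use event time taken from a "ts" field on each tuple, and withLag(...) bounds how late out-of-order tuples may arrive before a window closes. For that to work the spout must emit such a field. A minimal sketch of a spout that does so (class name, emitted values, and emit rate are illustrative assumptions; the field names match what the bolt above expects):

import java.util.Map;
import java.util.Random;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

public class RandomIntegerWithTsSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;
    private Random rand;

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
        this.rand = new Random();
    }

    @Override
    public void nextTuple() {
        Utils.sleep(100);
        // "ts" carries the event time that withTimestampField("ts") reads
        collector.emit(new Values(rand.nextInt(1000), System.currentTimeMillis()));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("value", "ts"));
    }
}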
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The main method of the class SlidingWindowTopology.
public static void main(String[] args) throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("integer", new RandomIntegerSpout(), 1);
    builder.setBolt("slidingsum", new SlidingWindowSumBolt().withWindow(Count.of(30), Count.of(10)), 1).shuffleGrouping("integer");
    builder.setBolt("tumblingavg", new TumblingWindowAvgBolt().withTumblingWindow(Count.of(3)), 1).shuffleGrouping("slidingsum");
    builder.setBolt("printer", new PrinterBolt(), 1).shuffleGrouping("tumblingavg");
    Config conf = new Config();
    conf.setDebug(true);
    if (args != null && args.length > 0) {
        conf.setNumWorkers(1);
        StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
    } else {
        try (LocalCluster cluster = new LocalCluster();
             LocalTopology topo = cluster.submitTopology("test", conf, builder.createTopology())) {
            Utils.sleep(40000);
        }
    }
}
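TumblingWindowAvgBolt is not shown in this excerpt. A sketch of how such a bolt could average the sums arriving in each 3-tuple tumbling window, assuming the upstream sum is the first field of each tuple (a sketch of the idea, not necessarily the project's exact inner class):

public static class TumblingWindowAvgBolt extends BaseWindowedBolt {
    private OutputCollector collector;

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(TupleWindow inputWindow) {
        int sum = 0;
        List<Tuple> tuplesInWindow = inputWindow.get();
        if (!tuplesInWindow.isEmpty()) {
            for (Tuple tuple : tuplesInWindow) {
                sum += (int) tuple.getValue(0);
            }
            // emit the average of the sums received in this tumbling window
            collector.emit(new Values(sum / tuplesInWindow.size()));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("avg"));
    }
}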
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The main method of the class ThroughputVsLatency.
public static void main(String[] args) throws Exception {
    long ratePerSecond = 500;
    if (args != null && args.length > 0) {
        ratePerSecond = Long.valueOf(args[0]);
    }
    int parallelism = 4;
    if (args != null && args.length > 1) {
        parallelism = Integer.valueOf(args[1]);
    }
    int numMins = 5;
    if (args != null && args.length > 2) {
        numMins = Integer.valueOf(args[2]);
    }
    String name = "wc-test";
    if (args != null && args.length > 3) {
        name = args[3];
    }
    Config conf = new Config();
    HttpForwardingMetricsServer metricServer = new HttpForwardingMetricsServer(conf) {

        @Override
        public void handle(TaskInfo taskInfo, Collection<DataPoint> dataPoints) {
            String worker = taskInfo.srcWorkerHost + ":" + taskInfo.srcWorkerPort;
            for (DataPoint dp : dataPoints) {
                if ("comp-lat-histo".equals(dp.name) && dp.value instanceof Histogram) {
                    synchronized (_histo) {
                        _histo.add((Histogram) dp.value);
                    }
                } else if ("CPU".equals(dp.name) && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object sys = m.get("sys-ms");
                    if (sys instanceof Number) {
                        _systemCPU.getAndAdd(((Number) sys).longValue());
                    }
                    Object user = m.get("user-ms");
                    if (user instanceof Number) {
                        _userCPU.getAndAdd(((Number) user).longValue());
                    }
                } else if (dp.name.startsWith("GC/") && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object count = m.get("count");
                    if (count instanceof Number) {
                        _gcCount.getAndAdd(((Number) count).longValue());
                    }
                    Object time = m.get("timeMs");
                    if (time instanceof Number) {
                        _gcMs.getAndAdd(((Number) time).longValue());
                    }
                } else if (dp.name.startsWith("memory/") && dp.value instanceof Map) {
                    Map<Object, Object> m = (Map<Object, Object>) dp.value;
                    Object val = m.get("usedBytes");
                    if (val instanceof Number) {
                        MemMeasure mm = _memoryBytes.get(worker);
                        if (mm == null) {
                            mm = new MemMeasure();
                            MemMeasure tmp = _memoryBytes.putIfAbsent(worker, mm);
                            mm = tmp == null ? mm : tmp;
                        }
                        mm.update(((Number) val).longValue());
                    }
                }
            }
        }
    };
    metricServer.serve();
    String url = metricServer.getUrl();
    C cluster = new C(conf);
    conf.setNumWorkers(parallelism);
    conf.registerMetricsConsumer(org.apache.storm.metric.LoggingMetricsConsumer.class);
    conf.registerMetricsConsumer(org.apache.storm.metric.HttpForwardingMetricsConsumer.class, url, 1);
    Map<String, String> workerMetrics = new HashMap<String, String>();
    if (!cluster.isLocal()) {
        // sigar uses JNI and does not work in local mode
        workerMetrics.put("CPU", "org.apache.storm.metrics.sigar.CPUMetric");
    }
    conf.put(Config.TOPOLOGY_WORKER_METRICS, workerMetrics);
    conf.put(Config.TOPOLOGY_BUILTIN_METRICS_BUCKET_SIZE_SECS, 10);
    conf.put(Config.TOPOLOGY_WORKER_GC_CHILDOPTS, "-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:NewSize=128m -XX:CMSInitiatingOccupancyFraction=70 -XX:-CMSConcurrentMTEnabled");
    conf.put(Config.TOPOLOGY_WORKER_CHILDOPTS, "-Xmx2g");
    TopologyBuilder builder = new TopologyBuilder();
    int numEach = 4 * parallelism;
    builder.setSpout("spout", new FastRandomSentenceSpout(ratePerSecond / numEach), numEach);
    builder.setBolt("split", new SplitSentence(), numEach).shuffleGrouping("spout");
    builder.setBolt("count", new WordCount(), numEach).fieldsGrouping("split", new Fields("word"));
    try {
        cluster.submitTopology(name, conf, builder.createTopology());
        for (int i = 0; i < numMins * 2; i++) {
            Thread.sleep(30 * 1000);
            printMetrics(cluster, name);
        }
    } finally {
        kill(cluster, name);
    }
    System.exit(0);
}
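The metrics handler above records per-worker memory readings into MemMeasure objects, which are not shown in this excerpt. A plausible sketch that tracks the peak usedBytes reading per worker (the exact fields of the project's MemMeasure are an assumption):

static class MemMeasure {
    private long mem = 0;
    private long time = 0;

    // keep the largest memory reading seen so far and note when it arrived
    synchronized void update(long mem) {
        this.mem = Math.max(this.mem, mem);
        this.time = System.currentTimeMillis();
    }

    synchronized long get() {
        return mem;
    }
}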