Use of backtype.storm.topology.TopologyBuilder in project storm by nathanmarz.
The class TransactionalTopologyBuilder, method buildTopologyBuilder.
public TopologyBuilder buildTopologyBuilder() {
    String coordinator = _spoutId + "/coordinator";
    TopologyBuilder builder = new TopologyBuilder();
    // The coordinator spout drives transaction attempts; the user's transactional spout
    // runs as a batch emitter wrapped in a CoordinatedBolt below.
    SpoutDeclarer declarer = builder.setSpout(coordinator, new TransactionalSpoutCoordinator(_spout));
    for (Map conf : _spoutConfs) {
        declarer.addConfigurations(conf);
    }
    declarer.addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    BoltDeclarer emitterDeclarer = builder
            .setBolt(_spoutId, new CoordinatedBolt(new TransactionalSpoutBatchExecutor(_spout), null, null), _spoutParallelism)
            .allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_BATCH_STREAM_ID)
            .addConfiguration(Config.TOPOLOGY_TRANSACTIONAL_ID, _id);
    if (_spout instanceof ICommitterTransactionalSpout) {
        emitterDeclarer.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
    }
    for (String id : _bolts.keySet()) {
        Component component = _bolts.get(id);
        Map<String, SourceArgs> coordinatedArgs = new HashMap<String, SourceArgs>();
        for (String c : componentBoltSubscriptions(component)) {
            coordinatedArgs.put(c, SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
        if (component.committer) {
            idSpec = IdStreamSpec.makeDetectSpec(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
        BoltDeclarer input = builder.setBolt(id, new CoordinatedBolt(component.bolt, coordinatedArgs, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            input.addConfigurations(conf);
        }
        // Each bolt also listens on the coordination stream of every component it subscribes to.
        for (String c : componentBoltSubscriptions(component)) {
            input.directGrouping(c, Constants.COORDINATED_STREAM_ID);
        }
        for (InputDeclaration d : component.declarations) {
            d.declare(input);
        }
        if (component.committer) {
            input.allGrouping(coordinator, TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID);
        }
    }
    return builder;
}
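buildTopologyBuilder() is normally reached through buildTopology(), which simply calls createTopology() on the returned builder. For orientation, here is a minimal caller sketch in the style of the storm-starter transactional examples; MemoryTransactionalSpout is a real test spout, while DATA, BatchCount and UpdateGlobalCount are illustrative placeholders, not part of the snippet above.

// Hedged sketch: DATA is an in-memory partition map; BatchCount and UpdateGlobalCount are placeholder batch bolts.
MemoryTransactionalSpout spout = new MemoryTransactionalSpout(DATA, new Fields("word"), 3);
TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("global-count", "spout", spout, 3);
builder.setBolt("partial-count", new BatchCount(), 5).noneGrouping("spout");
builder.setBolt("sum", new UpdateGlobalCount()).globalGrouping("partial-count");
Config conf = new Config();
conf.setMaxSpoutPending(3);
LocalCluster cluster = new LocalCluster();
// buildTopology() delegates to buildTopologyBuilder().createTopology()
cluster.submitTopology("global-count-topology", conf, builder.buildTopology());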
Use of backtype.storm.topology.TopologyBuilder in project storm by nathanmarz.
The class LinearDRPCTopologyBuilder, method createTopology.
private StormTopology createTopology(DRPCSpout spout) {
    final String SPOUT_ID = "spout";
    final String PREPARE_ID = "prepare-request";
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout);
    builder.setBolt(PREPARE_ID, new PrepareRequest()).noneGrouping(SPOUT_ID);
    int i = 0;
    for (; i < _components.size(); i++) {
        Component component = _components.get(i);
        Map<String, SourceArgs> source = new HashMap<String, SourceArgs>();
        // The first bolt handles each request in one task, so the second bolt expects a
        // coordination count from a single source task; later bolts expect counts from
        // every task of the previous bolt.
        if (i == 1) {
            source.put(boltId(i - 1), SourceArgs.single());
        } else if (i >= 2) {
            source.put(boltId(i - 1), SourceArgs.all());
        }
        IdStreamSpec idSpec = null;
        if (i == _components.size() - 1 && component.bolt instanceof FinishedCallback) {
            idSpec = IdStreamSpec.makeDetectSpec(PREPARE_ID, PrepareRequest.ID_STREAM);
        }
        BoltDeclarer declarer = builder.setBolt(boltId(i), new CoordinatedBolt(component.bolt, source, idSpec), component.parallelism);
        for (Map conf : component.componentConfs) {
            declarer.addConfigurations(conf);
        }
        if (idSpec != null) {
            declarer.fieldsGrouping(idSpec.getGlobalStreamId().get_componentId(), PrepareRequest.ID_STREAM, new Fields("request"));
        }
        if (i == 0 && component.declarations.isEmpty()) {
            declarer.noneGrouping(PREPARE_ID, PrepareRequest.ARGS_STREAM);
        } else {
            String prevId;
            if (i == 0) {
                prevId = PREPARE_ID;
            } else {
                prevId = boltId(i - 1);
            }
            for (InputDeclaration declaration : component.declarations) {
                declaration.declare(prevId, declarer);
            }
        }
        if (i > 0) {
            declarer.directGrouping(boltId(i - 1), Constants.COORDINATED_STREAM_ID);
        }
    }
    IRichBolt lastBolt = _components.get(_components.size() - 1).bolt;
    OutputFieldsGetter getter = new OutputFieldsGetter();
    lastBolt.declareOutputFields(getter);
    Map<String, StreamInfo> streams = getter.getFieldsDeclaration();
    if (streams.size() != 1) {
        throw new RuntimeException("Must declare exactly one stream from last bolt in LinearDRPCTopology");
    }
    String outputStream = streams.keySet().iterator().next();
    List<String> fields = streams.get(outputStream).get_output_fields();
    if (fields.size() != 2) {
        throw new RuntimeException("Output stream of last component in LinearDRPCTopology must contain exactly two fields. The first should be the request id, and the second should be the result.");
    }
    // JoinResult joins each result with its DRPC return info; ReturnResults sends the
    // answer back to the DRPC server.
    builder.setBolt(boltId(i), new JoinResult(PREPARE_ID))
            .fieldsGrouping(boltId(i - 1), outputStream, new Fields(fields.get(0)))
            .fieldsGrouping(PREPARE_ID, PrepareRequest.RETURN_STREAM, new Fields("request"));
    i++;
    builder.setBolt(boltId(i), new ReturnResults()).noneGrouping(boltId(i - 1));
    return builder.createTopology();
}
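createTopology(DRPCSpout) is private; callers reach it through createLocalTopology(ILocalDRPC) or createRemoteTopology(). A minimal local-mode sketch, assuming a placeholder ExclaimBolt that emits the two-field [id, result] tuple required by the check above:

// Hedged sketch; ExclaimBolt is a placeholder basic bolt emitting [id, result].
LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
builder.addBolt(new ExclaimBolt(), 3);
Config conf = new Config();
LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));
System.out.println("Result: " + drpc.execute("exclamation", "hello"));
cluster.shutdown();
drpc.shutdown();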
Use of backtype.storm.topology.TopologyBuilder in project storm-lib by xumingming.
The class TestingApiDemo, method testTimeout.
public void testTimeout() {
    Config daemonConfig = new Config();
    daemonConfig.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true);
    MkClusterParam mkClusterParam = new MkClusterParam();
    mkClusterParam.setDaemonConf(daemonConfig);
    Testing.withSimulatedTimeLocalCluster(mkClusterParam, new TestJob() {

        @Override
        public void run(ILocalCluster cluster) {
            AckFailMapTracker tracker = new AckFailMapTracker();
            FeederSpout feeder = createFeederSpout("field1");
            feeder.setAckFailDelegate(tracker);
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("1", feeder);
            builder.setBolt("2", new AckEveryOtherBolt()).globalGrouping("1");
            StormTopology topology = builder.createTopology();
            Config topologyConfig = new Config();
            topologyConfig.setMessageTimeoutSecs(10);
            /**
             * TODO
             */
            try {
                cluster.submitTopology("timeout-tester", topologyConfig, topology);
            } catch (AlreadyAliveException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
            feeder.feed(new Values("a"), 1);
            feeder.feed(new Values("b"), 2);
            feeder.feed(new Values("c"), 3);
            /**
             * TODO
             */
            Testing.advanceClusterTime(cluster, 9);
            assertAcked(tracker, 1, 3);
            assertFalse(tracker.isFailed(2));
            Testing.advanceClusterTime(cluster, 12);
            assertFailed(tracker, 2);
        }
    });
}
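The createFeederSpout helper and AckEveryOtherBolt are not shown in this demo. Presumably the spout is a backtype.storm.testing.FeederSpout and the bolt acks only every other tuple, so the skipped ones hit the 10-second timeout; a rough reconstruction under that assumption (not the project's actual code):

// Hypothetical reconstructions of the helpers referenced in testTimeout().
private static FeederSpout createFeederSpout(String... fields) {
    return new FeederSpout(new Fields(fields));
}

public static class AckEveryOtherBolt extends BaseRichBolt {
    private OutputCollector _collector;
    private boolean _ack = true;

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        _collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        // Ack tuples 1, 3, 5, ... and silently drop 2, 4, 6, ... so they time out.
        if (_ack) {
            _collector.ack(tuple);
        }
        _ack = !_ack;
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
    }
}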
Use of backtype.storm.topology.TopologyBuilder in project pulsar by yahoo.
The class StormExample, method main.
public static void main(String[] args) throws PulsarClientException {
    ClientConfiguration clientConf = new ClientConfiguration();
    // String authPluginClassName = "com.yahoo.pulsar.client.impl.auth.MyAuthentication";
    // String authParams = "key1:val1,key2:val2";
    // clientConf.setAuthentication(authPluginClassName, authParams);
    String topic1 = "persistent://my-property/use/my-ns/my-topic1";
    String topic2 = "persistent://my-property/use/my-ns/my-topic2";
    String subscriptionName1 = "my-subscriber-name1";
    String subscriptionName2 = "my-subscriber-name2";
    // create spout
    PulsarSpoutConfiguration spoutConf = new PulsarSpoutConfiguration();
    spoutConf.setServiceUrl(serviceUrl);
    spoutConf.setTopic(topic1);
    spoutConf.setSubscriptionName(subscriptionName1);
    spoutConf.setMessageToValuesMapper(messageToValuesMapper);
    PulsarSpout spout = new PulsarSpout(spoutConf, clientConf);
    // create bolt
    PulsarBoltConfiguration boltConf = new PulsarBoltConfiguration();
    boltConf.setServiceUrl(serviceUrl);
    boltConf.setTopic(topic2);
    boltConf.setTupleToMessageMapper(tupleToMessageMapper);
    PulsarBolt bolt = new PulsarBolt(boltConf, clientConf);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("testSpout", spout);
    builder.setBolt("testBolt", bolt).shuffleGrouping("testSpout");
    Config conf = new Config();
    conf.setNumWorkers(2);
    conf.setDebug(true);
    conf.registerMetricsConsumer(PulsarMetricsConsumer.class);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    PulsarClient pulsarClient = PulsarClient.create(serviceUrl, clientConf);
    // create a consumer on topic2 to receive messages from the bolt when the processing is done
    Consumer consumer = pulsarClient.subscribe(topic2, subscriptionName2);
    // create a producer on topic1 to send messages that will be received by the spout
    Producer producer = pulsarClient.createProducer(topic1);
    for (int i = 0; i < 10; i++) {
        String msg = "msg-" + i;
        producer.send(msg.getBytes());
        LOG.info("Message {} sent", msg);
    }
    Message msg = null;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(1, TimeUnit.SECONDS);
        LOG.info("Message {} received", new String(msg.getData()));
    }
    cluster.killTopology("test");
    cluster.shutdown();
}
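The example relies on serviceUrl, messageToValuesMapper and tupleToMessageMapper, which are defined elsewhere in the class. A plausible sketch of those supporting fields, with the URL and mapper bodies as illustrative assumptions rather than the project's exact code:

// Illustrative supporting fields assumed by the example above.
static String serviceUrl = "pulsar://localhost:6650";

// Converts each received Pulsar message into the tuple the spout emits.
static MessageToValuesMapper messageToValuesMapper = new MessageToValuesMapper() {
    @Override
    public Values toValues(Message msg) {
        return new Values(new String(msg.getData()));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("string"));
    }
};

// Converts each incoming tuple into the Pulsar message the bolt publishes.
static TupleToMessageMapper tupleToMessageMapper = new TupleToMessageMapper() {
    @Override
    public Message toMessage(Tuple tuple) {
        String received = tuple.getString(0);
        return MessageBuilder.create().setContent(received.getBytes()).build();
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("string"));
    }
};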
Use of backtype.storm.topology.TopologyBuilder in project storm-hbase by ypf412.
The class HBaseSpoutTest, method testHBaseSpout.
@Test
public void testHBaseSpout() throws Exception {
    hbaseUtil.cleanupTestDir();
    HBaseTestUtil.writeLocalHBaseXml(hbaseUtil);
    PropConfig hbasePropConfig;
    try {
        hbasePropConfig = new PropConfig("hbase.properties");
    } catch (IOException e1) {
        throw new RuntimeException(e1);
    }
    String tableName = Constants.HBASE_DEFAULT_TABLE_NAME;
    String tableNameStr = hbasePropConfig.getProperty("hbase.table.name");
    if (tableNameStr != null && !tableNameStr.equals(""))
        tableName = tableNameStr;
    String columnFamily = Constants.HBASE_DEFAULT_COLUMN_FAMILY;
    String columnFamilyStr = hbasePropConfig.getProperty("hbase.table.column_family");
    if (columnFamilyStr != null && !columnFamilyStr.equals(""))
        columnFamily = columnFamilyStr;
    HTable htable = hbaseUtil.createTable(Bytes.toBytes(tableName), Bytes.toBytes(columnFamily));
    HBaseTestUtil.loadStreamDataToHBase(ClassLoader.getSystemResource("datasource.txt").getPath(), htable, hbasePropConfig);
    int count = hbaseUtil.countRows(htable);
    assertTrue(count > 0);
    System.out.println("*** load " + count + " rows into hbase test table: " + tableName);
    stormUtil.getConfig().put(Constants.STORM_PROP_CONF_FILE, "storm.properties");
    stormUtil.getConfig().put(Constants.HBASE_PROP_CONF_FILE, "hbase.properties");
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("hbaseSpout", new HBaseSpout());
    StormTestBolt sinkBolt = new StormTestBolt();
    List<Object[]> tuples = StormTestUtil.loadTuples("datasource.txt");
    for (Object[] tuple : tuples) {
        sinkBolt.expectSeq(tuple);
    }
    builder.setBolt("sinkBolt", sinkBolt).fieldsGrouping("hbaseSpout", new Fields("sharding"));
    stormUtil.submitTopology(builder, 5000);
    hbaseUtil.deleteTable(Bytes.toBytes(tableName));
}