Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The class TestResourceAwareScheduler, method testTopologyWithMultipleSpouts.
@Test
public void testTopologyWithMultipleSpouts() {
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(2, 4, resourceMap);
    // a topology with multiple spouts
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout1", new TestWordSpout(), 1);
    builder1.setSpout("wordSpout2", new TestWordSpout(), 1);
    builder1.setBolt("wordCountBolt1", new TestWordCounter(), 1).shuffleGrouping("wordSpout1").shuffleGrouping("wordSpout2");
    builder1.setBolt("wordCountBolt2", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
    builder1.setBolt("wordCountBolt3", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
    builder1.setBolt("wordCountBolt4", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt2");
    builder1.setBolt("wordCountBolt5", new TestWordCounter(), 1).shuffleGrouping("wordSpout2");
    StormTopology stormTopology1 = builder1.createTopology();
    Config config = new Config();
    config.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
    TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 0, executorMap1, 0);
    // a topology with two unconnected partitions
    TopologyBuilder builder2 = new TopologyBuilder();
    builder2.setSpout("wordSpoutX", new TestWordSpout(), 1);
    builder2.setSpout("wordSpoutY", new TestWordSpout(), 1);
    StormTopology stormTopology2 = builder2.createTopology();
    Map<ExecutorDetails, String> executorMap2 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology2);
    TopologyDetails topology2 = new TopologyDetails("topology2", config, stormTopology2, 0, executorMap2, 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topology1.getId(), topology1);
    topoMap.put(topology2.getId(), topology2);
    Topologies topologies = new Topologies(topoMap);
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
    Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
    Set<String> nodesIDs1 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots1) {
        nodesIDs1.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
    // topology1: all 7 executors (2 spouts + 5 bolts) packed into one slot on one node
    Assert.assertEquals(1, assignedSlots1.size());
    Assert.assertEquals(1, nodesIDs1.size());
    Assert.assertEquals(7, executors1.size());
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
    SchedulerAssignment assignment2 = cluster.getAssignmentById(topology2.getId());
    Set<WorkerSlot> assignedSlots2 = assignment2.getSlots();
    Set<String> nodesIDs2 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots2) {
        nodesIDs2.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors2 = assignment2.getExecutors();
    // topology2: the 2 unconnected spout executors also land in one slot on one node
    Assert.assertEquals(1, assignedSlots2.size());
    Assert.assertEquals(1, nodesIDs2.size());
    Assert.assertEquals(2, executors2.size());
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology2.getId()));
}
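The test leans on the topology-wide default resource requests coming from defaultTopologyConf. When defaults are not enough, requests can be declared per component on the same TopologyBuilder calls and the ResourceAwareScheduler packs executors into slots until a node's SUPERVISOR_CPU_CAPACITY or SUPERVISOR_MEMORY_CAPACITY_MB would be exceeded. A minimal sketch, assuming Storm's fluent resource API on the spout/bolt declarers; the component names and numbers are illustrative, not taken from the test:

TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("wordSpout", new TestWordSpout(), 1)
       .setCPULoad(20.0)       // request 20% of one core
       .setMemoryLoad(256.0);  // request 256 MB on-heap
builder.setBolt("wordCountBolt", new TestWordCounter(), 1)
       .shuffleGrouping("wordSpout")
       .setCPULoad(10.0)
       .setMemoryLoad(128.0);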
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The class MessagingTest, method testLocalTransport.
@Test
public void testLocalTransport() throws Exception {
    Config stormConf = new Config();
    stormConf.put(Config.TOPOLOGY_WORKERS, 2);
    stormConf.put(Config.STORM_MESSAGING_TRANSPORT, "org.apache.storm.messaging.netty.Context");
    try (ILocalCluster cluster = new LocalCluster.Builder().withSimulatedTime().withSupervisors(1).withPortsPerSupervisor(2).withDaemonConf(stormConf).build()) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("1", new TestWordSpout(true), 2);
        builder.setBolt("2", new TestGlobalCount(), 6).shuffleGrouping("1");
        StormTopology stormTopology = builder.createTopology();
        List<FixedTuple> fixedTuples = new ArrayList<>();
        // 12 iterations x 2 tuples = 24 mocked tuples fed through spout "1"
        for (int i = 0; i < 12; i++) {
            fixedTuples.add(new FixedTuple(Collections.singletonList("a")));
            fixedTuples.add(new FixedTuple(Collections.singletonList("b")));
        }
        Map<String, List<FixedTuple>> data = new HashMap<>();
        data.put("1", fixedTuples);
        MockedSources mockedSources = new MockedSources(data);
        CompleteTopologyParam completeTopologyParam = new CompleteTopologyParam();
        completeTopologyParam.setMockedSources(mockedSources);
        Map<String, List<FixedTuple>> results = Testing.completeTopology(cluster, stormTopology, completeTopologyParam);
        // one output tuple per input: 6 * 4 = 24 tuples read back from bolt "2"
        Assert.assertEquals(6 * 4, Testing.readTuples(results, "2").size());
    }
}
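The assertion expects 6 * 4 = 24 output tuples from bolt "2", one per mocked input, so a bolt like TestGlobalCount must emit one count tuple for every tuple it receives. A hypothetical sketch of a bolt in that spirit, assuming the Storm 2.x BaseRichBolt API; CountingBolt is an illustrative name, not Storm's actual TestGlobalCount:

import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

public class CountingBolt extends BaseRichBolt {
    private OutputCollector collector;
    private long count = 0;

    @Override
    public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple input) {
        count++;
        collector.emit(input, new Values(count)); // anchor to the input for at-least-once semantics
        collector.ack(input);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("count"));
    }
}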
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The class ConstSpoutIdBoltNullBoltTopo, method getTopology.
public static StormTopology getTopology(Map conf) {
    // 1 - Setup Spout --------
    ConstSpout spout = new ConstSpout("some data").withOutputFields("str");
    // 2 - Setup IdBolt & DevNullBolt --------
    IdBolt bolt1 = new IdBolt();
    DevNullBolt bolt2 = new DevNullBolt();
    // 3 - Setup Topology --------
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(SPOUT_ID, spout, Helper.getInt(conf, SPOUT_COUNT, 1));
    builder.setBolt(BOLT1_ID, bolt1, Helper.getInt(conf, BOLT1_COUNT, 1)).localOrShuffleGrouping(SPOUT_ID);
    builder.setBolt(BOLT2_ID, bolt2, Helper.getInt(conf, BOLT2_COUNT, 1)).localOrShuffleGrouping(BOLT1_ID);
    return builder.createTopology();
}
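This is a throughput-benchmark topology: IdBolt forwards each tuple unchanged, DevNullBolt terminates the stream, and localOrShuffleGrouping keeps tuples inside the same worker whenever a local target task exists, avoiding network transfer. A hypothetical sketch of a dev-null style sink, assuming the Storm 2.x BaseRichBolt API; NullSinkBolt is an illustrative name, not the project's DevNullBolt:

import java.util.Map;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;

public class NullSinkBolt extends BaseRichBolt {
    private OutputCollector collector;

    @Override
    public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple tuple) {
        collector.ack(tuple); // ack so the spout's pending count drains; emit nothing
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // terminal bolt: no output streams
    }
}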
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The class HiveTopology, method main.
public static void main(String[] args) throws Exception {
    String metaStoreURI = args[0];
    String dbName = args[1];
    String tblName = args[2];
    String[] colNames = { "id", "name", "phone", "street", "city", "state" };
    Config config = new Config();
    config.setNumWorkers(1);
    UserDataSpout spout = new UserDataSpout();
    DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames));
    HiveOptions hiveOptions;
    if (args.length == 6) {
        hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(10).withBatchSize(100).withIdleTimeout(10).withKerberosKeytab(args[4]).withKerberosPrincipal(args[5]);
    } else {
        hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(10).withBatchSize(100).withIdleTimeout(10).withMaxOpenConnections(1);
    }
    HiveBolt hiveBolt = new HiveBolt(hiveOptions);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(USER_SPOUT_ID, spout, 1);
    // UserDataSpout --> HiveBolt
    builder.setBolt(BOLT_ID, hiveBolt, 1).shuffleGrouping(USER_SPOUT_ID);
    if (args.length == 3) {
        // local mode: run for 20 seconds, then shut down
        try (LocalCluster cluster = new LocalCluster();
             LocalTopology topo = cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology())) {
            waitForSeconds(20);
        }
        System.exit(0);
    } else if (args.length >= 4) {
        StormSubmitter.submitTopology(args[3], config, builder.createTopology());
    } else {
        System.out.println("Usage: HiveTopology metastoreURI dbName tableName [topologyName] [keytab file] [principal name]");
    }
}
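DelimitedRecordHiveMapper pulls values out of the incoming tuple by field name, so the spout's declared fields must line up with colNames. A hypothetical sketch of such a spout, assuming the Storm 2.x BaseRichSpout API; SampleUserSpout and its hard-coded record are illustrative stand-ins for the real UserDataSpout:

import java.util.Map;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

public class SampleUserSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;

    @Override
    public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void nextTuple() {
        // one placeholder record whose positions match the declared fields
        collector.emit(new Values("1", "user1", "555-1111", "1 Main St", "Sunnyvale", "CA"));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("id", "name", "phone", "street", "city", "state"));
    }
}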
Use of org.apache.storm.topology.TopologyBuilder in project storm by apache.
The class HiveTopologyPartitioned, method main.
public static void main(String[] args) throws Exception {
    String metaStoreURI = args[0];
    String dbName = args[1];
    String tblName = args[2];
    String[] partNames = { "city", "state" };
    String[] colNames = { "id", "name", "phone", "street" };
    Config config = new Config();
    config.setNumWorkers(1);
    UserDataSpout spout = new UserDataSpout();
    DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames)).withPartitionFields(new Fields(partNames));
    HiveOptions hiveOptions;
    if (args.length == 6) {
        hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(10).withBatchSize(1000).withIdleTimeout(10).withKerberosKeytab(args[4]).withKerberosPrincipal(args[5]);
    } else {
        hiveOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper).withTxnsPerBatch(10).withBatchSize(1000).withIdleTimeout(10);
    }
    HiveBolt hiveBolt = new HiveBolt(hiveOptions);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(USER_SPOUT_ID, spout, 1);
    // UserDataSpout --> HiveBolt
    builder.setBolt(BOLT_ID, hiveBolt, 1).shuffleGrouping(USER_SPOUT_ID);
    if (args.length == 3) {
        // local mode: run for 20 seconds, then shut down
        try (LocalCluster cluster = new LocalCluster();
             LocalTopology topo = cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology())) {
            waitForSeconds(20);
        }
        System.exit(0);
    } else if (args.length >= 4) {
        StormSubmitter.submitTopology(args[3], config, builder.createTopology());
    } else {
        System.out.println("Usage: HiveTopologyPartitioned metastoreURI dbName tableName [topologyName] [keytab file] [principal name]");
    }
}
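The partitioned variant differs from HiveTopology in that "city" and "state" move from column fields to partition fields: the mapper reads them from the tuple to choose the target Hive partition instead of writing them as row columns, so the spout still emits all six fields. A hedged sketch of HiveOptions knobs that typically matter once writes fan out across partitions; the method names come from storm-hive's HiveOptions, but the values are illustrative assumptions, not taken from the example:

HiveOptions partitionedOptions = new HiveOptions(metaStoreURI, dbName, tblName, mapper)
    .withTxnsPerBatch(10)
    .withBatchSize(1000)
    .withIdleTimeout(10)
    .withAutoCreatePartitions(true)  // create Hive partitions for unseen city/state combinations
    .withMaxOpenConnections(10);     // each distinct partition holds its own open writer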