Use of org.apache.storm.generated.StormTopology in project storm by apache.
From the class TestResourceAwareScheduler, method TestMultipleSpoutsAndCyclicTopologies:
/**
 * Test multiple spouts and cyclic topologies.
 */
@Test
public void TestMultipleSpoutsAndCyclicTopologies() {
    TopologyBuilder builder = new TopologyBuilder();
    SpoutDeclarer s1 = builder.setSpout("spout-1", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    SpoutDeclarer s2 = builder.setSpout("spout-2", new TestUtilsForResourceAwareScheduler.TestSpout(), 5);
    BoltDeclarer b1 = builder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("spout-1").shuffleGrouping("bolt-3");
    BoltDeclarer b2 = builder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-1");
    BoltDeclarer b3 = builder.setBolt("bolt-3", new TestUtilsForResourceAwareScheduler.TestBolt(), 5).shuffleGrouping("bolt-2").shuffleGrouping("spout-2");
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<String, Number>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(25, 1, resourceMap);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology stormTopology = builder.createTopology();
    TopologyDetails topo = new TopologyDetails("topo-1", config, stormTopology, 0, genExecsAndComps(stormTopology), 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
    Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
    topoMap.put(topo.getId(), topo);
    Topologies topologies = new Topologies(topoMap);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    Assert.assertTrue("Topo scheduled?", cluster.getAssignmentById(topo.getId()) != null);
    Assert.assertEquals("Topo all executors scheduled?", 25, cluster.getAssignmentById(topo.getId()).getExecutorToSlot().size());
}
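The asserted count of 25 scheduled executors lines up with the cluster size by design. Below is a minimal plain-arithmetic sanity check, not part of the Storm test; all values are copied from the config above:

public class CyclicTopologySchedulingMath {
    public static void main(String[] args) {
        int executors = (2 + 3) * 5;        // 2 spouts + 3 bolts, parallelism 5 each = 25
        double cpuPerExec = 100.0;          // TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT
        double memPerExec = 500.0 + 500.0;  // off-heap + on-heap MB per executor
        double supCpu = 100.0;              // SUPERVISOR_CPU_CAPACITY per supervisor
        double supMem = 1000.0;             // SUPERVISOR_MEMORY_CAPACITY_MB per supervisor
        // Both CPU and memory permit exactly one executor per supervisor,
        // so the 25 executors fill all 25 single-slot supervisors.
        System.out.println(Math.ceil(executors * cpuPerExec / supCpu)); // 25.0
        System.out.println(Math.ceil(executors * memPerExec / supMem)); // 25.0
    }
}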
Use of org.apache.storm.generated.StormTopology in project storm by apache.
From the class TestResourceAwareScheduler, method testResourceLimitation:
@Test
public void testResourceLimitation() {
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(2, 2, resourceMap);
    // a topology with multiple spouts
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout", new TestWordSpout(), 2).setCPULoad(250.0).setMemoryLoad(1000.0, 200.0);
    builder1.setBolt("wordCountBolt", new TestWordCounter(), 1).shuffleGrouping("wordSpout").setCPULoad(100.0).setMemoryLoad(500.0, 100.0);
    StormTopology stormTopology1 = builder1.createTopology();
    Config config = new Config();
    config.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
    TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 2, executorMap1, 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topology1.getId(), topology1);
    Topologies topologies = new Topologies(topoMap);
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
    Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
    Set<String> nodesIDs1 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots1) {
        nodesIDs1.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
    List<Double> assignedExecutorMemory = new ArrayList<>();
    List<Double> assignedExecutorCpu = new ArrayList<>();
    for (ExecutorDetails executor : executors1) {
        assignedExecutorMemory.add(topology1.getTotalMemReqTask(executor));
        assignedExecutorCpu.add(topology1.getTotalCpuReqTask(executor));
    }
    Collections.sort(assignedExecutorCpu);
    Collections.sort(assignedExecutorMemory);
    Map<ExecutorDetails, SupervisorDetails> executorToSupervisor = new HashMap<>();
    Map<SupervisorDetails, List<ExecutorDetails>> supervisorToExecutors = new HashMap<>();
    Map<Double, Double> cpuAvailableToUsed = new HashMap<>();
    Map<Double, Double> memoryAvailableToUsed = new HashMap<>();
    for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment1.getExecutorToSlot().entrySet()) {
        executorToSupervisor.put(entry.getKey(), cluster.getSupervisorById(entry.getValue().getNodeId()));
    }
    for (Map.Entry<ExecutorDetails, SupervisorDetails> entry : executorToSupervisor.entrySet()) {
        List<ExecutorDetails> executorsOnSupervisor = supervisorToExecutors.get(entry.getValue());
        if (executorsOnSupervisor == null) {
            executorsOnSupervisor = new ArrayList<>();
            supervisorToExecutors.put(entry.getValue(), executorsOnSupervisor);
        }
        executorsOnSupervisor.add(entry.getKey());
    }
    for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
        Double supervisorTotalCpu = entry.getKey().getTotalCPU();
        Double supervisorTotalMemory = entry.getKey().getTotalMemory();
        Double supervisorUsedCpu = 0.0;
        Double supervisorUsedMemory = 0.0;
        for (ExecutorDetails executor : entry.getValue()) {
            // Sum each assigned executor's requested CPU and memory onto this supervisor's usage.
            supervisorUsedCpu += topology1.getTotalCpuReqTask(executor);
            supervisorUsedMemory += topology1.getTotalMemReqTask(executor);
        }
        cpuAvailableToUsed.put(supervisorTotalCpu, supervisorUsedCpu);
        memoryAvailableToUsed.put(supervisorTotalMemory, supervisorUsedMemory);
    }
    // executor0 resides on one worker (on one node); executor1 and executor2 share another worker (on the other node)
    Assert.assertEquals(2, assignedSlots1.size());
    Assert.assertEquals(2, nodesIDs1.size());
    Assert.assertEquals(3, executors1.size());
    Assert.assertEquals(100.0, assignedExecutorCpu.get(0), 0.001);
    Assert.assertEquals(250.0, assignedExecutorCpu.get(1), 0.001);
    Assert.assertEquals(250.0, assignedExecutorCpu.get(2), 0.001);
    Assert.assertEquals(600.0, assignedExecutorMemory.get(0), 0.001);
    Assert.assertEquals(1200.0, assignedExecutorMemory.get(1), 0.001);
    Assert.assertEquals(1200.0, assignedExecutorMemory.get(2), 0.001);
    for (Map.Entry<Double, Double> entry : memoryAvailableToUsed.entrySet()) {
        Assert.assertTrue(entry.getKey() - entry.getValue() >= 0);
    }
    for (Map.Entry<Double, Double> entry : cpuAvailableToUsed.entrySet()) {
        Assert.assertTrue(entry.getKey() - entry.getValue() >= 0);
    }
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
}
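The sorted assertion values follow directly from the loads declared with setCPULoad and setMemoryLoad: the memory assertions match each executor's on-heap plus off-heap request. A minimal sketch of that arithmetic, not part of the test:

public class ResourceLimitationMath {
    public static void main(String[] args) {
        // Spout: parallelism 2, setCPULoad(250.0), setMemoryLoad(1000.0, 200.0)
        double spoutCpu = 250.0, spoutMem = 1000.0 + 200.0; // 1200.0 MB per spout executor
        // Bolt: parallelism 1, setCPULoad(100.0), setMemoryLoad(500.0, 100.0)
        double boltCpu = 100.0, boltMem = 500.0 + 100.0;    // 600.0 MB per bolt executor
        // Three executors total, sorted ascending as in the assertions:
        System.out.println(boltCpu + ", " + spoutCpu + ", " + spoutCpu); // 100.0, 250.0, 250.0
        System.out.println(boltMem + ", " + spoutMem + ", " + spoutMem); // 600.0, 1200.0, 1200.0
    }
}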
Use of org.apache.storm.generated.StormTopology in project storm by apache.
From the class TestResourceAwareScheduler, method testTopologyWithMultipleSpouts:
@Test
public void testTopologyWithMultipleSpouts() {
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(2, 4, resourceMap);
    // a topology with multiple spouts
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout1", new TestWordSpout(), 1);
    builder1.setSpout("wordSpout2", new TestWordSpout(), 1);
    builder1.setBolt("wordCountBolt1", new TestWordCounter(), 1).shuffleGrouping("wordSpout1").shuffleGrouping("wordSpout2");
    builder1.setBolt("wordCountBolt2", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
    builder1.setBolt("wordCountBolt3", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
    builder1.setBolt("wordCountBolt4", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt2");
    builder1.setBolt("wordCountBolt5", new TestWordCounter(), 1).shuffleGrouping("wordSpout2");
    StormTopology stormTopology1 = builder1.createTopology();
    Config config = new Config();
    config.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
    TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 0, executorMap1, 0);
    // a topology with two unconnected partitions
    TopologyBuilder builder2 = new TopologyBuilder();
    builder2.setSpout("wordSpoutX", new TestWordSpout(), 1);
    builder2.setSpout("wordSpoutY", new TestWordSpout(), 1);
    StormTopology stormTopology2 = builder2.createTopology();
    Map<ExecutorDetails, String> executorMap2 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology2);
    TopologyDetails topology2 = new TopologyDetails("topology2", config, stormTopology2, 0, executorMap2, 0);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topology1.getId(), topology1);
    topoMap.put(topology2.getId(), topology2);
    Topologies topologies = new Topologies(topoMap);
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
    Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
    Set<String> nodesIDs1 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots1) {
        nodesIDs1.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
    Assert.assertEquals(1, assignedSlots1.size());
    Assert.assertEquals(1, nodesIDs1.size());
    Assert.assertEquals(7, executors1.size());
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
    SchedulerAssignment assignment2 = cluster.getAssignmentById(topology2.getId());
    Set<WorkerSlot> assignedSlots2 = assignment2.getSlots();
    Set<String> nodesIDs2 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots2) {
        nodesIDs2.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors2 = assignment2.getExecutors();
    Assert.assertEquals(1, assignedSlots2.size());
    Assert.assertEquals(1, nodesIDs2.size());
    Assert.assertEquals(2, executors2.size());
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology2.getId()));
}
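To inspect how the scheduler packed executors into slots, a small hypothetical helper can invert the assignment map. The class and method names here are ours, not Storm's; the sketch relies only on SchedulerAssignment.getExecutorToSlot(), which the tests above already use:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.scheduler.ExecutorDetails;
import org.apache.storm.scheduler.SchedulerAssignment;
import org.apache.storm.scheduler.WorkerSlot;

public class AssignmentInspector {
    // Group the assigned executors by the worker slot they landed on.
    public static Map<WorkerSlot, List<ExecutorDetails>> groupBySlot(SchedulerAssignment assignment) {
        Map<WorkerSlot, List<ExecutorDetails>> bySlot = new HashMap<>();
        for (Map.Entry<ExecutorDetails, WorkerSlot> e : assignment.getExecutorToSlot().entrySet()) {
            bySlot.computeIfAbsent(e.getValue(), k -> new ArrayList<>()).add(e.getKey());
        }
        return bySlot;
    }
}

With the assignments above, groupBySlot(assignment1) would return a single entry holding all seven executors, and groupBySlot(assignment2) a single entry holding two.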
Use of org.apache.storm.generated.StormTopology in project storm by apache.
From the class TestUtilsForResourceAwareScheduler, method getTopology:
public static TopologyDetails getTopology(String name, Map config, int numSpout, int numBolt, int spoutParallelism, int boltParallelism, int launchTime, int priority) {
    Config conf = new Config();
    conf.putAll(config);
    conf.put(Config.TOPOLOGY_PRIORITY, priority);
    conf.put(Config.TOPOLOGY_NAME, name);
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    StormTopology topology = buildTopology(numSpout, numBolt, spoutParallelism, boltParallelism);
    TopologyDetails topo = new TopologyDetails(name + "-" + launchTime, conf, topology, 0, genExecsAndComps(topology), launchTime);
    return topo;
}
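The buildTopology helper is not shown on this page. A plausible sketch of it, in the same utility class's style, is below; the "spout-i"/"bolt-i" naming scheme and the round-robin wiring are assumptions, not the verbatim Storm source, and it assumes numSpout >= 1:

// Plausible sketch of the buildTopology helper referenced above (assumptions noted in the lead-in).
public static StormTopology buildTopology(int numSpout, int numBolt, int spoutParallelism, int boltParallelism) {
    TopologyBuilder builder = new TopologyBuilder();
    for (int i = 0; i < numSpout; i++) {
        builder.setSpout("spout-" + i, new TestUtilsForResourceAwareScheduler.TestSpout(), spoutParallelism);
    }
    for (int i = 0; i < numBolt; i++) {
        // Wire each bolt to one of the spouts, cycling through them round-robin.
        builder.setBolt("bolt-" + i, new TestUtilsForResourceAwareScheduler.TestBolt(), boltParallelism)
               .shuffleGrouping("spout-" + (i % numSpout));
    }
    return builder.createTopology();
}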
Use of org.apache.storm.generated.StormTopology in project storm by apache.
From the class BasicContainerTest, method testLaunch:
@Test
public void testLaunch() throws Exception {
    final String topoId = "test_topology";
    final int port = 8080;
    final String stormHome = ContainerTest.asAbsPath("tmp", "storm-home");
    final String stormLogDir = ContainerTest.asFile(".", "target").getCanonicalPath();
    final String workerId = "worker-id";
    final String stormLocal = ContainerTest.asAbsPath("tmp", "storm-local");
    final String distRoot = ContainerTest.asAbsPath(stormLocal, "supervisor", "stormdist", topoId);
    final File stormcode = new File(distRoot, "stormcode.ser");
    final File stormjar = new File(distRoot, "stormjar.jar");
    final String log4jdir = ContainerTest.asAbsPath(stormHome, "conf");
    final String workerConf = ContainerTest.asAbsPath(log4jdir, "worker.xml");
    final String workerRoot = ContainerTest.asAbsPath(stormLocal, "workers", workerId);
    final String workerTmpDir = ContainerTest.asAbsPath(workerRoot, "tmp");
    final StormTopology st = new StormTopology();
    st.set_spouts(new HashMap<>());
    st.set_bolts(new HashMap<>());
    st.set_state_spouts(new HashMap<>());
    byte[] serializedState = Utils.gzip(Utils.thriftSerialize(st));
    final Map<String, Object> superConf = new HashMap<>();
    superConf.put(Config.STORM_LOCAL_DIR, stormLocal);
    superConf.put(Config.STORM_WORKERS_ARTIFACTS_DIR, stormLocal);
    superConf.put(Config.STORM_LOG4J2_CONF_DIR, log4jdir);
    superConf.put(Config.WORKER_CHILDOPTS, " -Dtesting=true");
    LocalAssignment la = new LocalAssignment();
    la.set_topology_id(topoId);
    AdvancedFSOps ops = mock(AdvancedFSOps.class);
    when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true);
    when(ops.slurp(stormcode)).thenReturn(serializedState);
    LocalState ls = mock(LocalState.class);
    checkpoint(() -> {
        MockBasicContainer mc = new MockBasicContainer(ContainerType.LAUNCH, superConf, "SUPERVISOR", port, la, null, ls, workerId, new HashMap<>(), ops, "profile");
        mc.launch();
        assertEquals(1, mc.workerCmds.size());
        CommandRun cmd = mc.workerCmds.get(0);
        mc.workerCmds.clear();
        assertListEquals(Arrays.asList(
            "java", "-cp", "FRAMEWORK_CP:" + stormjar.getAbsolutePath(),
            "-Dlogging.sensitivity=S3", "-Dlogfile.name=worker.log",
            "-Dstorm.home=" + stormHome, "-Dworkers.artifacts=" + stormLocal,
            "-Dstorm.id=" + topoId, "-Dworker.id=" + workerId, "-Dworker.port=" + port,
            "-Dstorm.log.dir=" + stormLogDir, "-Dlog4j.configurationFile=" + workerConf,
            "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector",
            "-Dstorm.local.dir=" + stormLocal, "org.apache.storm.LogWriter",
            "java", "-server",
            "-Dlogging.sensitivity=S3", "-Dlogfile.name=worker.log",
            "-Dstorm.home=" + stormHome, "-Dworkers.artifacts=" + stormLocal,
            "-Dstorm.id=" + topoId, "-Dworker.id=" + workerId, "-Dworker.port=" + port,
            "-Dstorm.log.dir=" + stormLogDir, "-Dlog4j.configurationFile=" + workerConf,
            "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector",
            "-Dstorm.local.dir=" + stormLocal, "-Dtesting=true", "-Djava.library.path=JLP",
            "-Dstorm.conf.file=", "-Dstorm.options=", "-Djava.io.tmpdir=" + workerTmpDir,
            "-cp", "FRAMEWORK_CP:" + stormjar.getAbsolutePath(),
            "org.apache.storm.daemon.worker.Worker", topoId, "SUPERVISOR", String.valueOf(port), workerId),
            cmd.cmd);
        assertEquals(new File(workerRoot), cmd.pwd);
    }, "storm.home", stormHome, "storm.log.dir", stormLogDir);
}
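The serializedState blob in this test is a gzip-compressed Thrift serialization of an empty StormTopology. A round-trip sketch follows; Utils.gzip and Utils.thriftSerialize appear in the test itself, while Utils.gunzip and Utils.thriftDeserialize are assumed here as their counterparts in org.apache.storm.utils.Utils:

// Round-trip sketch for the blob built in testLaunch; gunzip/thriftDeserialize are
// assumed counterparts of the gzip/thriftSerialize calls used above.
StormTopology st = new StormTopology();
st.set_spouts(new HashMap<>());
st.set_bolts(new HashMap<>());
st.set_state_spouts(new HashMap<>());
byte[] blob = Utils.gzip(Utils.thriftSerialize(st));
StormTopology roundTripped = Utils.thriftDeserialize(StormTopology.class, Utils.gunzip(blob));
assert roundTripped.get_spouts().isEmpty(); // the empty structure survives the round trip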