Use of org.apache.storm.scheduler.SchedulerAssignmentImpl in project storm by apache.
Class TestResourceAwareScheduler, method testScheduleResilience.
@Test
public void testScheduleResilience() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(2, 2, resourceMap);
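// genSupervisors(2, 2, resourceMap) is assumed to build 2 supervisors ("sup-0", "sup-1") with 2 ports each, matching the slot ids referenced later in this test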
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout1", new TestWordSpout(), 3);
StormTopology stormTopology1 = builder1.createTopology();
Config config1 = new Config();
config1.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config1, stormTopology1, 3, executorMap1, 0);
TopologyBuilder builder2 = new TopologyBuilder();
builder2.setSpout("wordSpout2", new TestWordSpout(), 2);
StormTopology stormTopology2 = builder2.createTopology();
Config config2 = new Config();
config2.putAll(defaultTopologyConf);
// memory requirement is large enough that two executors cannot both be fully assigned to one node
config2.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 1280.0);
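// with 2000 MB per node, one 1280 MB executor fits but two (2560 MB) do not, so the two executors must land on different nodes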
Map<ExecutorDetails, String> executorMap2 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology2);
TopologyDetails topology2 = new TopologyDetails("topology2", config2, stormTopology2, 2, executorMap2, 0);
// Test1: When a worker fails, RAS does not alter existing assignments on healthy workers
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
Map<String, TopologyDetails> topoMap = new HashMap<>();
topoMap.put(topology2.getId(), topology2);
Topologies topologies = new Topologies(topoMap);
rs.prepare(config1);
rs.schedule(topologies, cluster);
SchedulerAssignmentImpl assignment = (SchedulerAssignmentImpl) cluster.getAssignmentById(topology2.getId());
// pick a worker to mock as failed
WorkerSlot failedWorker = new ArrayList<WorkerSlot>(assignment.getSlots()).get(0);
Map<ExecutorDetails, WorkerSlot> executorToSlot = assignment.getExecutorToSlot();
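// note: the test assumes getExecutorToSlot() exposes the assignment's live map, so the removals below mutate the cluster's recorded assignment and thereby simulate the worker failure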
List<ExecutorDetails> failedExecutors = new ArrayList<>();
for (Map.Entry<ExecutorDetails, WorkerSlot> entry : executorToSlot.entrySet()) {
if (entry.getValue().equals(failedWorker)) {
failedExecutors.add(entry.getKey());
}
}
for (ExecutorDetails executor : failedExecutors) {
// remove executor details assigned to the failed worker
executorToSlot.remove(executor);
}
Map<ExecutorDetails, WorkerSlot> copyOfOldMapping = new HashMap<>(executorToSlot);
Set<ExecutorDetails> healthyExecutors = copyOfOldMapping.keySet();
rs.schedule(topologies, cluster);
SchedulerAssignment newAssignment = cluster.getAssignmentById(topology2.getId());
Map<ExecutorDetails, WorkerSlot> newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : healthyExecutors) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology2.getId()));
// end of Test1
// Test2: When a supervisor fails, RAS does not alter existing assignments
executorToSlot = new HashMap<>();
executorToSlot.put(new ExecutorDetails(0, 0), new WorkerSlot("sup-0", 0));
executorToSlot.put(new ExecutorDetails(1, 1), new WorkerSlot("sup-0", 1));
executorToSlot.put(new ExecutorDetails(2, 2), new WorkerSlot("sup-1", 1));
Map<String, SchedulerAssignmentImpl> existingAssignments = new HashMap<>();
assignment = new SchedulerAssignmentImpl(topology1.getId(), executorToSlot);
existingAssignments.put(topology1.getId(), assignment);
copyOfOldMapping = new HashMap<>(executorToSlot);
Set<ExecutorDetails> existingExecutors = copyOfOldMapping.keySet();
Map<String, SupervisorDetails> supMap1 = new HashMap<>(supMap);
// mock the supervisor sup-0 as a failed supervisor
supMap1.remove("sup-0");
Cluster cluster1 = new Cluster(iNimbus, supMap1, existingAssignments, config1);
topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
newAssignment = cluster1.getAssignmentById(topology1.getId());
newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : existingExecutors) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Fully Scheduled", cluster1.getStatusMap().get(topology1.getId()));
// end of Test2
// Test3: When a supervisor and a worker on it fails, RAS does not alter existing assignments
executorToSlot = new HashMap<>();
// the worker to orphan
executorToSlot.put(new ExecutorDetails(0, 0), new WorkerSlot("sup-0", 1));
// the worker that fails
executorToSlot.put(new ExecutorDetails(1, 1), new WorkerSlot("sup-0", 2));
// the healthy worker
executorToSlot.put(new ExecutorDetails(2, 2), new WorkerSlot("sup-1", 1));
existingAssignments = new HashMap<>();
assignment = new SchedulerAssignmentImpl(topology1.getId(), executorToSlot);
existingAssignments.put(topology1.getId(), assignment);
// remove the executor on the failed worker of sup-0 from topo1's assignment so the scheduler actually has something to reschedule
executorToSlot.remove(new ExecutorDetails(1, 1));
copyOfOldMapping = new HashMap<>(executorToSlot);
// namely the two remaining executors: one on the orphaned worker and one on the healthy worker
existingExecutors = copyOfOldMapping.keySet();
supMap1 = new HashMap<>(supMap);
// mock the supervisor sup-0 as a failed supervisor
supMap1.remove("sup-0");
cluster1 = new Cluster(iNimbus, supMap1, existingAssignments, config1);
topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
newAssignment = cluster1.getAssignmentById(topology1.getId());
newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : existingExecutors) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Fully Scheduled", cluster1.getStatusMap().get(topology1.getId()));
// end of Test3
// Test4: Scheduling a new topology does not disturb other assignments unnecessarily
cluster1 = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
assignment = (SchedulerAssignmentImpl) cluster1.getAssignmentById(topology1.getId());
executorToSlot = assignment.getExecutorToSlot();
copyOfOldMapping = new HashMap<>(executorToSlot);
topoMap.put(topology2.getId(), topology2);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
newAssignment = (SchedulerAssignmentImpl) cluster1.getAssignmentById(topology1.getId());
newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : copyOfOldMapping.keySet()) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster1.getStatusMap().get(topology1.getId()));
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster1.getStatusMap().get(topology2.getId()));
}
Use of org.apache.storm.scheduler.SchedulerAssignmentImpl in project storm by apache.
Class TestResourceAwareScheduler, method testHeterogeneousCluster.
@Test
public void testHeterogeneousCluster() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
// strong supervisor node
Map<String, Number> resourceMap1 = new HashMap<>();
resourceMap1.put(Config.SUPERVISOR_CPU_CAPACITY, 800.0);
resourceMap1.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 4096.0);
// weak supervisor node
Map<String, Number> resourceMap2 = new HashMap<>();
resourceMap2.put(Config.SUPERVISOR_CPU_CAPACITY, 200.0);
resourceMap2.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1024.0);
Map<String, SupervisorDetails> supMap = new HashMap<String, SupervisorDetails>();
for (int i = 0; i < 2; i++) {
List<Number> ports = new LinkedList<Number>();
for (int j = 0; j < 4; j++) {
ports.add(j);
}
SupervisorDetails sup = new SupervisorDetails("sup-" + i, "host-" + i, null, ports, (Map) (i == 0 ? resourceMap1 : resourceMap2));
supMap.put(sup.getId(), sup);
}
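// resulting cluster totals: 800 + 200 = 1000 (CPU) and 4096 + 1024 = 5120 MB (memory)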
// topo1 has a single huge task that cannot be handled by the small supervisor
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout1", new TestWordSpout(), 1).setCPULoad(300.0).setMemoryLoad(2000.0, 48.0);
StormTopology stormTopology1 = builder1.createTopology();
Config config1 = new Config();
config1.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config1, stormTopology1, 1, executorMap1, 0);
// topo2 has 4 large tasks
TopologyBuilder builder2 = new TopologyBuilder();
builder2.setSpout("wordSpout2", new TestWordSpout(), 4).setCPULoad(100.0).setMemoryLoad(500.0, 12.0);
StormTopology stormTopology2 = builder2.createTopology();
Config config2 = new Config();
config2.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap2 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology2);
TopologyDetails topology2 = new TopologyDetails("topology2", config2, stormTopology2, 1, executorMap2, 0);
// topo3 has 4 medium tasks
TopologyBuilder builder3 = new TopologyBuilder();
builder3.setSpout("wordSpout3", new TestWordSpout(), 4).setCPULoad(20.0).setMemoryLoad(200.0, 56.0);
StormTopology stormTopology3 = builder3.createTopology();
Config config3 = new Config();
config3.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap3 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology3);
TopologyDetails topology3 = new TopologyDetails("topology3", config3, stormTopology3, 1, executorMap3, 0);
// topo4 has 12 small tasks, whose memory usage does not divide a node's memory capacity exactly
TopologyBuilder builder4 = new TopologyBuilder();
builder4.setSpout("wordSpout4", new TestWordSpout(), 12).setCPULoad(30.0).setMemoryLoad(100.0, 0.0);
StormTopology stormTopology4 = builder4.createTopology();
Config config4 = new Config();
config4.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap4 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology4);
TopologyDetails topology4 = new TopologyDetails("topology4", config4, stormTopology4, 1, executorMap4, 0);
// topo5 has 40 small tasks, it should be able to exactly use up both the cpu and mem in the cluster
TopologyBuilder builder5 = new TopologyBuilder();
builder5.setSpout("wordSpout5", new TestWordSpout(), 40).setCPULoad(25.0).setMemoryLoad(100.0, 28.0);
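// 40 x 25 = 1000 CPU and 40 x (100 + 28) = 5120 MB, exactly the cluster totals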
StormTopology stormTopology5 = builder5.createTopology();
Config config5 = new Config();
config5.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap5 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology5);
TopologyDetails topology5 = new TopologyDetails("topology5", config5, stormTopology5, 1, executorMap5, 0);
// Test1: Launch topos 1-3 together; because the requests divide the node capacities exactly, each node should end up with either its memory or its CPU fully used
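// memory arithmetic: 2048 (topo1) + 4 x 512 (topo2) + 4 x 256 (topo3) = 5120 MB, the cluster's full memory capacity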
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
Map<String, TopologyDetails> topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topoMap.put(topology2.getId(), topology2);
topoMap.put(topology3.getId(), topology3);
Topologies topologies = new Topologies(topoMap);
rs.prepare(config1);
rs.schedule(topologies, cluster);
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology2.getId()));
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology3.getId()));
Map<SupervisorDetails, Double> superToCpu = TestUtilsForResourceAwareScheduler.getSupervisorToCpuUsage(cluster, topologies);
Map<SupervisorDetails, Double> superToMem = TestUtilsForResourceAwareScheduler.getSupervisorToMemoryUsage(cluster, topologies);
final Double EPSILON = 0.0001;
for (SupervisorDetails supervisor : supMap.values()) {
Double cpuAvailable = supervisor.getTotalCPU();
Double memAvailable = supervisor.getTotalMemory();
Double cpuUsed = superToCpu.get(supervisor);
Double memUsed = superToMem.get(supervisor);
Assert.assertTrue((Math.abs(memAvailable - memUsed) < EPSILON) || (Math.abs(cpuAvailable - cpuUsed) < EPSILON));
}
// end of Test1
// Test2: Launch topos 1, 2 and 4; together they request slightly more memory than is available, so one of the 3 topologies will not be scheduled
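// memory arithmetic: 2048 + 2048 + 12 x 100 = 5296 MB > 5120 MB available, so exactly one topology must miss out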
cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topoMap.put(topology2.getId(), topology2);
topoMap.put(topology4.getId(), topology4);
topologies = new Topologies(topoMap);
rs.prepare(config1);
rs.schedule(topologies, cluster);
int numTopologiesAssigned = 0;
if (cluster.getStatusMap().get(topology1.getId()).equals("Running - Fully Scheduled by DefaultResourceAwareStrategy")) {
numTopologiesAssigned++;
}
if (cluster.getStatusMap().get(topology2.getId()).equals("Running - Fully Scheduled by DefaultResourceAwareStrategy")) {
numTopologiesAssigned++;
}
if (cluster.getStatusMap().get(topology4.getId()).equals("Running - Fully Scheduled by DefaultResourceAwareStrategy")) {
numTopologiesAssigned++;
}
Assert.assertEquals(2, numTopologiesAssigned);
//end of Test2
//Test3: "Launch topo5 only, both mem and cpu should be exactly used up"
cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
topoMap = new HashMap<>();
topoMap.put(topology5.getId(), topology5);
topologies = new Topologies(topoMap);
rs.prepare(config1);
rs.schedule(topologies, cluster);
superToCpu = TestUtilsForResourceAwareScheduler.getSupervisorToCpuUsage(cluster, topologies);
superToMem = TestUtilsForResourceAwareScheduler.getSupervisorToMemoryUsage(cluster, topologies);
for (SupervisorDetails supervisor : supMap.values()) {
Double cpuAvailable = supervisor.getTotalCPU();
Double memAvailable = supervisor.getTotalMemory();
Double cpuUsed = superToCpu.get(supervisor);
Double memUsed = superToMem.get(supervisor);
Assert.assertEquals(cpuAvailable, cpuUsed, 0.0001);
Assert.assertEquals(memAvailable, memUsed, 0.0001);
}
//end of Test3
}
Use of org.apache.storm.scheduler.SchedulerAssignmentImpl in project storm by apache.
Class TestResourceAwareScheduler, method testTopologySetCpuAndMemLoad.
@Test
public void testTopologySetCpuAndMemLoad() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(2, 2, resourceMap);
// a topology with one spout and one bolt, each given explicit CPU and memory loads
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout", new TestWordSpout(), 1).setCPULoad(20.0).setMemoryLoad(200.0);
builder1.setBolt("wordCountBolt", new TestWordCounter(), 1).shuffleGrouping("wordSpout").setCPULoad(20.0).setMemoryLoad(200.0);
StormTopology stormTopology1 = builder1.createTopology();
Config config = new Config();
config.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 0, executorMap1, 0);
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
Map<String, TopologyDetails> topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
Topologies topologies = new Topologies(topoMap);
rs.prepare(config);
rs.schedule(topologies, cluster);
SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
double assignedMemory = 0.0;
double assignedCpu = 0.0;
Set<String> nodesIDs1 = new HashSet<>();
for (WorkerSlot slot : assignedSlots1) {
nodesIDs1.add(slot.getNodeId());
assignedMemory += slot.getAllocatedMemOnHeap() + slot.getAllocatedMemOffHeap();
assignedCpu += slot.getAllocatedCpu();
}
Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
Assert.assertEquals(1, assignedSlots1.size());
Assert.assertEquals(1, nodesIDs1.size());
Assert.assertEquals(2, executors1.size());
Assert.assertEquals(400.0, assignedMemory, 0.001);
Assert.assertEquals(40.0, assignedCpu, 0.001);
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
}
Use of org.apache.storm.scheduler.SchedulerAssignmentImpl in project storm by apache.
Class TestResourceAwareScheduler, method TestTopologySortedInCorrectOrder.
@Test
public void TestTopologySortedInCorrectOrder() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<String, Number>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1024.0);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(20, 4, resourceMap);
Config config = new Config();
config.putAll(Utils.readDefaultConfig());
config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 10.0);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 128.0);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 0.0);
config.put(Config.TOPOLOGY_SUBMITTER_USER, TOPOLOGY_SUBMITTER);
Map<String, Map<String, Number>> resourceUserPool = new HashMap<String, Map<String, Number>>();
resourceUserPool.put("jerry", new HashMap<String, Number>());
resourceUserPool.get("jerry").put("cpu", 1000);
resourceUserPool.get("jerry").put("memory", 8192.0);
resourceUserPool.put("bobby", new HashMap<String, Number>());
resourceUserPool.get("bobby").put("cpu", 10000.0);
resourceUserPool.get("bobby").put("memory", 32768);
resourceUserPool.put("derek", new HashMap<String, Number>());
resourceUserPool.get("derek").put("cpu", 5000.0);
resourceUserPool.get("derek").put("memory", 16384.0);
config.put(Config.RESOURCE_AWARE_SCHEDULER_USER_POOLS, resourceUserPool);
TopologyDetails topo1 = TestUtilsForResourceAwareScheduler.getTopology("topo-1", config, 5, 15, 1, 1, currentTime - 2, 20);
TopologyDetails topo2 = TestUtilsForResourceAwareScheduler.getTopology("topo-2", config, 5, 15, 1, 1, currentTime - 8, 30);
TopologyDetails topo3 = TestUtilsForResourceAwareScheduler.getTopology("topo-3", config, 5, 15, 1, 1, currentTime - 16, 30);
TopologyDetails topo4 = TestUtilsForResourceAwareScheduler.getTopology("topo-4", config, 5, 15, 1, 1, currentTime - 16, 20);
TopologyDetails topo5 = TestUtilsForResourceAwareScheduler.getTopology("topo-5", config, 5, 15, 1, 1, currentTime - 24, 30);
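// the last argument to getTopology appears to be the topology priority (lower value = higher priority); the expected running order below is ascending by priority, ties broken by earlier launch time: topo-4 (20, -16), topo-1 (20, -2), then topo-5 (30, -24), topo-3 (30, -16), topo-2 (30, -8)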
Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
topoMap.put(topo1.getId(), topo1);
topoMap.put(topo2.getId(), topo2);
topoMap.put(topo3.getId(), topo3);
topoMap.put(topo4.getId(), topo4);
topoMap.put(topo5.getId(), topo5);
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
Topologies topologies = new Topologies(topoMap);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
rs.prepare(config);
rs.schedule(topologies, cluster);
Set<TopologyDetails> queue = rs.getUser("jerry").getTopologiesPending();
Assert.assertEquals("check size", 0, queue.size());
queue = rs.getUser("jerry").getTopologiesRunning();
Iterator<TopologyDetails> itr = queue.iterator();
TopologyDetails topo = itr.next();
LOG.info("{} - {}", topo.getName(), queue);
Assert.assertEquals("check order", "topo-4", topo.getName());
topo = itr.next();
LOG.info("{} - {}", topo.getName(), queue);
Assert.assertEquals("check order", "topo-1", topo.getName());
topo = itr.next();
LOG.info("{} - {}", topo.getName(), queue);
Assert.assertEquals("check order", "topo-5", topo.getName());
topo = itr.next();
LOG.info("{} - {}", topo.getName(), queue);
Assert.assertEquals("check order", "topo-3", topo.getName());
topo = itr.next();
LOG.info("{} - {}", topo.getName(), queue);
Assert.assertEquals("check order", "topo-2", topo.getName());
TopologyDetails topo6 = TestUtilsForResourceAwareScheduler.getTopology("topo-6", config, 5, 15, 1, 1, currentTime - 30, 10);
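// topo-6 carries the lowest priority value (10) seen so far, so it should now sort first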
topoMap.put(topo6.getId(), topo6);
topologies = new Topologies(topoMap);
rs.prepare(config);
rs.schedule(topologies, cluster);
queue = rs.getUser("jerry").getTopologiesRunning();
itr = queue.iterator();
topo = itr.next();
Assert.assertEquals("check order", topo.getName(), "topo-6");
topo = itr.next();
Assert.assertEquals("check order", topo.getName(), "topo-4");
topo = itr.next();
Assert.assertEquals("check order", topo.getName(), "topo-1");
topo = itr.next();
Assert.assertEquals("check order", topo.getName(), "topo-5");
topo = itr.next();
Assert.assertEquals("check order", topo.getName(), "topo-3");
topo = itr.next();
Assert.assertEquals("check order", topo.getName(), "topo-2");
queue = rs.getUser("jerry").getTopologiesPending();
Assert.assertEquals("check size", queue.size(), 0);
}
Use of org.apache.storm.scheduler.SchedulerAssignmentImpl in project storm by apache.
Class TestResourceAwareScheduler, method TestSubmitUsersWithNoGuarantees.
@Test
public void TestSubmitUsersWithNoGuarantees() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<String, Number>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(4, 4, resourceMap);
Config config = new Config();
config.putAll(Utils.readDefaultConfig());
config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500.0);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500.0);
Map<String, Map<String, Number>> resourceUserPool = new HashMap<String, Map<String, Number>>();
resourceUserPool.put("jerry", new HashMap<String, Number>());
resourceUserPool.get("jerry").put("cpu", 200.0);
resourceUserPool.get("jerry").put("memory", 2000.0);
config.put(Config.RESOURCE_AWARE_SCHEDULER_USER_POOLS, resourceUserPool);
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
TopologyDetails topo1 = TestUtilsForResourceAwareScheduler.getTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 10);
TopologyDetails topo2 = TestUtilsForResourceAwareScheduler.getTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 20);
TopologyDetails topo3 = TestUtilsForResourceAwareScheduler.getTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 20);
config.put(Config.TOPOLOGY_SUBMITTER_USER, "bobby");
TopologyDetails topo4 = TestUtilsForResourceAwareScheduler.getTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 10);
TopologyDetails topo5 = TestUtilsForResourceAwareScheduler.getTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 20);
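// capacity check: each topology is a single 1-executor spout requesting 100% CPU and 500 + 500 = 1000 MB, i.e. one full supervisor; the 4 supervisors therefore fit exactly 4 of the 5 topologies, and the assertions below show that bobby's extra topology is the one left merely attempted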
Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
topoMap.put(topo1.getId(), topo1);
topoMap.put(topo2.getId(), topo2);
topoMap.put(topo3.getId(), topo3);
topoMap.put(topo4.getId(), topo4);
topoMap.put(topo5.getId(), topo5);
Topologies topologies = new Topologies(topoMap);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
rs.prepare(config);
rs.schedule(topologies, cluster);
for (TopologyDetails topo : rs.getUser("jerry").getTopologiesRunning()) {
Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
}
Assert.assertEquals("# of running topologies", 3, rs.getUser("jerry").getTopologiesRunning().size());
Assert.assertEquals("# of pending topologies", 0, rs.getUser("jerry").getTopologiesPending().size());
Assert.assertEquals("# of attempted topologies", 0, rs.getUser("jerry").getTopologiesAttempted().size());
Assert.assertEquals("# of invalid topologies", 0, rs.getUser("jerry").getTopologiesInvalid().size());
for (TopologyDetails topo : rs.getUser("bobby").getTopologiesRunning()) {
Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
}
Assert.assertEquals("# of running topologies", 1, rs.getUser("bobby").getTopologiesRunning().size());
Assert.assertEquals("# of pending topologies", 0, rs.getUser("bobby").getTopologiesPending().size());
Assert.assertEquals("# of attempted topologies", 1, rs.getUser("bobby").getTopologiesAttempted().size());
Assert.assertEquals("# of invalid topologies", 0, rs.getUser("bobby").getTopologiesInvalid().size());
}
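The recurring pattern across these tests is to seed a Cluster with pre-existing assignments and then assert that rescheduling leaves them intact. Below is a minimal sketch of that pattern, reusing only the constructors and calls exercised above; the fixture variables (iNimbus, supMap, config, topology, topoMap) are assumed to be built as in the tests.
// Sketch (assumes fixtures as in the tests above): seed a Cluster with an existing assignment, reschedule, and verify stability.
Map<ExecutorDetails, WorkerSlot> execToSlot = new HashMap<>();
execToSlot.put(new ExecutorDetails(0, 0), new WorkerSlot("sup-0", 0));
SchedulerAssignmentImpl existing = new SchedulerAssignmentImpl(topology.getId(), execToSlot);
Map<String, SchedulerAssignmentImpl> existingAssignments = new HashMap<>();
existingAssignments.put(topology.getId(), existing);
Cluster cluster = new Cluster(iNimbus, supMap, existingAssignments, config);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
rs.prepare(config);
rs.schedule(new Topologies(topoMap), cluster);
// the pre-assigned executor should keep its slot
WorkerSlot kept = cluster.getAssignmentById(topology.getId()).getExecutorToSlot().get(new ExecutorDetails(0, 0));
Assert.assertEquals(new WorkerSlot("sup-0", 0), kept);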