Use of org.apache.storm.scheduler.Cluster in project storm by apache.
In the class TestDefaultEvictionStrategy, the method TestEvictMultipleTopologies.
@Test
public void TestEvictMultipleTopologies() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<String, Number>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(4, 4, resourceMap);
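// Assuming genSupervisors(numSupervisors, numPorts, resources), this yields 4 supervisors with
// 4 worker slots each, i.e. a cluster total of 400% CPU and 4000 MB of memory.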
Config config = new Config();
config.putAll(Utils.readDefaultConfig());
config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
Map<String, Map<String, Number>> resourceUserPool = new HashMap<String, Map<String, Number>>();
resourceUserPool.put("jerry", new HashMap<String, Number>());
resourceUserPool.get("jerry").put("cpu", 200.0);
resourceUserPool.get("jerry").put("memory", 2000.0);
resourceUserPool.put("derek", new HashMap<String, Number>());
resourceUserPool.get("derek").put("cpu", 100.0);
resourceUserPool.get("derek").put("memory", 1000.0);
config.put(Config.RESOURCE_AWARE_SCHEDULER_USER_POOLS, resourceUserPool);
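// jerry is guaranteed 200% CPU / 2000 MB and derek 100% CPU / 1000 MB; bobby has no entry in the
// pool, so his topologies have no resource guarantee and are the first candidates for eviction.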
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
TopologyDetails topo1 = TestUtilsForResourceAwareScheduler.getTopology("topo-1", config, 2, 0, 1, 0, currentTime - 2, 10);
config.put(Config.TOPOLOGY_SUBMITTER_USER, "bobby");
TopologyDetails topo2 = TestUtilsForResourceAwareScheduler.getTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 10);
TopologyDetails topo3 = TestUtilsForResourceAwareScheduler.getTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 20);
config.put(Config.TOPOLOGY_SUBMITTER_USER, "derek");
TopologyDetails topo4 = TestUtilsForResourceAwareScheduler.getTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 29);
TopologyDetails topo5 = TestUtilsForResourceAwareScheduler.getTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 29);
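// Editorial note, assuming the helper's signature is getTopology(name, conf, numSpouts, numBolts,
// spoutParallelism, boltParallelism, launchTime, priority): every topology here has one
// 100% CPU / 1000 MB executor except topo-1, which has two, and a lower priority number means a
// higher scheduling priority.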
Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
topoMap.put(topo2.getId(), topo2);
topoMap.put(topo3.getId(), topo3);
topoMap.put(topo4.getId(), topo4);
topoMap.put(topo5.getId(), topo5);
Topologies topologies = new Topologies(topoMap);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
rs.prepare(config);
rs.schedule(topologies, cluster);
for (TopologyDetails topo : rs.getUser("derek").getTopologiesRunning()) {
Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
}
Assert.assertEquals("# of running topologies", 2, rs.getUser("derek").getTopologiesRunning().size());
Assert.assertEquals("# of pending topologies", 0, rs.getUser("derek").getTopologiesPending().size());
Assert.assertEquals("# of attempted topologies", 0, rs.getUser("derek").getTopologiesAttempted().size());
Assert.assertEquals("# of invalid topologies", 0, rs.getUser("derek").getTopologiesInvalid().size());
for (TopologyDetails topo : rs.getUser("bobby").getTopologiesRunning()) {
Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
}
Assert.assertEquals("# of running topologies", 2, rs.getUser("bobby").getTopologiesRunning().size());
Assert.assertEquals("# of pending topologies", 0, rs.getUser("bobby").getTopologiesPending().size());
Assert.assertEquals("# of invalid topologies", 0, rs.getUser("bobby").getTopologiesInvalid().size());
Assert.assertEquals("# of attempted topologies", 0, rs.getUser("bobby").getTopologiesAttempted().size());
//user jerry submits another topology
topoMap.put(topo1.getId(), topo1);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster);
for (TopologyDetails topo : rs.getUser("jerry").getTopologiesRunning()) {
Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
}
Assert.assertEquals("# of running topologies", 1, rs.getUser("jerry").getTopologiesRunning().size());
Assert.assertEquals("# of pending topologies", 0, rs.getUser("jerry").getTopologiesPending().size());
Assert.assertEquals("# of attempted topologies", 0, rs.getUser("jerry").getTopologiesAttempted().size());
Assert.assertEquals("# of invalid topologies", 0, rs.getUser("jerry").getTopologiesInvalid().size());
for (TopologyDetails topo : rs.getUser("derek").getTopologiesRunning()) {
Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
}
Assert.assertEquals("# of running topologies", 2, rs.getUser("derek").getTopologiesRunning().size());
Assert.assertEquals("# of pending topologies", 0, rs.getUser("derek").getTopologiesPending().size());
Assert.assertEquals("# of attempted topologies", 0, rs.getUser("derek").getTopologiesAttempted().size());
Assert.assertEquals("# of invalid topologies", 0, rs.getUser("derek").getTopologiesInvalid().size());
for (TopologyDetails topo : rs.getUser("bobby").getTopologiesAttempted()) {
Assert.assertFalse("topology should not be scheduled successfully", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
}
Assert.assertEquals("# of attempted topologies", 2, rs.getUser("bobby").getTopologiesAttempted().size());
Assert.assertEquals("# of pending topologies", 0, rs.getUser("bobby").getTopologiesPending().size());
Assert.assertEquals("# of invalid topologies", 0, rs.getUser("bobby").getTopologiesInvalid().size());
Assert.assertEquals("# of running topologies", 0, rs.getUser("bobby").getTopologiesRunning().size());
}
Use of org.apache.storm.scheduler.Cluster in project storm by apache.
In the class TestDefaultResourceAwareStrategy, the method testMultipleRacks.
/**
* Test whether the strategy will choose the correct rack.
*/
@Test
public void testMultipleRacks() {
final Map<String, SupervisorDetails> supMap = new HashMap<String, SupervisorDetails>();
Map<String, Number> resourceMap = new HashMap<String, Number>();
//generate a rack of supervisors
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 8000.0);
final Map<String, SupervisorDetails> supMapRack1 = TestUtilsForResourceAwareScheduler.genSupervisors(10, 4, 0, resourceMap);
//generate another rack of supervisors with fewer resources
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 200.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 4000.0);
final Map<String, SupervisorDetails> supMapRack2 = TestUtilsForResourceAwareScheduler.genSupervisors(10, 4, 10, resourceMap);
//generate some supervisors that are depleted of one resource
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 0.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 8000.0);
final Map<String, SupervisorDetails> supMapRack3 = TestUtilsForResourceAwareScheduler.genSupervisors(10, 4, 20, resourceMap);
//generate some supervisors that have a lot of memory but little CPU
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 10.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 8000.0 * 2 + 4000.0);
final Map<String, SupervisorDetails> supMapRack4 = TestUtilsForResourceAwareScheduler.genSupervisors(10, 4, 30, resourceMap);
//generate some supervisors that have a lot of CPU but little memory
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0 + 200.0 + 10.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
final Map<String, SupervisorDetails> supMapRack5 = TestUtilsForResourceAwareScheduler.genSupervisors(10, 4, 40, resourceMap);
supMap.putAll(supMapRack1);
supMap.putAll(supMapRack2);
supMap.putAll(supMapRack3);
supMap.putAll(supMapRack4);
supMap.putAll(supMapRack5);
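// Approximate aggregate resources per rack (10 supervisors each, assuming the helper's signature
// is genSupervisors(numSupervisors, numPorts, startIndex, resources)):
//   rack-0: 4000% CPU /  80000 MB (largest and balanced)
//   rack-1: 2000% CPU /  40000 MB (balanced but smaller)
//   rack-2:    0% CPU /  80000 MB (no CPU at all)
//   rack-3:  100% CPU / 200000 MB (memory-heavy)
//   rack-4: 6100% CPU /  10000 MB (CPU-heavy)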
Config config = new Config();
config.putAll(Utils.readDefaultConfig());
config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500);
config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500);
config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
//create test DNSToSwitchMapping plugin
DNSToSwitchMapping TestNetworkTopographyPlugin = new DNSToSwitchMapping() {
@Override
public Map<String, String> resolve(List<String> names) {
Map<String, String> ret = new HashMap<String, String>();
for (SupervisorDetails sup : supMapRack1.values()) {
ret.put(sup.getHost(), "rack-0");
}
for (SupervisorDetails sup : supMapRack2.values()) {
ret.put(sup.getHost(), "rack-1");
}
for (SupervisorDetails sup : supMapRack3.values()) {
ret.put(sup.getHost(), "rack-2");
}
for (SupervisorDetails sup : supMapRack4.values()) {
ret.put(sup.getHost(), "rack-3");
}
for (SupervisorDetails sup : supMapRack5.values()) {
ret.put(sup.getHost(), "rack-4");
}
return ret;
}
};
List<String> supHostnames = new LinkedList<>();
for (SupervisorDetails sup : supMap.values()) {
supHostnames.add(sup.getHost());
}
Map<String, List<String>> rackToNodes = new HashMap<>();
Map<String, String> resolvedSuperVisors = TestNetworkTopographyPlugin.resolve(supHostnames);
for (Map.Entry<String, String> entry : resolvedSuperVisors.entrySet()) {
String hostName = entry.getKey();
String rack = entry.getValue();
List<String> nodesForRack = rackToNodes.get(rack);
if (nodesForRack == null) {
nodesForRack = new ArrayList<String>();
rackToNodes.put(rack, nodesForRack);
}
nodesForRack.add(hostName);
}
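// Editorial note: the grouping loop above can be written more compactly on Java 8+ as
//   rackToNodes.computeIfAbsent(rack, k -> new ArrayList<>()).add(hostName);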
cluster.setNetworkTopography(rackToNodes);
//generate topologies
Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
TopologyDetails topo1 = TestUtilsForResourceAwareScheduler.getTopology("topo-1", config, 8, 0, 2, 0, currentTime - 2, 10);
TopologyDetails topo2 = TestUtilsForResourceAwareScheduler.getTopology("topo-2", config, 8, 0, 2, 0, currentTime - 2, 10);
topoMap.put(topo1.getId(), topo1);
Topologies topologies = new Topologies(topoMap);
DefaultResourceAwareStrategy rs = new DefaultResourceAwareStrategy();
rs.prepare(new SchedulingState(new HashMap<String, User>(), cluster, topologies, config));
TreeSet<ObjectResources> sortedRacks = rs.sortRacks(topo1.getId(), new HashMap<WorkerSlot, Collection<ExecutorDetails>>());
Assert.assertEquals("# of racks sorted", 5, sortedRacks.size());
Iterator<ObjectResources> it = sortedRacks.iterator();
// Ranked first since rack-0 has the most balanced set of resources
Assert.assertEquals("rack-0 should be ordered first", "rack-0", it.next().id);
// Ranked second since rack-1 also has a balanced set of resources, but fewer of them than rack-0
Assert.assertEquals("rack-1 should be ordered second", "rack-1", it.next().id);
// Ranked third since rack-4 has a lot of CPU but not a lot of memory
Assert.assertEquals("rack-4 should be ordered third", "rack-4", it.next().id);
// Ranked fourth since rack-3 has a lot of memory but not a lot of CPU
Assert.assertEquals("rack-3 should be ordered fourth", "rack-3", it.next().id);
// Ranked last since rack-2 has no CPU resources
Assert.assertEquals("rack-2 should be ordered fifth", "rack-2", it.next().id);
SchedulingResult schedulingResult = rs.schedule(topo1);
for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry : schedulingResult.getSchedulingResultMap().entrySet()) {
WorkerSlot ws = entry.getKey();
Collection<ExecutorDetails> execs = entry.getValue();
//make sure all workers are scheduled on rack-0
Assert.assertEquals("assert worker scheduled on rack-0", "rack-0", resolvedSuperVisors.get(rs.idToNode(ws.getNodeId()).getHostname()));
// make actual assignments
cluster.assign(ws, topo1.getId(), execs);
}
Assert.assertEquals("All executors in topo-1 scheduled", 0, cluster.getUnassignedExecutors(topo1).size());
//Test the case where the topology is already partially scheduled on one rack
topoMap.put(topo2.getId(), topo2);
topologies = new Topologies(topoMap);
RAS_Nodes nodes = new RAS_Nodes(cluster, topologies);
Iterator<ExecutorDetails> executorIterator = topo2.getExecutors().iterator();
List<String> nodeHostnames = rackToNodes.get("rack-1");
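// pin half of topo-2's executors onto rack-1 nodes round-robin; the strategy should then favor
// rack-1 for the remaining executors to keep the topology on a single rack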
for (int i = 0; i < topo2.getExecutors().size() / 2; i++) {
String nodeHostname = nodeHostnames.get(i % nodeHostnames.size());
RAS_Node node = rs.idToNode(rs.NodeHostnameToId(nodeHostname));
WorkerSlot targetSlot = node.getFreeSlots().iterator().next();
ExecutorDetails targetExec = executorIterator.next();
// assign on the RAS_Node so that its free-slot bookkeeping stays accurate
node.assign(targetSlot, topo2, Arrays.asList(targetExec));
// record the actual assignment in the cluster
cluster.assign(targetSlot, topo2.getId(), Arrays.asList(targetExec));
}
rs = new DefaultResourceAwareStrategy();
rs.prepare(new SchedulingState(new HashMap<String, User>(), cluster, topologies, config));
// schedule topo2
schedulingResult = rs.schedule(topo2);
// checking assignments
for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry : schedulingResult.getSchedulingResultMap().entrySet()) {
WorkerSlot ws = entry.getKey();
Collection<ExecutorDetails> execs = entry.getValue();
//make sure all workers are scheduled on rack-1
Assert.assertEquals("assert worker scheduled on rack-1", "rack-1", resolvedSuperVisors.get(rs.idToNode(ws.getNodeId()).getHostname()));
// make actual assignments
cluster.assign(ws, topo2.getId(), execs);
}
Assert.assertEquals("All executors in topo-2 scheduled", 0, cluster.getUnassignedExecutors(topo1).size());
}
Use of org.apache.storm.scheduler.Cluster in project storm by apache.
In the class TestDefaultResourceAwareStrategy, the method testDefaultResourceAwareStrategy.
/**
* Test whether the scheduling logic of the DefaultResourceAwareStrategy is correct.
*/
@Test
public void testDefaultResourceAwareStrategy() {
int spoutParallelism = 1;
int boltParallelism = 2;
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", new TestUtilsForResourceAwareScheduler.TestSpout(), spoutParallelism);
builder.setBolt("bolt-1", new TestUtilsForResourceAwareScheduler.TestBolt(), boltParallelism).shuffleGrouping("spout");
builder.setBolt("bolt-2", new TestUtilsForResourceAwareScheduler.TestBolt(), boltParallelism).shuffleGrouping("bolt-1");
builder.setBolt("bolt-3", new TestUtilsForResourceAwareScheduler.TestBolt(), boltParallelism).shuffleGrouping("bolt-2");
StormTopology stormTopology = builder.createTopology();
Config conf = new Config();
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<String, Number>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 150.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1500.0);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(4, 4, resourceMap);
conf.putAll(Utils.readDefaultConfig());
conf.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
conf.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
conf.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
conf.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 50.0);
conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 250);
conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 250);
conf.put(Config.TOPOLOGY_PRIORITY, 0);
conf.put(Config.TOPOLOGY_NAME, "testTopology");
conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
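// 1 spout executor + 3 bolts x 2 executors = 7 executors, each needing 50% CPU and 500 MB.
// A node offers 150% CPU / 1500 MB, i.e. room for exactly 3 executors, so a valid schedule
// places 3 + 3 + 1 executors on three of the four nodes, which the assertions below verify.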
TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology), this.currentTime);
Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
topoMap.put(topo.getId(), topo);
Topologies topologies = new Topologies(topoMap);
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), conf);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
rs.prepare(conf);
rs.schedule(topologies, cluster);
Map<String, List<String>> nodeToComps = new HashMap<String, List<String>>();
for (Map.Entry<ExecutorDetails, WorkerSlot> entry : cluster.getAssignments().get("testTopology-id").getExecutorToSlot().entrySet()) {
WorkerSlot ws = entry.getValue();
ExecutorDetails exec = entry.getKey();
if (!nodeToComps.containsKey(ws.getNodeId())) {
nodeToComps.put(ws.getNodeId(), new LinkedList<String>());
}
nodeToComps.get(ws.getNodeId()).add(topo.getExecutorToComponent().get(exec));
}
/**
* Check for correct scheduling.
* Since all the resource availabilities on the nodes are the same in the beginning,
* DefaultResourceAwareStrategy can arbitrarily pick one, so we must check whether a particular
* scheduling exists on some node in the cluster.
*/
//one node should have the below scheduling
List<String> node1 = new LinkedList<>();
node1.add("spout");
node1.add("bolt-1");
node1.add("bolt-2");
Assert.assertTrue("Check DefaultResourceAwareStrategy scheduling", checkDefaultStrategyScheduling(nodeToComps, node1));
//one node should have the below scheduling
List<String> node2 = new LinkedList<>();
node2.add("bolt-3");
node2.add("bolt-1");
node2.add("bolt-2");
Assert.assertTrue("Check DefaultResourceAwareStrategy scheduling", checkDefaultStrategyScheduling(nodeToComps, node2));
//one node should have the below scheduling
List<String> node3 = new LinkedList<>();
node3.add("bolt-3");
Assert.assertTrue("Check DefaultResourceAwareStrategy scheduling", checkDefaultStrategyScheduling(nodeToComps, node3));
//three nodes are used and one node should be empty
Assert.assertEquals("only three nodes should be used", 3, nodeToComps.size());
}
Use of org.apache.storm.scheduler.Cluster in project storm by apache.
In the class TestResourceAwareScheduler, the method testHandlingClusterSubscription.
@Test
public void testHandlingClusterSubscription() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<String, Number>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 200.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1024.0 * 10);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(1, 4, resourceMap);
Config config = new Config();
config.putAll(Utils.readDefaultConfig());
config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
Map<String, Map<String, Number>> resourceUserPool = new HashMap<String, Map<String, Number>>();
resourceUserPool.put("jerry", new HashMap<String, Number>());
resourceUserPool.get("jerry").put("cpu", 1000);
resourceUserPool.get("jerry").put("memory", 8192.0);
resourceUserPool.put("bobby", new HashMap<String, Number>());
resourceUserPool.get("bobby").put("cpu", 10000.0);
resourceUserPool.get("bobby").put("memory", 32768);
resourceUserPool.put("derek", new HashMap<String, Number>());
resourceUserPool.get("derek").put("cpu", 5000.0);
resourceUserPool.get("derek").put("memory", 16384.0);
config.put(Config.RESOURCE_AWARE_SCHEDULER_USER_POOLS, resourceUserPool);
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config);
config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
TopologyDetails topo1 = TestUtilsForResourceAwareScheduler.getTopology("topo-1", config, 5, 15, 1, 1, currentTime - 2, 20);
TopologyDetails topo2 = TestUtilsForResourceAwareScheduler.getTopology("topo-2", config, 5, 15, 1, 1, currentTime - 8, 29);
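// With the per-executor defaults from readDefaultConfig() (assumed here to be 10% CPU and
// 128 MB on-heap), each topology's 20 executors consume the supervisor's entire 200% CPU,
// so only one of jerry's two topologies can be fully scheduled.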
Map<String, TopologyDetails> topoMap = new HashMap<String, TopologyDetails>();
topoMap.put(topo1.getId(), topo1);
topoMap.put(topo2.getId(), topo2);
Topologies topologies = new Topologies(topoMap);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
rs.prepare(config);
rs.schedule(topologies, cluster);
int fullyScheduled = 0;
for (TopologyDetails topo : topoMap.values()) {
if (TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId()))) {
fullyScheduled++;
}
}
Assert.assertEquals("# of Fully scheduled", 1, fullyScheduled);
Assert.assertEquals("# of topologies schedule attempted", 1, rs.getUser("jerry").getTopologiesAttempted().size());
Assert.assertEquals("# of topologies running", 1, rs.getUser("jerry").getTopologiesRunning().size());
Assert.assertEquals("# of topologies schedule pending", 0, rs.getUser("jerry").getTopologiesPending().size());
}
Use of org.apache.storm.scheduler.Cluster in project storm by apache.
In the class TestResourceAwareScheduler, the method testScheduleResilience.
@Test
public void testScheduleResilience() {
INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
Map<String, Number> resourceMap = new HashMap<>();
resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 400.0);
resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 2000.0);
Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(2, 2, resourceMap);
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout1", new TestWordSpout(), 3);
StormTopology stormTopology1 = builder1.createTopology();
Config config1 = new Config();
config1.putAll(defaultTopologyConf);
Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config1, stormTopology1, 3, executorMap1, 0);
TopologyBuilder builder2 = new TopologyBuilder();
builder2.setSpout("wordSpout2", new TestWordSpout(), 2);
StormTopology stormTopology2 = builder2.createTopology();
Config config2 = new Config();
config2.putAll(defaultTopologyConf);
// memory requirement is large enough so that two executors can not be fully assigned to one node
config2.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 1280.0);
Map<ExecutorDetails, String> executorMap2 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology2);
TopologyDetails topology2 = new TopologyDetails("topology2", config2, stormTopology2, 2, executorMap2, 0);
// Test1: When a worker fails, RAS does not alter existing assignments on healthy workers
Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
Map<String, TopologyDetails> topoMap = new HashMap<>();
topoMap.put(topology2.getId(), topology2);
Topologies topologies = new Topologies(topoMap);
rs.prepare(config1);
rs.schedule(topologies, cluster);
SchedulerAssignmentImpl assignment = (SchedulerAssignmentImpl) cluster.getAssignmentById(topology2.getId());
// pick a worker to mock as failed
WorkerSlot failedWorker = new ArrayList<WorkerSlot>(assignment.getSlots()).get(0);
Map<ExecutorDetails, WorkerSlot> executorToSlot = assignment.getExecutorToSlot();
List<ExecutorDetails> failedExecutors = new ArrayList<>();
for (Map.Entry<ExecutorDetails, WorkerSlot> entry : executorToSlot.entrySet()) {
if (entry.getValue().equals(failedWorker)) {
failedExecutors.add(entry.getKey());
}
}
for (ExecutorDetails executor : failedExecutors) {
// remove executor details assigned to the failed worker
executorToSlot.remove(executor);
}
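// only the executors that ran on the healthy worker remain in the assignment; the next
// scheduling pass must place the failed executors without moving the healthy ones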
Map<ExecutorDetails, WorkerSlot> copyOfOldMapping = new HashMap<>(executorToSlot);
Set<ExecutorDetails> healthyExecutors = copyOfOldMapping.keySet();
rs.schedule(topologies, cluster);
SchedulerAssignment newAssignment = cluster.getAssignmentById(topology2.getId());
Map<ExecutorDetails, WorkerSlot> newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : healthyExecutors) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology2.getId()));
// end of Test1
// Test2: When a supervisor fails, RAS does not alter existing assignments
executorToSlot = new HashMap<>();
executorToSlot.put(new ExecutorDetails(0, 0), new WorkerSlot("sup-0", 0));
executorToSlot.put(new ExecutorDetails(1, 1), new WorkerSlot("sup-0", 1));
executorToSlot.put(new ExecutorDetails(2, 2), new WorkerSlot("sup-1", 1));
Map<String, SchedulerAssignmentImpl> existingAssignments = new HashMap<>();
assignment = new SchedulerAssignmentImpl(topology1.getId(), executorToSlot);
existingAssignments.put(topology1.getId(), assignment);
copyOfOldMapping = new HashMap<>(executorToSlot);
Set<ExecutorDetails> existingExecutors = copyOfOldMapping.keySet();
Map<String, SupervisorDetails> supMap1 = new HashMap<>(supMap);
// mock the supervisor sup-0 as a failed supervisor
supMap1.remove("sup-0");
Cluster cluster1 = new Cluster(iNimbus, supMap1, existingAssignments, config1);
topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
newAssignment = cluster1.getAssignmentById(topology1.getId());
newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : existingExecutors) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Fully Scheduled", cluster1.getStatusMap().get(topology1.getId()));
// end of Test2
// Test3: When a supervisor and a worker on it fails, RAS does not alter existing assignments
executorToSlot = new HashMap<>();
// the worker to orphan
executorToSlot.put(new ExecutorDetails(0, 0), new WorkerSlot("sup-0", 1));
// the worker that fails
executorToSlot.put(new ExecutorDetails(1, 1), new WorkerSlot("sup-0", 2));
// the healthy worker
executorToSlot.put(new ExecutorDetails(2, 2), new WorkerSlot("sup-1", 1));
existingAssignments = new HashMap<>();
assignment = new SchedulerAssignmentImpl(topology1.getId(), executorToSlot);
existingAssignments.put(topology1.getId(), assignment);
// remove the executor on the failed worker from topology1's assignment so that rescheduling actually has work to do
executorToSlot.remove(new ExecutorDetails(1, 1));
copyOfOldMapping = new HashMap<>(executorToSlot);
// namely the executor on the orphaned worker and the one on the healthy worker
existingExecutors = copyOfOldMapping.keySet();
supMap1 = new HashMap<>(supMap);
// mock the supervisor sup-0 as a failed supervisor
supMap1.remove("sup-0");
cluster1 = new Cluster(iNimbus, supMap1, existingAssignments, config1);
topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
newAssignment = cluster1.getAssignmentById(topology1.getId());
newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : existingExecutors) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Fully Scheduled", cluster1.getStatusMap().get(topology1.getId()));
// end of Test3
// Test4: Scheduling a new topology does not disturb other assignments unnecessarily
cluster1 = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
topoMap = new HashMap<>();
topoMap.put(topology1.getId(), topology1);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
assignment = (SchedulerAssignmentImpl) cluster1.getAssignmentById(topology1.getId());
executorToSlot = assignment.getExecutorToSlot();
copyOfOldMapping = new HashMap<>(executorToSlot);
topoMap.put(topology2.getId(), topology2);
topologies = new Topologies(topoMap);
rs.schedule(topologies, cluster1);
newAssignment = (SchedulerAssignmentImpl) cluster1.getAssignmentById(topology1.getId());
newExecutorToSlot = newAssignment.getExecutorToSlot();
for (ExecutorDetails executor : copyOfOldMapping.keySet()) {
Assert.assertEquals(copyOfOldMapping.get(executor), newExecutorToSlot.get(executor));
}
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster1.getStatusMap().get(topology1.getId()));
Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster1.getStatusMap().get(topology2.getId()));
}