use of org.apache.storm.scheduler.Cluster in project storm by apache.
the class TestGenericResourceAwareSchedulingPriorityStrategy method testDefaultSchedulingPriorityStrategyEvicting.
/*
 * DefaultSchedulingPriorityStrategy does not take generic resources into account when calculating its score,
 * so even if a user requests a lot of generic resources beyond CPU and memory, the scheduler will still
 * score the topology very low and evict other topologies.
 *
 * Ethan asks for moderate CPU and memory, while Rui asks for little CPU and memory but a heavy generic resource.
 * Rui's generic request cannot be met, and because the default scoring system ignores generic resources,
 * the score of Rui's new topology will be much lower than that of all of Ethan's topologies.
 * All of Ethan's topologies are then evicted in an attempt to make room for Rui.
 * (A sketch of such a CPU/memory-only score follows this test.)
 */
@Test
public void testDefaultSchedulingPriorityStrategyEvicting() {
    Map<String, Double> requestedgenericResourcesMap = new HashMap<>();
    requestedgenericResourcesMap.put("generic.resource.1", 40.0);
    Config ruiConf = createGrasClusterConfig(10, 10, 10, null, requestedgenericResourcesMap);
    Config ethanConf = createGrasClusterConfig(60, 200, 300, null, Collections.emptyMap());
    Topologies topologies = new Topologies(
        genTopology("ethan-topo-1", ethanConf, 1, 0, 1, 0, currentTime - 2, 10, "ethan"),
        genTopology("ethan-topo-2", ethanConf, 1, 0, 1, 0, currentTime - 2, 20, "ethan"),
        genTopology("ethan-topo-3", ethanConf, 1, 0, 1, 0, currentTime - 2, 28, "ethan"),
        genTopology("ethan-topo-4", ethanConf, 1, 0, 1, 0, currentTime - 2, 29, "ethan"));
    Topologies withNewTopo = addTopologies(topologies,
        genTopology("rui-topo-1", ruiConf, 1, 0, 5, 0, currentTime - 2, 10, "rui"));
    Config config = mkClusterConfig(DefaultSchedulingPriorityStrategy.class.getName());
    Cluster cluster = mkTestCluster(topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    cluster = new Cluster(cluster, withNewTopo);
    scheduler.schedule(withNewTopo, cluster);
    Map<String, Set<String>> evictedTopos = ((ResourceAwareScheduler) scheduler).getEvictedTopologiesMap();
    assertTopologiesFullyScheduled(cluster, "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    assertTopologiesBeenEvicted(cluster, collectMapValues(evictedTopos), "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    assertTopologiesNotScheduled(cluster, "rui-topo-1");
}
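The eviction above falls out of a priority score computed from CPU and memory alone. A minimal sketch of such a CPU/memory-only score, with hypothetical names and pool sizes (this is not the actual DefaultSchedulingPriorityStrategy implementation):

// Hypothetical sketch: score a topology by its CPU/memory footprint relative to the
// user's resource pool. Generic resources never enter the calculation, so a topology
// requesting 40 units of "generic.resource.1" can still receive a very low score.
static double priorityScore(double requestedCpu, double requestedMemMb,
                            double pooledCpu, double pooledMemMb) {
    // Lower score wins: a small CPU/memory footprint beats a large one,
    // regardless of any generic-resource demand.
    return Math.max(requestedCpu / pooledCpu, requestedMemMb / pooledMemMb);
}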
use of org.apache.storm.scheduler.Cluster in project storm by apache.
the class TestConstraintSolverStrategy method testZeroExecutorScheduling.
@Test
public void testZeroExecutorScheduling() {
    ConstraintSolverStrategy cs = new ConstraintSolverStrategy();
    cs.prepare(new HashMap<>());
    Map<String, Object> topoConf = Utils.readDefaultConfig();
    topoConf.put(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH, 1_000);
    topoConf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, false);
    topoConf.put(Config.TOPOLOGY_RAS_ONE_COMPONENT_PER_WORKER, false);
    TopologyDetails topo = makeTopology(topoConf, 1);
    Cluster cluster = makeCluster(new Topologies(topo));
    cs.schedule(cluster, topo);
    LOG.info("********************* Scheduling Zero Unassigned Executors *********************");
    // reschedule a fully scheduled topology
    cs.schedule(cluster, topo);
    LOG.info("********************* End of Scheduling Zero Unassigned Executors *********************");
}
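A hedged extra check one could append (not part of the original test) to make the zero-unassigned expectation explicit rather than relying on the second schedule() simply not throwing:

// Hypothetical follow-up assertion: after the first schedule() the topology is fully
// assigned, so the second pass should have found nothing left to place.
Assert.assertEquals("no executors should remain unassigned",
    0, cluster.getUnassignedExecutors(topo).size());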
use of org.apache.storm.scheduler.Cluster in project storm by apache.
the class TestConstraintSolverStrategy method basicUnitTestWithKillAndRecover.
public void basicUnitTestWithKillAndRecover(ConstraintSolverStrategy cs, int boltParallel, int coLocationCnt) {
    Map<String, Object> config = makeTestTopoConf(coLocationCnt);
    cs.prepare(config);
    TopologyDetails topo = makeTopology(config, boltParallel);
    Topologies topologies = new Topologies(topo);
    Cluster cluster = makeCluster(topologies);
    LOG.info("Scheduling...");
    SchedulingResult result = cs.schedule(cluster, topo);
    LOG.info("Done scheduling {}...", result);
    Assert.assertTrue("Assert scheduling topology success " + result, result.isSuccess());
    Assert.assertEquals("Assert no unassigned executors, found unassigned: " + cluster.getUnassignedExecutors(topo),
        0, cluster.getUnassignedExecutors(topo).size());
    Assert.assertTrue("Valid Scheduling?", ConstraintSolverStrategy.validateSolution(cluster, topo));
    LOG.info("Slots Used {}", cluster.getAssignmentById(topo.getId()).getSlots());
    LOG.info("Assignment {}", cluster.getAssignmentById(topo.getId()).getSlotToExecutors());
    // simulate worker loss by freeing every other assigned slot
    SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
    Set<WorkerSlot> slotsToDelete = new HashSet<>();
    Set<WorkerSlot> slots = assignment.getSlots();
    int i = 0;
    for (WorkerSlot slot : slots) {
        if (i % 2 == 0) {
            slotsToDelete.add(slot);
        }
        i++;
    }
    LOG.info("KILL WORKER(s) {}", slotsToDelete);
    for (WorkerSlot slot : slotsToDelete) {
        cluster.freeSlot(slot);
    }
    cs = new ConstraintSolverStrategy();
    cs.prepare(config);
    LOG.info("Scheduling again...");
    result = cs.schedule(cluster, topo);
    LOG.info("Done scheduling {}...", result);
    Assert.assertTrue("Assert scheduling topology success " + result, result.isSuccess());
    Assert.assertEquals("topo all executors scheduled?", 0, cluster.getUnassignedExecutors(topo).size());
    Assert.assertTrue("Valid Scheduling?", ConstraintSolverStrategy.validateSolution(cluster, topo));
}
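A typical invocation, with hypothetical parallelism and co-location values (the real callers in TestConstraintSolverStrategy pass their own constants):

// Schedule 3 bolt executors with a co-location count of 1, kill half the workers,
// and verify the solver can recover the lost assignments.
basicUnitTestWithKillAndRecover(new ConstraintSolverStrategy(), 3, 1);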
use of org.apache.storm.scheduler.Cluster in project storm by apache.
the class TestConstraintSolverStrategy method testConstraintSolverForceBacktrackWithSpreadCoLocation.
@Test
public void testConstraintSolverForceBacktrackWithSpreadCoLocation() {
    // Reversing the executor sort order (see the anonymous subclass below) forces the solver
    // to backtrack before it can find an answer.
    if (CO_LOCATION_CNT > 1 && !consolidatedConfigFlag) {
        LOG.info("INFO: Skipping Test {} with {}={} (required 1), and consolidatedConfigFlag={} (required false)",
            "testConstraintSolverForceBacktrackWithSpreadCoLocation",
            ConstraintSolverConfig.CONSTRAINT_TYPE_MAX_NODE_CO_LOCATION_CNT, CO_LOCATION_CNT, consolidatedConfigFlag);
        return;
    }
    ConstraintSolverStrategy cs = new ConstraintSolverStrategy() {
        @Override
        protected void prepareForScheduling(Cluster cluster, TopologyDetails topologyDetails) {
            super.prepareForScheduling(cluster, topologyDetails);
            // install an execSorter instance that reverses the normal sort order
            IExecSorter execSorter = new ExecSorterByConstraintSeverity(cluster, topologyDetails) {
                @Override
                public List<ExecutorDetails> sortExecutors(Set<ExecutorDetails> unassignedExecutors) {
                    List<ExecutorDetails> tmp = super.sortExecutors(unassignedExecutors);
                    List<ExecutorDetails> reversed = new ArrayList<>();
                    while (!tmp.isEmpty()) {
                        reversed.add(0, tmp.remove(0));
                    }
                    return reversed;
                }
            };
            setExecSorter(execSorter);
        }
    };
    basicUnitTestWithKillAndRecover(cs, BACKTRACK_BOLT_PARALLEL, CO_LOCATION_CNT);
}
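The hand-rolled reversal above is quadratic, since every add(0, ...) and remove(0) shifts the whole list. A behavior-equivalent override of sortExecutors using the standard library (a sketch, not the project's actual code; assumes an extra java.util.Collections import):

@Override
public List<ExecutorDetails> sortExecutors(Set<ExecutorDetails> unassignedExecutors) {
    // Copy the sorted list, then reverse it in place in linear time.
    List<ExecutorDetails> reversed = new ArrayList<>(super.sortExecutors(unassignedExecutors));
    Collections.reverse(reversed);
    return reversed;
}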
use of org.apache.storm.scheduler.Cluster in project storm by apache.
the class TestDefaultResourceAwareStrategy method testMultipleRacks.
/**
 * Test whether the strategy will choose the correct rack.
 * (A sketch of a minimal rack-mapping plugin follows this test.)
 */
@Test
public void testMultipleRacks() {
    final Map<String, SupervisorDetails> supMap = new HashMap<>();
    final Map<String, SupervisorDetails> supMapRack0 = genSupervisors(10, 4, 0, 400, 8000);
    // generate another rack of supervisors with fewer resources
    final Map<String, SupervisorDetails> supMapRack1 = genSupervisors(10, 4, 10, 200, 4000);
    // generate some supervisors that are depleted of one resource
    final Map<String, SupervisorDetails> supMapRack2 = genSupervisors(10, 4, 20, 0, 8000);
    // generate some that have a lot of memory but little CPU
    final Map<String, SupervisorDetails> supMapRack3 = genSupervisors(10, 4, 30, 10, 8000 * 2 + 4000);
    // generate some that have a lot of CPU but little memory
    final Map<String, SupervisorDetails> supMapRack4 = genSupervisors(10, 4, 40, 400 + 200 + 10, 1000);
    // Generate some that have neither resource, to verify that the strategy will prioritize them last.
    // Also put a generic resource with a value of 0 in the resources list, to verify that it doesn't affect the sorting.
    final Map<String, SupervisorDetails> supMapRack5 = genSupervisors(10, 4, 50, 0.0, 0.0, Collections.singletonMap("gpu.count", 0.0));
    supMap.putAll(supMapRack0);
    supMap.putAll(supMapRack1);
    supMap.putAll(supMapRack2);
    supMap.putAll(supMapRack3);
    supMap.putAll(supMapRack4);
    supMap.putAll(supMapRack5);
    Config config = createClusterConfig(100, 500, 500, null);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    INimbus iNimbus = new INimbusTest();
    // create a test DNSToSwitchMapping plugin
    DNSToSwitchMapping TestNetworkTopographyPlugin =
        new TestDNSToSwitchMapping(supMapRack0, supMapRack1, supMapRack2, supMapRack3, supMapRack4, supMapRack5);
    // generate topologies
    TopologyDetails topo1 = genTopology("topo-1", config, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
    TopologyDetails topo2 = genTopology("topo-2", config, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
    Topologies topologies = new Topologies(topo1, topo2);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    List<String> supHostnames = new LinkedList<>();
    for (SupervisorDetails sup : supMap.values()) {
        supHostnames.add(sup.getHost());
    }
    Map<String, List<String>> rackToNodes = new HashMap<>();
    Map<String, String> resolvedSuperVisors = TestNetworkTopographyPlugin.resolve(supHostnames);
    for (Map.Entry<String, String> entry : resolvedSuperVisors.entrySet()) {
        String hostName = entry.getKey();
        String rack = entry.getValue();
        rackToNodes.computeIfAbsent(rack, rid -> new ArrayList<>()).add(hostName);
    }
    cluster.setNetworkTopography(rackToNodes);
    DefaultResourceAwareStrategyOld rs = new DefaultResourceAwareStrategyOld();
    rs.prepareForScheduling(cluster, topo1);
    INodeSorter nodeSorter = new NodeSorterHostProximity(cluster, topo1, BaseResourceAwareStrategy.NodeSortType.DEFAULT_RAS);
    nodeSorter.prepare(null);
    Iterable<ObjectResourcesItem> sortedRacks = nodeSorter.getSortedRacks();
    Iterator<ObjectResourcesItem> it = sortedRacks.iterator();
    // Ranked first since rack-0 has the most balanced set of resources
    Assert.assertEquals("rack-0 should be ordered first", "rack-0", it.next().id);
    // Ranked second since rack-1 has a balanced set of resources, but fewer than rack-0
    Assert.assertEquals("rack-1 should be ordered second", "rack-1", it.next().id);
    // Ranked third since rack-4 has a lot of CPU but not a lot of memory
    Assert.assertEquals("rack-4 should be ordered third", "rack-4", it.next().id);
    // Ranked fourth since rack-3 has a lot of memory but little CPU
    Assert.assertEquals("rack-3 should be ordered fourth", "rack-3", it.next().id);
    // Ranked fifth since rack-2 has no CPU resources
    Assert.assertEquals("rack-2 should be ordered fifth", "rack-2", it.next().id);
    // Ranked last since rack-5 has neither CPU nor memory available
    Assert.assertEquals("rack-5 should be ordered sixth", "rack-5", it.next().id);
    SchedulingResult schedulingResult = rs.schedule(cluster, topo1);
    assert (schedulingResult.isSuccess());
    SchedulerAssignment assignment = cluster.getAssignmentById(topo1.getId());
    for (WorkerSlot ws : assignment.getSlotToExecutors().keySet()) {
        // make sure every worker is scheduled on rack-0
        Assert.assertEquals("assert worker scheduled on rack-0", "rack-0", resolvedSuperVisors.get(rs.idToNode(ws.getNodeId()).getHostname()));
    }
    Assert.assertEquals("All executors in topo-1 scheduled", 0, cluster.getUnassignedExecutors(topo1).size());
    // Test if topology is already partially scheduled on one rack
    Iterator<ExecutorDetails> executorIterator = topo2.getExecutors().iterator();
    List<String> nodeHostnames = rackToNodes.get("rack-1");
    for (int i = 0; i < topo2.getExecutors().size() / 2; i++) {
        String nodeHostname = nodeHostnames.get(i % nodeHostnames.size());
        RasNode node = rs.hostnameToNodes(nodeHostname).get(0);
        WorkerSlot targetSlot = node.getFreeSlots().iterator().next();
        ExecutorDetails targetExec = executorIterator.next();
        // assign through the node so it keeps track of free slots
        node.assign(targetSlot, topo2, Arrays.asList(targetExec));
    }
    rs = new DefaultResourceAwareStrategyOld();
    // schedule topo2
    schedulingResult = rs.schedule(cluster, topo2);
    assert (schedulingResult.isSuccess());
    assignment = cluster.getAssignmentById(topo2.getId());
    for (WorkerSlot ws : assignment.getSlotToExecutors().keySet()) {
        // make sure every worker is scheduled on rack-1
        Assert.assertEquals("assert worker scheduled on rack-1", "rack-1", resolvedSuperVisors.get(rs.idToNode(ws.getNodeId()).getHostname()));
    }
    Assert.assertEquals("All executors in topo-2 scheduled", 0, cluster.getUnassignedExecutors(topo2).size());
}
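The rack ordering and placement above depend entirely on what the DNSToSwitchMapping plugin returns from resolve(). A minimal sketch of such a plugin, using a hypothetical FixedRackMapping class (Storm's actual TestDNSToSwitchMapping instead derives racks from the supervisor maps passed to its constructor):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.networktopography.DNSToSwitchMapping;

// Hypothetical plugin: resolve each hostname to a pre-configured rack id.
public class FixedRackMapping implements DNSToSwitchMapping {
    private final Map<String, String> hostToRack;

    public FixedRackMapping(Map<String, String> hostToRack) {
        this.hostToRack = hostToRack;
    }

    @Override
    public Map<String, String> resolve(List<String> names) {
        Map<String, String> result = new HashMap<>();
        for (String host : names) {
            // Hosts missing from the map fall back to a catch-all rack.
            result.put(host, hostToRack.getOrDefault(host, "default-rack"));
        }
        return result;
    }
}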