Use of org.apache.storm.scheduler.INimbus in project storm by apache.
The class TestNodeSorterHostProximity, method testMultipleRacksWithHostProximity.
/**
 * Test that hosts are presented together regardless of resource availability.
 * Supervisors are created with multiple NUMA zones in such a manner that resources on two NUMA zones
 * on the same host differ widely in resource availability.
 */
@Test
public void testMultipleRacksWithHostProximity() {
    final Map<String, SupervisorDetails> supMap = new HashMap<>();
    final int numRacks = 1;
    final int numSupersPerRack = 12;
    final int numPortsPerSuper = 4;
    final int numZonesPerHost = 3;
    final double numaResourceMultiplier = 0.4;
    int rackStartNum = 0;
    int supStartNum = 0;
    final Map<String, SupervisorDetails> supMapRack0 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            400, 8000, Collections.emptyMap(), numaResourceMultiplier);
    // generate another rack of supervisors with fewer resources
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack1 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            200, 4000, Collections.emptyMap(), numaResourceMultiplier);
    // generate some supervisors that are depleted of one resource
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack2 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            0, 8000, Collections.emptyMap(), numaResourceMultiplier);
    // generate some that have a lot of memory but little CPU
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack3 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            10, 8000 * 2 + 4000, Collections.emptyMap(), numaResourceMultiplier);
    // generate some that have a lot of CPU but little memory
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack4 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            400 + 200 + 10, 1000, Collections.emptyMap(), numaResourceMultiplier);
    supMap.putAll(supMapRack0);
    supMap.putAll(supMapRack1);
    supMap.putAll(supMapRack2);
    supMap.putAll(supMapRack3);
    supMap.putAll(supMapRack4);
    Config config = createClusterConfig(100, 500, 500, null);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    INimbus iNimbus = new INimbusTest();
    // create test DNSToSwitchMapping plugin
    TestDNSToSwitchMapping testDNSToSwitchMapping =
            new TestDNSToSwitchMapping(supMapRack0, supMapRack1, supMapRack2, supMapRack3, supMapRack4);
    Config t1Conf = new Config();
    t1Conf.putAll(config);
    final List<String> t1FavoredHostNames = Arrays.asList("host-41", "host-42", "host-43");
    t1Conf.put(Config.TOPOLOGY_SCHEDULER_FAVORED_NODES, t1FavoredHostNames);
    final List<String> t1UnfavoredHostIds = Arrays.asList("host-1", "host-2", "host-3");
    t1Conf.put(Config.TOPOLOGY_SCHEDULER_UNFAVORED_NODES, t1UnfavoredHostIds);
    // generate topologies
    TopologyDetails topo1 = genTopology("topo-1", t1Conf, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
    Config t2Conf = new Config();
    t2Conf.putAll(config);
    t2Conf.put(Config.TOPOLOGY_SCHEDULER_FAVORED_NODES, Arrays.asList("host-31", "host-32", "host-33"));
    t2Conf.put(Config.TOPOLOGY_SCHEDULER_UNFAVORED_NODES, Arrays.asList("host-11", "host-12", "host-13"));
    TopologyDetails topo2 = genTopology("topo-2", t2Conf, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
    Topologies topologies = new Topologies(topo1, topo2);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap,
            new HashMap<>(), topologies, config);
    cluster.setNetworkTopography(testDNSToSwitchMapping.getRackToHosts());
    INodeSorter nodeSorter = new NodeSorterHostProximity(cluster, topo1);
    nodeSorter.prepare(null);
    Set<String> seenHosts = new HashSet<>();
    String prevHost = null;
    List<String> errLines = new ArrayList<>();
    Map<String, String> nodeToHost = new RasNodes(cluster).getNodeIdToHostname();
    for (String nodeId : nodeSorter.sortAllNodes()) {
        String host = nodeToHost.getOrDefault(nodeId, "no-host-for-node-" + nodeId);
        errLines.add(String.format("\tnodeId:%s, host:%s", nodeId, host));
        // a host is out of order if it resurfaces after other hosts were seen in between
        if (!host.equals(prevHost) && seenHosts.contains(host)) {
            String err = String.format("Host %s for node %s is out of order:\n\t%s",
                    host, nodeId, String.join("\n\t", errLines));
            Assert.fail(err);
        }
        seenHosts.add(host);
        prevHost = host;
    }
}
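The assertion loop above reduces to a generic contiguity check: once iteration has moved past a host, that host must never resurface later in the ordering. A distilled standalone sketch of the same logic (for illustration only; keyOf stands in for the RasNodes node-to-host lookup and is not Storm API):

    // Requires java.util.HashSet, java.util.Set, and java.util.function.Function.
    // Returns true if equal keys appear as one contiguous run in the sorted ids.
    static <T> boolean keysAreContiguous(Iterable<String> sortedIds, Function<String, T> keyOf) {
        Set<T> seen = new HashSet<>();
        T prev = null;
        for (String id : sortedIds) {
            T key = keyOf.apply(id);
            if (!key.equals(prev) && seen.contains(key)) {
                return false; // key resurfaced after an intervening key: not contiguous
            }
            seen.add(key);
            prev = key;
        }
        return true;
    }

With hosts as keys, the test's loop is exactly this predicate plus error reporting.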
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
The class TestNodeSorterHostProximity, method testWithBlackListedHosts.
/**
 * Blacklist all nodes of one rack before sorting nodes.
 * Confirm that {@link NodeSorterHostProximity#sortAllNodes()} still works.
 */
@Test
void testWithBlackListedHosts() {
    INimbus iNimbus = new INimbusTest();
    double compPcore = 100;
    double compOnHeap = 775;
    double compOffHeap = 25;
    int topo1NumSpouts = 1;
    int topo1NumBolts = 5;
    int topo1SpoutParallelism = 100;
    int topo1BoltParallelism = 200;
    final int numSupersPerRack = 10;
    final int numPortsPerSuper = 66;
    long compPerRack = (topo1NumSpouts * topo1SpoutParallelism + topo1NumBolts * topo1BoltParallelism + 10);
    long compPerSuper = compPerRack / numSupersPerRack;
    double cpuPerSuper = compPcore * compPerSuper;
    double memPerSuper = (compOnHeap + compOffHeap) * compPerSuper;
    double topo1MaxHeapSize = memPerSuper;
    final String topoName1 = "topology1";
    int numRacks = 3;
    Map<String, SupervisorDetails> supMap = genSupervisorsWithRacks(
            numRacks, numSupersPerRack, numPortsPerSuper, 0, 0, cpuPerSuper, memPerSuper, new HashMap<>());
    TestDNSToSwitchMapping testDNSToSwitchMapping = new TestDNSToSwitchMapping(supMap.values());
    Config config = new Config();
    config.putAll(createGrasClusterConfig(compPcore, compOnHeap, compOffHeap, null, null));
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, GenericResourceAwareStrategy.class.getName());
    IScheduler scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    TopologyDetails td1 = genTopology(topoName1, config, topo1NumSpouts, topo1NumBolts,
            topo1SpoutParallelism, topo1BoltParallelism, 0, 0, "user", topo1MaxHeapSize);
    Topologies topologies = new Topologies(td1);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap,
            new HashMap<>(), topologies, config);
    cluster.setNetworkTopography(testDNSToSwitchMapping.getRackToHosts());
    Map<String, List<String>> networkTopography = cluster.getNetworkTopography();
    assertEquals("Expecting " + numRacks + " racks, found " + networkTopography.size(),
            numRacks, networkTopography.size());
    assertTrue("Expecting racks count to be >= 3, found " + networkTopography.size(),
            networkTopography.size() >= 3);
    // blacklist the hosts of the first numSupersPerRack supervisors
    Set<String> blackListedHosts = new HashSet<>();
    List<SupervisorDetails> supArray = new ArrayList<>(supMap.values());
    for (int i = 0; i < numSupersPerRack; i++) {
        blackListedHosts.add(supArray.get(i).getHost());
    }
    blacklistHostsAndSortNodes(blackListedHosts, supMap.values(), cluster, td1);
    // now blacklist every host on one whole rack and sort again
    String rackToClear = cluster.getNetworkTopography().keySet().stream().findFirst().get();
    blackListedHosts = new HashSet<>(cluster.getNetworkTopography().get(rackToClear));
    blacklistHostsAndSortNodes(blackListedHosts, supMap.values(), cluster, td1);
}
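The helper blacklistHostsAndSortNodes is not shown on this page. A minimal sketch of what it plausibly does, assuming it marks the hosts as blacklisted via Cluster#setBlacklistedHosts (a real method on Storm's Cluster) and then confirms that sorting still completes; the final assertion expresses the apparent intent and is an assumption, not the helper's verified body:

    // Plausible sketch only; the real helper is defined elsewhere in the test class.
    private static void blacklistHostsAndSortNodes(Set<String> blackListedHosts,
            Collection<SupervisorDetails> sups, Cluster cluster, TopologyDetails td) {
        cluster.setBlacklistedHosts(blackListedHosts);
        NodeSorterHostProximity sorter = new NodeSorterHostProximity(cluster, td);
        sorter.prepare(null);
        Set<String> sortedNodeIds = new HashSet<>();
        sorter.sortAllNodes().forEach(sortedNodeIds::add);
        // assumption: sorting must not throw, and every supervisor node is still accounted for
        Assert.assertEquals(sups.size(), sortedNodeIds.size());
    }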
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
The class TestNodeSorterHostProximity, method testWithImpairedClusterNetworkTopography.
/**
 * Assign and then clear out a rack-to-hosts mapping in cluster.networkTopography.
 * Expected behavior:
 * <ul>
 * <li>the rack without hosts does not show up in {@link NodeSorterHostProximity#getSortedRacks()}</li>
 * <li>all the supervisor nodes are still returned by {@link NodeSorterHostProximity#sortAllNodes()}</li>
 * <li>supervisors on the cleared rack show up under {@link DNSToSwitchMapping#DEFAULT_RACK}</li>
 * </ul>
 *
 * <p>
 * Force an unusual condition, where one of the racks is still passed to LazyNodeSortingIterator with
 * an empty host list, and ensure that the code is resilient.
 * </p>
 */
@Test
void testWithImpairedClusterNetworkTopography() {
    INimbus iNimbus = new INimbusTest();
    double compPcore = 100;
    double compOnHeap = 775;
    double compOffHeap = 25;
    int topo1NumSpouts = 1;
    int topo1NumBolts = 5;
    int topo1SpoutParallelism = 100;
    int topo1BoltParallelism = 200;
    final int numSupersPerRack = 10;
    final int numPortsPerSuper = 66;
    long compPerRack = (topo1NumSpouts * topo1SpoutParallelism + topo1NumBolts * topo1BoltParallelism + 10);
    long compPerSuper = compPerRack / numSupersPerRack;
    double cpuPerSuper = compPcore * compPerSuper;
    double memPerSuper = (compOnHeap + compOffHeap) * compPerSuper;
    double topo1MaxHeapSize = memPerSuper;
    final String topoName1 = "topology1";
    int numRacks = 3;
    Map<String, SupervisorDetails> supMap = genSupervisorsWithRacks(
            numRacks, numSupersPerRack, numPortsPerSuper, 0, 0, cpuPerSuper, memPerSuper, new HashMap<>());
    TestDNSToSwitchMapping testDNSToSwitchMapping = new TestDNSToSwitchMapping(supMap.values());
    Config config = new Config();
    config.putAll(createGrasClusterConfig(compPcore, compOnHeap, compOffHeap, null, null));
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, GenericResourceAwareStrategy.class.getName());
    IScheduler scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    TopologyDetails td1 = genTopology(topoName1, config, topo1NumSpouts, topo1NumBolts,
            topo1SpoutParallelism, topo1BoltParallelism, 0, 0, "user", topo1MaxHeapSize);
    Topologies topologies = new Topologies(td1);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap,
            new HashMap<>(), topologies, config);
    cluster.setNetworkTopography(testDNSToSwitchMapping.getRackToHosts());
    Map<String, List<String>> networkTopography = cluster.getNetworkTopography();
    assertEquals("Expecting " + numRacks + " racks, found " + networkTopography.size(),
            numRacks, networkTopography.size());
    assertTrue("Expecting racks count to be >= 3, found " + networkTopography.size(),
            networkTopography.size() >= 3);
    // Impair cluster.networkTopography by setting one rack to have zero hosts; getSortedRacks should exclude this rack.
    // Keep the supervisorDetails unchanged - confirm that these nodes are not lost even with incomplete networkTopography.
    String rackIdToZero = networkTopography.keySet().stream().findFirst().get();
    impairClusterRack(cluster, rackIdToZero, true, false);
    NodeSorterHostProximity nodeSorterHostProximity = new NodeSorterHostProximity(cluster, td1);
    nodeSorterHostProximity.getSortedRacks().forEach(x -> assertNotEquals(x.id, rackIdToZero));
    // confirm that the above action has not lost the hosts and that they appear under the DEFAULT rack
    {
        Set<String> seenRacks = new HashSet<>();
        nodeSorterHostProximity.getSortedRacks().forEach(x -> seenRacks.add(x.id));
        assertEquals("Expecting rack count to still be " + numRacks, numRacks, seenRacks.size());
        assertTrue("Expecting to see default-rack=" + DNSToSwitchMapping.DEFAULT_RACK + " in sortedRacks",
                seenRacks.contains(DNSToSwitchMapping.DEFAULT_RACK));
    }
    // now check that no node/supervisor is missing when sorting all nodes
    Set<String> expectedNodes = supMap.keySet();
    Set<String> seenNodes = new HashSet<>();
    nodeSorterHostProximity.prepare(null);
    nodeSorterHostProximity.sortAllNodes().forEach(seenNodes::add);
    assertEquals("Expecting to see all supervisors", expectedNodes, seenNodes);
    // Now fully impair the cluster - confirm no default rack
    {
        cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap,
                new HashMap<>(), topologies, config);
        cluster.setNetworkTopography(new TestDNSToSwitchMapping(supMap.values()).getRackToHosts());
        impairClusterRack(cluster, rackIdToZero, true, true);
        Set<String> seenRacks = new HashSet<>();
        NodeSorterHostProximity nodeSorterHostProximity2 = new NodeSorterHostProximity(cluster, td1);
        nodeSorterHostProximity2.getSortedRacks().forEach(x -> seenRacks.add(x.id));
        Map<String, Set<String>> rackIdToHosts = nodeSorterHostProximity2.getRackIdToHosts();
        String dumpOfRacks = rackIdToHosts.entrySet().stream()
                .map(x -> String.format("rack %s -> hosts [%s]", x.getKey(), String.join(",", x.getValue())))
                .collect(Collectors.joining("\n\t"));
        assertEquals("Expecting rack count to be " + (numRacks - 1) + " but found " + seenRacks.size()
                + "\n\t" + dumpOfRacks, numRacks - 1, seenRacks.size());
        assertFalse("Found default-rack=" + DNSToSwitchMapping.DEFAULT_RACK + " in \n\t" + dumpOfRacks,
                seenRacks.contains(DNSToSwitchMapping.DEFAULT_RACK));
    }
}
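The DEFAULT_RACK fallback that the middle of this test asserts can be pictured with plain map operations (an illustration of the observed behavior, not NodeSorterHostProximity's actual implementation): hosts known to the supervisor map but absent from every rack's host list are grouped under DNSToSwitchMapping.DEFAULT_RACK.

    // Illustration of the fallback asserted above, using only the cluster's public getters.
    Map<String, String> hostToRack = new HashMap<>();
    cluster.getNetworkTopography().forEach((rack, hosts) ->
            hosts.forEach(host -> hostToRack.put(host, rack)));
    for (SupervisorDetails sup : supMap.values()) {
        // hosts stripped from the impaired rack fall back to the default rack
        hostToRack.putIfAbsent(sup.getHost(), DNSToSwitchMapping.DEFAULT_RACK);
    }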
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
The class TestNodeSorterHostProximity, method testMultipleRacks.
/**
 * Test whether the strategy will choose the correct rack.
 */
@Test
public void testMultipleRacks() {
    final Map<String, SupervisorDetails> supMap = new HashMap<>();
    final int numRacks = 1;
    final int numSupersPerRack = 10;
    final int numPortsPerSuper = 4;
    final int numZonesPerHost = 1;
    final double numaResourceMultiplier = 1.0;
    int rackStartNum = 0;
    int supStartNum = 0;
    final Map<String, SupervisorDetails> supMapRack0 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            400, 8000, Collections.emptyMap(), numaResourceMultiplier);
    // generate another rack of supervisors with fewer resources
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack1 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            200, 4000, Collections.emptyMap(), numaResourceMultiplier);
    // generate some supervisors that are depleted of one resource
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack2 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            0, 8000, Collections.emptyMap(), numaResourceMultiplier);
    // generate some that have a lot of memory but little CPU
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack3 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            10, 8000 * 2 + 4000, Collections.emptyMap(), numaResourceMultiplier);
    // generate some that have a lot of CPU but little memory
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack4 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            400 + 200 + 10, 1000, Collections.emptyMap(), numaResourceMultiplier);
    // Generate some that have neither resource, to verify that the strategy prioritizes them last.
    // Also put a generic resource with a 0 value in the resources list, to verify that it does not affect the sorting.
    supStartNum += numSupersPerRack;
    final Map<String, SupervisorDetails> supMapRack5 = genSupervisorsWithRacksAndNuma(
            numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
            0.0, 0.0, Collections.singletonMap("gpu.count", 0.0), numaResourceMultiplier);
    supMap.putAll(supMapRack0);
    supMap.putAll(supMapRack1);
    supMap.putAll(supMapRack2);
    supMap.putAll(supMapRack3);
    supMap.putAll(supMapRack4);
    supMap.putAll(supMapRack5);
    Config config = createClusterConfig(100, 500, 500, null);
    config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    INimbus iNimbus = new INimbusTest();
    // create test DNSToSwitchMapping plugin
    TestDNSToSwitchMapping testDNSToSwitchMapping = new TestDNSToSwitchMapping(
            supMapRack0, supMapRack1, supMapRack2, supMapRack3, supMapRack4, supMapRack5);
    // generate topologies
    TopologyDetails topo1 = genTopology("topo-1", config, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
    TopologyDetails topo2 = genTopology("topo-2", config, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
    Topologies topologies = new Topologies(topo1, topo2);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap,
            new HashMap<>(), topologies, config);
    List<String> supHostnames = new LinkedList<>();
    for (SupervisorDetails sup : supMap.values()) {
        supHostnames.add(sup.getHost());
    }
    Map<String, List<String>> rackToHosts = testDNSToSwitchMapping.getRackToHosts();
    cluster.setNetworkTopography(rackToHosts);
    NodeSorterHostProximity nodeSorter =
            new NodeSorterHostProximity(cluster, topo1, BaseResourceAwareStrategy.NodeSortType.DEFAULT_RAS);
    nodeSorter.prepare(null);
    List<ObjectResourcesItem> sortedRacks =
            StreamSupport.stream(nodeSorter.getSortedRacks().spliterator(), false).collect(Collectors.toList());
    String rackSummaries = sortedRacks.stream()
            .map(x -> String.format("Rack %s -> scheduled-cnt %d, min-avail %f, avg-avail %f, cpu %f, mem %f",
                    x.id,
                    nodeSorter.getScheduledExecCntByRackId().getOrDefault(x.id, new AtomicInteger(-1)).get(),
                    x.minResourcePercent, x.avgResourcePercent,
                    x.availableResources.getTotalCpu(), x.availableResources.getTotalMemoryMb()))
            .collect(Collectors.joining("\n\t"));
    Assert.assertEquals(rackSummaries + "\nnumber of racks sorted", 6, sortedRacks.size());
    Iterator<ObjectResourcesItem> it = sortedRacks.iterator();
    Assert.assertEquals(rackSummaries + "\nrack-000 should be ordered first since it has the most balanced set of resources",
            "rack-000", it.next().id);
    Assert.assertEquals(rackSummaries + "\nrack-001 should be ordered second since it has a balanced set of resources but less than rack-000",
            "rack-001", it.next().id);
    Assert.assertEquals(rackSummaries + "\nrack-004 should be ordered third since it has a lot of CPU but not a lot of memory",
            "rack-004", it.next().id);
    Assert.assertEquals(rackSummaries + "\nrack-003 should be ordered fourth since it has a lot of memory but little CPU",
            "rack-003", it.next().id);
    Assert.assertEquals(rackSummaries + "\nrack-002 should be ordered fifth since it has no CPU resources",
            "rack-002", it.next().id);
    Assert.assertEquals(rackSummaries + "\nrack-005 should be ordered sixth since it has neither CPU nor memory available",
            "rack-005", it.next().id);
}
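Why this exact order? Each rack's share of total cluster resources explains it: the racks appear to be compared by their minimum resource share first, then by the average, a reading consistent with the minResourcePercent and avgResourcePercent fields printed in rackSummaries (the real comparator in NodeSorterHostProximity may weigh additional terms, such as scheduled executor counts). A back-of-the-envelope check, with per-rack totals taken from the generator calls above:

    // Hedged back-of-the-envelope check; each rack has 10 supervisors.
    double[][] rackCpuMem = {
            {10 * 400, 10 * 8000},              // rack-000
            {10 * 200, 10 * 4000},              // rack-001
            {10 * 0,   10 * 8000},              // rack-002
            {10 * 10,  10 * (8000 * 2 + 4000)}, // rack-003
            {10 * (400 + 200 + 10), 10 * 1000}, // rack-004
            {0, 0},                             // rack-005
    };
    double totalCpu = 12200.0, totalMem = 410000.0; // column sums of the table above
    for (int r = 0; r < rackCpuMem.length; r++) {
        double cpuShare = rackCpuMem[r][0] / totalCpu;
        double memShare = rackCpuMem[r][1] / totalMem;
        System.out.printf("rack-%03d min=%.3f avg=%.3f%n", r,
                Math.min(cpuShare, memShare), (cpuShare + memShare) / 2);
    }
    // Sorting by min share descending, then avg share descending, reproduces the
    // asserted order: rack-000, 001, 004, 003, 002, 005 (002 beats 005 on avg).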
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
The class TestResourceAwareScheduler, method minCpuPreventsThirdTopo.
/**
 * Min CPU for a worker is set to 40%, with one supervisor that has 100% CPU.
 * Two topologies, each with two 10% CPU components, should schedule. A third topology
 * should then fail to schedule due to lack of CPU.
 */
@Test
public void minCpuPreventsThirdTopo() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(1, 4, 100, 60000);
    Config config = createClusterConfig(10, 500, 500, null);
    config.put(DaemonConfig.STORM_WORKER_MIN_CPU_PCORE_PERCENT, 40.0);
    TopologyDetails topo1 = genTopology("topo-1", config, 2, 0, 1, 1, currentTime - 2, 20, "jerry");
    TopologyDetails topo2 = genTopology("topo-2", config, 2, 0, 1, 1, currentTime - 2, 20, "jerry");
    TopologyDetails topo3 = genTopology("topo-3", config, 2, 0, 1, 1, currentTime - 2, 20, "jerry");
    Topologies topologies = new Topologies(topo1, topo2, topo3);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap,
            new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertFalse(cluster.needsSchedulingRas(topo1));
    assertFalse(cluster.needsSchedulingRas(topo2));
    assertTrue(cluster.needsSchedulingRas(topo3));
    assertTrue("topo-1 scheduled?", cluster.getAssignmentById(topo1.getId()) != null);
    assertTrue("topo-2 scheduled?", cluster.getAssignmentById(topo2.getId()) != null);
    assertFalse("topo-3 unscheduled?", cluster.getAssignmentById(topo3.getId()) != null);
    // each scheduled topology should hold one worker slot at the 40% minimum CPU reservation
    SchedulerAssignment assignment1 = cluster.getAssignmentById(topo1.getId());
    assertEquals(1, assignment1.getSlots().size());
    Map<WorkerSlot, WorkerResources> assignedSlots1 = assignment1.getScheduledResources();
    double assignedCpu = 0.0;
    for (Entry<WorkerSlot, WorkerResources> entry : assignedSlots1.entrySet()) {
        WorkerResources wr = entry.getValue();
        assignedCpu += wr.get_cpu();
    }
    assertEquals(40.0, assignedCpu, 0.001);
    SchedulerAssignment assignment2 = cluster.getAssignmentById(topo2.getId());
    assertEquals(1, assignment2.getSlots().size());
    Map<WorkerSlot, WorkerResources> assignedSlots2 = assignment2.getScheduledResources();
    assignedCpu = 0.0;
    for (Entry<WorkerSlot, WorkerResources> entry : assignedSlots2.entrySet()) {
        WorkerResources wr = entry.getValue();
        assignedCpu += wr.get_cpu();
    }
    assertEquals(40.0, assignedCpu, 0.001);
}
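The arithmetic behind this test is worth spelling out: each topology demands 2 executors x 10% CPU = 20%, but DaemonConfig.STORM_WORKER_MIN_CPU_PCORE_PERCENT rounds each worker's reservation up to 40%. A sketch of the calculation (an illustration of the numbers above, not scheduler internals):

    // Illustration only: how the 40% minimum exhausts the single supervisor's CPU.
    double supervisorCpu = 100.0;             // genSupervisors(1, 4, 100, 60000)
    double perExecutorCpu = 10.0;             // createClusterConfig(10, 500, 500, null)
    double minWorkerCpu = 40.0;               // STORM_WORKER_MIN_CPU_PCORE_PERCENT
    double perTopoCpu = Math.max(2 * perExecutorCpu, minWorkerCpu); // 40.0, not 20.0
    double remaining = supervisorCpu - 2 * perTopoCpu;              // 20.0 after two topologies
    // topo-3 would need another 40.0, so it cannot be scheduled.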