Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.
From the class TestNodeSorterHostProximity, method testMultipleRacksOrderedByCapacity.
/**
* Racks should be returned in order of decreasing capacity.
*/
@Test
public void testMultipleRacksOrderedByCapacity() {
final Map<String, SupervisorDetails> supMap = new HashMap<>();
final int numRacks = 1;
final int numSupersPerRack = 10;
final int numPortsPerSuper = 4;
final int numZonesPerHost = 1;
final double numaResourceMultiplier = 1.0;
int rackStartNum = 0;
int supStartNum = 0;
final Map<String, SupervisorDetails> supMapRack0 = genSupervisorsWithRacksAndNuma(
        numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
        600, 8000 - rackStartNum, Collections.emptyMap(), numaResourceMultiplier);
supStartNum += numSupersPerRack;
final Map<String, SupervisorDetails> supMapRack1 = genSupervisorsWithRacksAndNuma(
        numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
        500, 8000 - rackStartNum, Collections.emptyMap(), numaResourceMultiplier);
supStartNum += numSupersPerRack;
final Map<String, SupervisorDetails> supMapRack2 = genSupervisorsWithRacksAndNuma(
        numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
        400, 8000 - rackStartNum, Collections.emptyMap(), numaResourceMultiplier);
supStartNum += numSupersPerRack;
final Map<String, SupervisorDetails> supMapRack3 = genSupervisorsWithRacksAndNuma(
        numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
        300, 8000 - rackStartNum, Collections.emptyMap(), numaResourceMultiplier);
supStartNum += numSupersPerRack;
final Map<String, SupervisorDetails> supMapRack4 = genSupervisorsWithRacksAndNuma(
        numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
        200, 8000 - rackStartNum, Collections.emptyMap(), numaResourceMultiplier);
// rack-005 below is too small to hold the topology
supStartNum += numSupersPerRack;
final Map<String, SupervisorDetails> supMapRack5 = genSupervisorsWithRacksAndNuma(
        numRacks, numSupersPerRack, numZonesPerHost, numPortsPerSuper, rackStartNum++, supStartNum,
        100, 8000 - rackStartNum, Collections.singletonMap("gpu.count", 0.0), numaResourceMultiplier);
supMap.putAll(supMapRack0);
supMap.putAll(supMapRack1);
supMap.putAll(supMapRack2);
supMap.putAll(supMapRack3);
supMap.putAll(supMapRack4);
supMap.putAll(supMapRack5);
Config config = createClusterConfig(100, 500, 500, null);
config.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
INimbus iNimbus = new INimbusTest();
// create test DNSToSwitchMapping plugin
TestDNSToSwitchMapping testDNSToSwitchMapping = new TestDNSToSwitchMapping(supMapRack0, supMapRack1, supMapRack2, supMapRack3, supMapRack4, supMapRack5);
// generate topologies
TopologyDetails topo1 = genTopology("topo-1", config, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
TopologyDetails topo2 = genTopology("topo-2", config, 8, 0, 2, 0, CURRENT_TIME - 2, 10, "user");
Topologies topologies = new Topologies(topo1, topo2);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
cluster.setNetworkTopography(testDNSToSwitchMapping.getRackToHosts());
NodeSorterHostProximity nodeSorter = new NodeSorterHostProximity(cluster, topo1);
nodeSorter.prepare(null);
List<ObjectResourcesItem> sortedRacks = StreamSupport.stream(nodeSorter.getSortedRacks().spliterator(), false).collect(Collectors.toList());
String rackSummaries = sortedRacks.stream()
        .map(x -> String.format("Rack %s -> scheduled-cnt %d, min-avail %f, avg-avail %f, cpu %f, mem %f",
                x.id,
                nodeSorter.getScheduledExecCntByRackId().getOrDefault(x.id, new AtomicInteger(-1)).get(),
                x.minResourcePercent, x.avgResourcePercent,
                x.availableResources.getTotalCpu(), x.availableResources.getTotalMemoryMb()))
        .collect(Collectors.joining("\n\t"));
NormalizedResourceRequest topoResourceRequest = topo1.getApproximateTotalResources();
String topoRequest = String.format("Topo %s, approx-requested-resources %s", topo1.getId(), topoResourceRequest.toString());
Iterator<ObjectResourcesItem> it = sortedRacks.iterator();
Assert.assertEquals(topoRequest + "\n\t" + rackSummaries + "\nrack-000 should be ordered first since it has the largest capacity", "rack-000", it.next().id);
Assert.assertEquals(topoRequest + "\n\t" + rackSummaries + "\nrack-001 should be ordered second since it is smaller than rack-000", "rack-001", it.next().id);
Assert.assertEquals(topoRequest + "\n\t" + rackSummaries + "\nrack-002 should be ordered third since it is smaller than rack-001", "rack-002", it.next().id);
Assert.assertEquals(topoRequest + "\n\t" + rackSummaries + "\nrack-003 should be ordered fourth since it is smaller than rack-002", "rack-003", it.next().id);
Assert.assertEquals(topoRequest + "\n\t" + rackSummaries + "\nrack-004 should be ordered fifth since it is smaller than rack-003", "rack-004", it.next().id);
Assert.assertEquals(topoRequest + "\n\t" + rackSummaries + "\nrack-005 should be ordered last since it has the smallest capacity", "rack-005", it.next().id);
}
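As a quick sanity check on the asserted ordering, the per-rack CPU totals implied by the generator calls above already decrease from rack-000 to rack-005. A minimal sketch of that arithmetic, assuming (as a simplification) that genSupervisorsWithRacksAndNuma gives each of the 10 supervisors in a rack the listed CPU figure:

public class RackCapacitySketch {
    public static void main(String[] args) {
        // CPU passed to genSupervisorsWithRacksAndNuma for rack-000 .. rack-005 above
        double[] cpuPerSupervisor = {600, 500, 400, 300, 200, 100};
        int supersPerRack = 10;
        for (int rack = 0; rack < cpuPerSupervisor.length; rack++) {
            // total rack CPU under the stated assumption
            double rackCpu = supersPerRack * cpuPerSupervisor[rack];
            System.out.printf("rack-%03d total CPU ~ %.0f%n", rack, rackCpu);
        }
        // Totals decrease 6000, 5000, ..., 1000, matching the asserted order
        // rack-000 first and rack-005 last.
    }
}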
Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.
From the class ServerUtils, method getAckerExecutorMemoryUsageForTopo.
private static double getAckerExecutorMemoryUsageForTopo(StormTopology topology, Map<String, Object> topologyConf) throws InvalidTopologyException {
topology = StormCommon.systemTopology(topologyConf, topology);
Map<String, NormalizedResourceRequest> boltResources = ResourceUtils.getBoltsResources(topology, topologyConf);
NormalizedResourceRequest entry = boltResources.get(Acker.ACKER_COMPONENT_ID);
return entry.getTotalMemoryMb();
}
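Note that entry.getTotalMemoryMb() assumes the bolt map always contains an entry for Acker.ACKER_COMPONENT_ID. A hedged, null-safe variant (an illustration only, not code from the Storm codebase) that reuses the same calls and falls back to 0.0 if the acker entry is ever missing:

// Illustration only: same lookups as above, but tolerant of a missing acker entry.
private static double getAckerExecutorMemoryUsageOrZero(StormTopology topology, Map<String, Object> topologyConf) throws InvalidTopologyException {
    StormTopology systemTopology = StormCommon.systemTopology(topologyConf, topology);
    Map<String, NormalizedResourceRequest> boltResources = ResourceUtils.getBoltsResources(systemTopology, topologyConf);
    NormalizedResourceRequest acker = boltResources.get(Acker.ACKER_COMPONENT_ID);
    // fall back to 0.0 instead of throwing a NullPointerException
    return acker == null ? 0.0 : acker.getTotalMemoryMb();
}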
Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.
From the class NodeSorterHostProximity, method sortObjectResourcesGeneric.
/**
 * Sort objects by the following two criteria.
 *
 * <ol>
 * <li>The number of executors of the topology being scheduled that are already on the object
 * (node or rack), in descending order. Sorting on this criterion first means the rest of the
 * topology is scheduled onto the same object (node or rack) as its existing executors.</li>
 *
 * <li>The subordinate/subservient resource availability percentage of the object, in descending order.
 * The availability percentage is calculated by dividing the resource availability of the object
 * (node or rack) by the resource availability of the entire rack or cluster, depending on whether
 * the object is a node or a rack. Unlike the DefaultResourceAwareStrategy, this percentage boosts
 * a node or rack whose resources are requested by the executor being sorted for, and pulls it down
 * if they are not. As a result, objects that have exhausted, or have little of, one of the requested
 * resources are ranked after objects with more balanced availability, and objects whose remaining
 * resources are not requested are ranked below those. This makes it less likely to pick a rack that
 * has a lot of one resource but little of another, or a lot of resources the executor does not
 * request.</li>
 * </ol>
 *
 * @param allResources contains all individual ObjectResources as well as cumulative stats
 * @param exec executor for which the sorting is done
 * @param existingScheduleFunc a function to get existing executors already scheduled on this object
 * @return an {@link Iterable} of sorted {@link ObjectResourcesItem}
 */
@Deprecated
private Iterable<ObjectResourcesItem> sortObjectResourcesGeneric(final ObjectResourcesSummary allResources, ExecutorDetails exec, final ExistingScheduleFunc existingScheduleFunc) {
ObjectResourcesSummary affinityBasedAllResources = new ObjectResourcesSummary(allResources);
final NormalizedResourceOffer availableResourcesOverall = allResources.getAvailableResourcesOverall();
final NormalizedResourceRequest requestedResources = (exec != null) ? topologyDetails.getTotalResources(exec) : null;
affinityBasedAllResources.getObjectResources().forEach(x -> {
if (requestedResources != null) {
// negate unrequested resources
x.availableResources.updateForRareResourceAffinity(requestedResources);
}
x.minResourcePercent = availableResourcesOverall.calculateMinPercentageUsedBy(x.availableResources);
x.avgResourcePercent = availableResourcesOverall.calculateAveragePercentageUsedBy(x.availableResources);
LOG.trace("for {}: minResourcePercent={}, avgResourcePercent={}, numExistingSchedule={}", x.id, x.minResourcePercent, x.avgResourcePercent, existingScheduleFunc.getNumExistingSchedule(x.id));
});
Comparator<ObjectResourcesItem> comparator = (o1, o2) -> {
int execsScheduled1 = existingScheduleFunc.getNumExistingSchedule(o1.id);
int execsScheduled2 = existingScheduleFunc.getNumExistingSchedule(o2.id);
if (execsScheduled1 > execsScheduled2) {
return -1;
} else if (execsScheduled1 < execsScheduled2) {
return 1;
}
double o1Avg = o1.avgResourcePercent;
double o2Avg = o2.avgResourcePercent;
if (o1Avg > o2Avg) {
return -1;
} else if (o1Avg < o2Avg) {
return 1;
}
return o1.id.compareTo(o2.id);
};
TreeSet<ObjectResourcesItem> sortedObjectResources = new TreeSet<>(comparator);
sortedObjectResources.addAll(affinityBasedAllResources.getObjectResources());
LOG.debug("Sorted Object Resources: {}", sortedObjectResources);
return sortedObjectResources;
}
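To make the percentage math concrete, here is a self-contained sketch with made-up numbers (it deliberately avoids the NormalizedResource classes) showing how values analogous to minResourcePercent and avgResourcePercent would rank a lopsided rack against a balanced one:

public class RackPercentSketch {
    public static void main(String[] args) {
        // Cluster-wide availability (hypothetical numbers)
        double clusterCpu = 1000, clusterMem = 64_000;

        // rack-A: plenty of CPU, little memory; rack-B: balanced
        double[] rackACpuMem = {400, 4_000};
        double[] rackBCpuMem = {250, 20_000};

        print("rack-A", rackACpuMem, clusterCpu, clusterMem);
        print("rack-B", rackBCpuMem, clusterCpu, clusterMem);
        // rack-B wins on the average (about 28% vs about 23%), so the balanced
        // rack sorts ahead of the one that has nearly exhausted its memory,
        // which is the behavior the javadoc above describes.
    }

    static void print(String id, double[] cpuMem, double clusterCpu, double clusterMem) {
        double cpuPct = 100.0 * cpuMem[0] / clusterCpu;
        double memPct = 100.0 * cpuMem[1] / clusterMem;
        double minPct = Math.min(cpuPct, memPct);  // analogous to minResourcePercent
        double avgPct = (cpuPct + memPct) / 2.0;   // analogous to avgResourcePercent
        System.out.printf("%s min=%.1f%% avg=%.1f%%%n", id, minPct, avgPct);
    }
}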
Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.
From the class NodeSorter, method sortObjectResourcesGeneric.
/**
 * Sort objects by the following two criteria.
 *
 * <ol>
 * <li>The number of executors of the topology being scheduled that are already on the object
 * (node or rack), in descending order. Sorting on this criterion first means the rest of the
 * topology is scheduled onto the same object (node or rack) as its existing executors.</li>
 *
 * <li>The subordinate/subservient resource availability percentage of the object, in descending order.
 * The availability percentage is calculated by dividing the resource availability of the object
 * (node or rack) by the resource availability of the entire rack or cluster, depending on whether
 * the object is a node or a rack. Unlike the DefaultResourceAwareStrategy, this percentage boosts
 * a node or rack whose resources are requested by the executor being sorted for, and pulls it down
 * if they are not. As a result, objects that have exhausted, or have little of, one of the requested
 * resources are ranked after objects with more balanced availability, and objects whose remaining
 * resources are not requested are ranked below those. This makes it less likely to pick a rack that
 * has a lot of one resource but little of another, or a lot of resources the executor does not
 * request.</li>
 * </ol>
 *
 * @param allResources contains all individual ObjectResources as well as cumulative stats
 * @param exec executor for which the sorting is done
 * @param existingScheduleFunc a function to get existing executors already scheduled on this object
 * @return a sorted list of ObjectResources
 */
@Deprecated
private List<ObjectResourcesItem> sortObjectResourcesGeneric(final ObjectResourcesSummary allResources, ExecutorDetails exec, final ExistingScheduleFunc existingScheduleFunc) {
ObjectResourcesSummary affinityBasedAllResources = new ObjectResourcesSummary(allResources);
NormalizedResourceRequest requestedResources = topologyDetails.getTotalResources(exec);
affinityBasedAllResources.getObjectResources().forEach(x -> x.availableResources.updateForRareResourceAffinity(requestedResources));
final NormalizedResourceOffer availableResourcesOverall = allResources.getAvailableResourcesOverall();
List<ObjectResourcesItem> sortedObjectResources = new ArrayList<>();
Comparator<ObjectResourcesItem> comparator = (o1, o2) -> {
int execsScheduled1 = existingScheduleFunc.getNumExistingSchedule(o1.id);
int execsScheduled2 = existingScheduleFunc.getNumExistingSchedule(o2.id);
if (execsScheduled1 > execsScheduled2) {
return -1;
} else if (execsScheduled1 < execsScheduled2) {
return 1;
}
double o1Avg = availableResourcesOverall.calculateAveragePercentageUsedBy(o1.availableResources);
double o2Avg = availableResourcesOverall.calculateAveragePercentageUsedBy(o2.availableResources);
if (o1Avg > o2Avg) {
return -1;
} else if (o1Avg < o2Avg) {
return 1;
}
return o1.id.compareTo(o2.id);
};
sortedObjectResources.addAll(affinityBasedAllResources.getObjectResources());
sortedObjectResources.sort(comparator);
LOG.debug("Sorted Object Resources: {}", sortedObjectResources);
return sortedObjectResources;
}
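The comparator above (more already-scheduled executors first, then higher average availability, then the id as a deterministic tie-breaker) could equivalently be written with Comparator combinators. A sketch only, not code from the Storm codebase, assuming the same existingScheduleFunc and availableResourcesOverall locals as in the method above:

// Equivalent ordering expressed with Comparator combinators (illustrative sketch).
Comparator<ObjectResourcesItem> comparator = Comparator
        // more executors of this topology already on the object comes first
        .comparingInt((ObjectResourcesItem o) -> existingScheduleFunc.getNumExistingSchedule(o.id))
        .reversed()
        // then higher average resource-availability percentage
        .thenComparing(Comparator.comparingDouble((ObjectResourcesItem o) ->
                availableResourcesOverall.calculateAveragePercentageUsedBy(o.availableResources)).reversed())
        // finally the id, for a stable, deterministic total order
        .thenComparing(o -> o.id);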
Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.
From the class RasBlacklistStrategy, method releaseBlacklistWhenNeeded.
@Override
protected Set<String> releaseBlacklistWhenNeeded(Cluster cluster, final List<String> blacklistedNodeIds) {
LOG.info("RAS We have {} nodes blacklisted...", blacklistedNodeIds.size());
Set<String> readyToRemove = new HashSet<>();
if (blacklistedNodeIds.size() > 0) {
int availableSlots = cluster.getNonBlacklistedAvailableSlots(blacklistedNodeIds).size();
int neededSlots = 0;
NormalizedResourceOffer available = cluster.getNonBlacklistedClusterAvailableResources(blacklistedNodeIds);
NormalizedResourceOffer needed = new NormalizedResourceOffer();
for (TopologyDetails td : cluster.getTopologies()) {
if (cluster.needsSchedulingRas(td)) {
int slots = 0;
try {
slots = ServerUtils.getEstimatedWorkerCountForRasTopo(td.getConf(), td.getTopology());
} catch (InvalidTopologyException e) {
LOG.warn("Could not guess the number of slots needed for {}", td.getName(), e);
}
int assignedSlots = cluster.getAssignedNumWorkers(td);
int tdSlotsNeeded = slots - assignedSlots;
neededSlots += tdSlotsNeeded;
NormalizedResourceRequest resources = td.getApproximateTotalResources();
needed.add(resources);
LOG.warn("{} needs to be scheduled with {} and {} slots", td.getName(), resources, tdSlotsNeeded);
}
}
// Now we need to free up some resources...
Map<String, SupervisorDetails> availableSupervisors = cluster.getSupervisors();
NormalizedResourceOffer shortage = new NormalizedResourceOffer(needed);
shortage.remove(available, cluster.getResourceMetrics());
int shortageSlots = neededSlots - availableSlots;
LOG.debug("Need {} and {} slots.", needed, neededSlots);
LOG.debug("Available {} and {} slots.", available, availableSlots);
LOG.debug("Shortage {} and {} slots.", shortage, shortageSlots);
if (shortage.areAnyOverZero() || shortageSlots > 0) {
LOG.info("Need {} and {} slots more. Releasing some blacklisted nodes to cover it.", shortage, shortageSlots);
// release earliest blacklist - but release all supervisors on a given blacklisted host.
Map<String, Set<String>> hostToSupervisorIds = createHostToSupervisorMap(blacklistedNodeIds, cluster);
for (Set<String> supervisorIds : hostToSupervisorIds.values()) {
for (String supervisorId : supervisorIds) {
SupervisorDetails sd = availableSupervisors.get(supervisorId);
if (sd != null) {
NormalizedResourcesWithMemory sdAvailable = cluster.getAvailableResources(sd);
int sdAvailableSlots = cluster.getAvailablePorts(sd).size();
readyToRemove.add(supervisorId);
shortage.remove(sdAvailable, cluster.getResourceMetrics());
shortageSlots -= sdAvailableSlots;
LOG.info("Releasing {} with {} and {} slots leaving {} and {} slots to go", supervisorId, sdAvailable, sdAvailableSlots, shortage, shortageSlots);
}
}
// make sure we've handled all supervisors on the host before we break
if (!shortage.areAnyOverZero() && shortageSlots <= 0) {
// we have enough resources now...
break;
}
}
}
}
return readyToRemove;
}
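The release loop keeps freeing blacklisted hosts until both the resource shortage and the slot shortage are covered. A stripped-down, self-contained sketch of just the slot-count bookkeeping, with hypothetical host names and numbers (the real method also subtracts CPU/memory via NormalizedResourceOffer.remove):

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

public class BlacklistReleaseSketch {
    public static void main(String[] args) {
        // available slots per blacklisted host, in blacklist order (hypothetical)
        Map<String, Integer> blacklistedHostSlots = new LinkedHashMap<>();
        blacklistedHostSlots.put("host-1", 4);
        blacklistedHostSlots.put("host-2", 4);
        blacklistedHostSlots.put("host-3", 4);

        int shortageSlots = 6; // neededSlots - availableSlots from the cluster
        Set<String> readyToRemove = new LinkedHashSet<>();

        for (Map.Entry<String, Integer> host : blacklistedHostSlots.entrySet()) {
            readyToRemove.add(host.getKey());
            shortageSlots -= host.getValue();
            // stop once the shortage is covered, as in the method above
            if (shortageSlots <= 0) {
                break;
            }
        }
        // releases host-1 and host-2 (8 slots) and leaves host-3 blacklisted
        System.out.println(readyToRemove + ", remaining shortage: " + shortageSlots);
    }
}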