use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.
the class IsolationScheduler method computeWorkerSpecs.
private Set<Set<ExecutorDetails>> computeWorkerSpecs(TopologyDetails topology) {
    Map<String, List<ExecutorDetails>> compExecutors = Utils.reverseMap(topology.getExecutorToComponent());
    // Flatten the per-component executor lists into a single list.
    List<ExecutorDetails> allExecutors = new ArrayList<ExecutorDetails>();
    Collection<List<ExecutorDetails>> values = compExecutors.values();
    for (List<ExecutorDetails> eList : values) {
        allExecutors.addAll(eList);
    }
    // Deal the executors round-robin into one bucket per worker.
    int numWorkers = topology.getNumWorkers();
    int bucketIndex = 0;
    Map<Integer, Set<ExecutorDetails>> bucketExecutors = new HashMap<Integer, Set<ExecutorDetails>>(numWorkers);
    for (ExecutorDetails executor : allExecutors) {
        Set<ExecutorDetails> executors = bucketExecutors.get(bucketIndex);
        if (executors == null) {
            executors = new HashSet<ExecutorDetails>();
            bucketExecutors.put(bucketIndex, executors);
        }
        executors.add(executor);
        bucketIndex = (bucketIndex + 1) % numWorkers;
    }
    return new HashSet<Set<ExecutorDetails>>(bucketExecutors.values());
}
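The core of computeWorkerSpecs is the round-robin bucketing. A minimal, runnable sketch of that step, using plain strings in place of ExecutorDetails; the RoundRobinDemo class and sample data are hypothetical, not part of Storm:

import java.util.*;

public class RoundRobinDemo {
    // Deal items into numBuckets buckets in round-robin order, as computeWorkerSpecs does.
    static <T> Set<Set<T>> partition(List<T> items, int numBuckets) {
        Map<Integer, Set<T>> buckets = new HashMap<>();
        int i = 0;
        for (T item : items) {
            buckets.computeIfAbsent(i, k -> new HashSet<>()).add(item);
            i = (i + 1) % numBuckets;
        }
        return new HashSet<>(buckets.values());
    }

    public static void main(String[] args) {
        // 5 executors dealt across 2 workers -> buckets of size 3 and 2
        System.out.println(partition(Arrays.asList("e1", "e2", "e3", "e4", "e5"), 2));
    }
}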
use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.
the class DefaultResourceAwareStrategy method getWorkerScheduledMemoryUse.
/**
 * Get the amount of memory already assigned to a worker.
 *
 * @param ws the worker slot to inspect
 * @param td the topology that has executors running on the worker
 * @param scheduleAssignmentMap the assignments calculated so far
 * @return the total memory scheduled onto the worker
 */
private Double getWorkerScheduledMemoryUse(WorkerSlot ws, TopologyDetails td, Map<WorkerSlot, Collection<ExecutorDetails>> scheduleAssignmentMap) {
    Double totalMem = 0.0;
    Collection<ExecutorDetails> execs = scheduleAssignmentMap.get(ws);
    if (execs != null) {
        // Sum the memory requirement of every executor scheduled onto this slot.
        for (ExecutorDetails exec : execs) {
            totalMem += td.getTotalMemReqTask(exec);
        }
    }
    return totalMem;
}
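The method is a null-tolerant sum over one slot's entry in the in-progress assignment map. A standalone sketch of the same pattern with plain types; the slot and executor names and memory figures are hypothetical:

import java.util.*;

public class ScheduledMemDemo {
    // Sum per-executor memory for one slot; an unassigned slot contributes 0.0,
    // mirroring the null check in getWorkerScheduledMemoryUse.
    static double scheduledMem(String slot, Map<String, List<String>> assignment, Map<String, Double> memReq) {
        double total = 0.0;
        for (String exec : assignment.getOrDefault(slot, Collections.emptyList())) {
            total += memReq.getOrDefault(exec, 0.0);
        }
        return total;
    }

    public static void main(String[] args) {
        Map<String, List<String>> assignment = Map.of("node1:6700", List.of("e1", "e2"));
        Map<String, Double> memReq = Map.of("e1", 512.0, "e2", 256.0);
        System.out.println(scheduledMem("node1:6700", assignment, memReq)); // 768.0
        System.out.println(scheduledMem("node1:6701", assignment, memReq)); // 0.0
    }
}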
use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.
the class DefaultResourceAwareStrategy method sortRacks.
/**
 * Sort racks.
 *
 * Racks are sorted by two criteria. 1) The number of this topology's executors already on the rack, in descending order;
 * sorting on this criterion schedules the rest of a topology onto the same racks as its existing executors.
 * 2) The subordinate/subservient resource availability percentage of a rack, in descending order.
 * The availability percentage of a resource is the amount available on the rack divided by the amount available in the entire cluster.
 * Under this ranking, racks that have exhausted, or have little of, one resource rank below racks whose resource availability is more balanced,
 * so we are less likely to pick a rack that has plenty of one resource but little of another (see the comparator sketch after this method).
 *
 * @param topoId topology id
 * @param scheduleAssignmentMap calculated assignments so far
 * @return a sorted list of racks
 */
TreeSet<ObjectResources> sortRacks(final String topoId, final Map<WorkerSlot, Collection<ExecutorDetails>> scheduleAssignmentMap) {
    AllResources allResources = new AllResources("Cluster");
    List<ObjectResources> racks = allResources.objectResources;
    // _clusterInfo maps rack id -> hostnames on that rack, so this map ends up keyed by hostname.
    final Map<String, String> nodeIdToRackId = new HashMap<String, String>();
    for (Map.Entry<String, List<String>> entry : _clusterInfo.entrySet()) {
        String rackId = entry.getKey();
        List<String> nodeIds = entry.getValue();
        ObjectResources rack = new ObjectResources(rackId);
        racks.add(rack);
        for (String nodeId : nodeIds) {
            RAS_Node node = _nodes.getNodeById(this.NodeHostnameToId(nodeId));
            double availMem = node.getAvailableMemoryResources();
            double availCpu = node.getAvailableCpuResources();
            double totalMem = node.getTotalMemoryResources();
            double totalCpu = node.getTotalCpuResources();
            // Accumulate the node's resources into its rack's totals...
            rack.availMem += availMem;
            rack.totalMem += totalMem;
            rack.availCpu += availCpu;
            rack.totalCpu += totalCpu;
            nodeIdToRackId.put(nodeId, rack.id);
            // ...and into the cluster-wide totals used for the availability percentages.
            allResources.availMemResourcesOverall += availMem;
            allResources.availCpuResourcesOverall += availCpu;
            allResources.totalMemResourcesOverall += totalMem;
            allResources.totalCpuResourcesOverall += totalCpu;
        }
    }
    LOG.debug("Cluster Overall Avail [ CPU {} MEM {} ] Total [ CPU {} MEM {} ]", allResources.availCpuResourcesOverall, allResources.availMemResourcesOverall, allResources.totalCpuResourcesOverall, allResources.totalMemResourcesOverall);
    return sortObjectResources(allResources, new ExistingScheduleFunc() {
        @Override
        public int getNumExistingSchedule(String objectId) {
            String rackId = objectId;
            // Count executors already assigned to this rack in the cluster's current assignment.
            Collection<ExecutorDetails> execs = new LinkedList<ExecutorDetails>();
            if (_cluster.getAssignmentById(topoId) != null) {
                for (Map.Entry<ExecutorDetails, WorkerSlot> entry : _cluster.getAssignmentById(topoId).getExecutorToSlot().entrySet()) {
                    String nodeId = entry.getValue().getNodeId();
                    String hostname = idToNode(nodeId).getHostname();
                    ExecutorDetails exec = entry.getKey();
                    if (nodeIdToRackId.get(hostname) != null && nodeIdToRackId.get(hostname).equals(rackId)) {
                        execs.add(exec);
                    }
                }
            }
            // Also count executors placed on this rack by the in-progress scheduling.
            for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry : scheduleAssignmentMap.entrySet()) {
                WorkerSlot workerSlot = entry.getKey();
                String nodeId = workerSlot.getNodeId();
                String hostname = idToNode(nodeId).getHostname();
                // Null-safe comparison guards against hosts missing from the rack map, as the loop above does.
                if (rackId.equals(nodeIdToRackId.get(hostname))) {
                    execs.addAll(entry.getValue());
                }
            }
            return execs.size();
        }
    });
}
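A minimal sketch of the two-criteria ordering described in the javadoc, using a standalone comparator over simple rack records. The Rack class, effectiveAvail, and the sample numbers are hypothetical stand-ins for ObjectResources and sortObjectResources, assuming the subordinate resource is the scarcer of CPU and memory:

import java.util.*;

public class RackSortDemo {
    static class Rack {
        final String id;
        final int existingExecs;  // criterion 1: topology executors already on the rack
        final double availCpuPct; // rack CPU available / cluster CPU available
        final double availMemPct; // rack memory available / cluster memory available
        Rack(String id, int execs, double cpuPct, double memPct) {
            this.id = id; this.existingExecs = execs; this.availCpuPct = cpuPct; this.availMemPct = memPct;
        }
        // Criterion 2: rank by the scarcer (subordinate) resource, so a rack that has
        // nearly exhausted one resource sorts below a more balanced rack.
        double effectiveAvail() {
            return Math.min(availCpuPct, availMemPct);
        }
    }

    public static void main(String[] args) {
        List<Rack> racks = new ArrayList<>(Arrays.asList(
            new Rack("rack-a", 0, 0.90, 0.05),   // lots of CPU, almost no memory
            new Rack("rack-b", 0, 0.40, 0.45),   // balanced
            new Rack("rack-c", 3, 0.10, 0.10))); // already hosts executors of this topology
        racks.sort(Comparator.comparingInt((Rack r) -> r.existingExecs).reversed()
            .thenComparing(Comparator.comparingDouble(Rack::effectiveAvail).reversed()));
        racks.forEach(r -> System.out.println(r.id)); // rack-c, rack-b, rack-a
    }
}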
use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.
the class RAS_Node method intializeResources.
/**
 * Initializes resource usage on the node.
 */
private void intializeResources() {
    for (Entry<String, Map<String, Collection<ExecutorDetails>>> entry : _topIdToUsedSlots.entrySet()) {
        String topoId = entry.getKey();
        Map<String, Collection<ExecutorDetails>> assignment = entry.getValue();
        Map<ExecutorDetails, Double> topoMemoryResourceList = _topologies.getById(topoId).getTotalMemoryResourceList();
        for (Collection<ExecutorDetails> execs : assignment.values()) {
            for (ExecutorDetails exec : execs) {
                if (!_isAlive) {
                    // We do not free the assigned slots (the orphaned slots) on inactive supervisors.
                    // The inactive node is treated as a 0-resource node, unavailable to other unassigned workers.
                    continue;
                }
                if (topoMemoryResourceList.containsKey(exec)) {
                    consumeResourcesforTask(exec, _topologies.getById(topoId));
                } else {
                    throw new IllegalStateException("Executor " + exec + " not found!");
                }
            }
        }
    }
}
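A compact sketch of the same replay-and-deduct accounting with plain maps, including the dead-node guard and the missing-executor check; accountFor and all names and numbers here are hypothetical:

import java.util.*;

public class NodeAccountingDemo {
    // Deduct each assigned executor's memory from the node, mirroring intializeResources:
    // dead nodes keep their assignments but consume nothing, and an executor missing
    // from the topology's resource list is an inconsistent state.
    static double accountFor(boolean isAlive, double availMem,
                             List<String> assignedExecs, Map<String, Double> memReq) {
        for (String exec : assignedExecs) {
            if (!isAlive) {
                continue; // orphaned slot on an inactive supervisor: leave assigned, free nothing
            }
            Double req = memReq.get(exec);
            if (req == null) {
                throw new IllegalStateException("Executor " + exec + " not found!");
            }
            availMem -= req;
        }
        return availMem;
    }

    public static void main(String[] args) {
        Map<String, Double> memReq = Map.of("e1", 512.0, "e2", 256.0);
        System.out.println(accountFor(true, 4096.0, List.of("e1", "e2"), memReq));  // 3328.0
        System.out.println(accountFor(false, 4096.0, List.of("e1", "e2"), memReq)); // 4096.0
    }
}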
use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.
the class RAS_Node method getExecutors.
public static Collection<ExecutorDetails> getExecutors(WorkerSlot ws, Cluster cluster) {
    Collection<ExecutorDetails> retList = new ArrayList<ExecutorDetails>();
    // Scan every topology's assignment for executors placed on the given slot.
    for (Entry<String, SchedulerAssignment> entry : cluster.getAssignments().entrySet()) {
        Map<ExecutorDetails, WorkerSlot> executorToSlot = entry.getValue().getExecutorToSlot();
        for (Map.Entry<ExecutorDetails, WorkerSlot> execToSlot : executorToSlot.entrySet()) {
            WorkerSlot slot = execToSlot.getValue();
            // A worker slot is identified by its (node id, port) pair.
            if (ws.getPort() == slot.getPort() && ws.getNodeId().equals(slot.getNodeId())) {
                ExecutorDetails exec = execToSlot.getKey();
                retList.add(exec);
            }
        }
    }
    return retList;
}
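The method inverts executor-to-slot assignments by matching each slot's (node id, port) identity. The same inversion over plain maps; the topology, executor, and slot ids are hypothetical:

import java.util.*;

public class SlotLookupDemo {
    public static void main(String[] args) {
        // executor -> "nodeId:port", across two topologies
        Map<String, String> topo1 = Map.of("t1-e1", "node1:6700", "t1-e2", "node2:6700");
        Map<String, String> topo2 = Map.of("t2-e1", "node1:6700");
        String targetSlot = "node1:6700";

        List<String> execsOnSlot = new ArrayList<>();
        for (Map<String, String> executorToSlot : List.of(topo1, topo2)) {
            for (Map.Entry<String, String> e : executorToSlot.entrySet()) {
                if (targetSlot.equals(e.getValue())) {
                    execsOnSlot.add(e.getKey());
                }
            }
        }
        System.out.println(execsOnSlot); // [t1-e1, t2-e1]
    }
}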