Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
The class DefaultPool, method addTopology.
@Override
public void addTopology(TopologyDetails td) {
    String topId = td.getId();
    LOG.debug("Adding in Topology {}", topId);
    _tds.put(topId, td);
    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
    if (assignment != null) {
        for (WorkerSlot ws : assignment.getSlots()) {
            Node n = _nodeIdToNode.get(ws.getNodeId());
            _nodes.add(n);
        }
    }
}
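The core of this method is the walk over the assignment's WorkerSlots to recover which nodes the topology already occupies. A minimal sketch of just that step, using only the scheduler API shown above (the helper class AssignedNodeIds is hypothetical, not part of Storm):

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.storm.scheduler.Cluster;
    import org.apache.storm.scheduler.SchedulerAssignment;
    import org.apache.storm.scheduler.WorkerSlot;

    public final class AssignedNodeIds {

        // Returns the ids of the nodes that currently host workers of the given topology.
        public static Set<String> nodeIdsFor(Cluster cluster, String topologyId) {
            Set<String> nodeIds = new HashSet<>();
            SchedulerAssignment assignment = cluster.getAssignmentById(topologyId);
            if (assignment != null) {
                for (WorkerSlot ws : assignment.getSlots()) {
                    nodeIds.add(ws.getNodeId());
                }
            }
            return nodeIds;
        }
    }

DefaultPool then maps each of those node ids to its Node object via _nodeIdToNode and adds the result to the pool's node set.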
Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
The class IsolatedPool, method addTopology.
@Override
public void addTopology(TopologyDetails td) {
    String topId = td.getId();
    LOG.debug("Adding in Topology {}", topId);
    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
    Set<Node> assignedNodes = new HashSet<>();
    if (assignment != null) {
        for (WorkerSlot ws : assignment.getSlots()) {
            Node n = _nodeIdToNode.get(ws.getNodeId());
            assignedNodes.add(n);
        }
    }
    _usedNodes += assignedNodes.size();
    _topologyIdToNodes.put(topId, assignedNodes);
    _tds.put(topId, td);
    if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
        _isolated.add(topId);
    }
}
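What marks a topology as isolated here is simply the presence of Config.TOPOLOGY_ISOLATED_MACHINES in its conf. A rough sketch of how a submitter might request isolation (the helper and the machine count are illustrative only):

    import org.apache.storm.Config;

    public final class IsolationConfExample {

        // Builds a topology conf that asks the multitenant scheduler to reserve
        // the given number of machines exclusively for this topology.
        public static Config isolatedConf(int machines) {
            Config conf = new Config();
            conf.put(Config.TOPOLOGY_ISOLATED_MACHINES, machines);
            return conf;
        }
    }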
Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
The class DefaultEvictionStrategy, method evictTopology.
private void evictTopology(TopologyDetails topologyEvict) {
    Collection<WorkerSlot> workersToEvict = this.cluster.getUsedSlotsByTopologyId(topologyEvict.getId());
    User submitter = this.userMap.get(topologyEvict.getTopologySubmitter());
    LOG.info("Evicting Topology {} with workers: {} from user {}", topologyEvict.getName(), workersToEvict, topologyEvict.getTopologySubmitter());
    this.nodes.freeSlots(workersToEvict);
    submitter.moveTopoFromRunningToPending(topologyEvict, this.cluster);
}
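Eviction boils down to looking up the topology's occupied WorkerSlots and releasing them. A minimal sketch that frees the slots directly through the Cluster API instead of the Nodes helper used above (the class name EvictHelper is ours):

    import java.util.Collection;

    import org.apache.storm.scheduler.Cluster;
    import org.apache.storm.scheduler.WorkerSlot;

    public final class EvictHelper {

        // Frees every worker slot the given topology currently holds so that
        // another topology can be scheduled onto them.
        public static void freeAllSlots(Cluster cluster, String topologyId) {
            Collection<WorkerSlot> used = cluster.getUsedSlotsByTopologyId(topologyId);
            for (WorkerSlot ws : used) {
                cluster.freeSlot(ws);
            }
        }
    }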
Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
The class DefaultResourceAwareStrategy, method sortRacks.
/**
 * Sort racks.
 *
 * @param topoId topology id
 * @param scheduleAssignmentMap calculated assignments so far
 * @return a sorted list of racks
 * Racks are sorted by two criteria. 1) the number of the topology's executors already on the rack, in descending order.
 * The reasoning behind criterion 1 is that we want to schedule the rest of a topology on the same rack as its existing executors.
 * 2) the subordinate/subservient resource availability percentage of a rack, in descending order.
 * We calculate the resource availability percentage by dividing the resource availability on the rack by the resource availability of the entire cluster.
 * With this calculation, racks that have exhausted one of the resources, or have little of it left, are ranked after racks with more balanced resource availability,
 * so we are less likely to pick a rack that has a lot of one resource but little of another.
 */
TreeSet<ObjectResources> sortRacks(final String topoId, final Map<WorkerSlot, Collection<ExecutorDetails>> scheduleAssignmentMap) {
    AllResources allResources = new AllResources("Cluster");
    List<ObjectResources> racks = allResources.objectResources;
    final Map<String, String> nodeIdToRackId = new HashMap<String, String>();
    for (Map.Entry<String, List<String>> entry : _clusterInfo.entrySet()) {
        String rackId = entry.getKey();
        List<String> nodeIds = entry.getValue();
        ObjectResources rack = new ObjectResources(rackId);
        racks.add(rack);
        for (String nodeId : nodeIds) {
            RAS_Node node = _nodes.getNodeById(this.NodeHostnameToId(nodeId));
            double availMem = node.getAvailableMemoryResources();
            double availCpu = node.getAvailableCpuResources();
            double totalMem = node.getTotalMemoryResources();
            double totalCpu = node.getTotalCpuResources();
            rack.availMem += availMem;
            rack.totalMem += totalMem;
            rack.availCpu += availCpu;
            rack.totalCpu += totalCpu;
            nodeIdToRackId.put(nodeId, rack.id);
            allResources.availMemResourcesOverall += availMem;
            allResources.availCpuResourcesOverall += availCpu;
            allResources.totalMemResourcesOverall += totalMem;
            allResources.totalCpuResourcesOverall += totalCpu;
        }
    }
    LOG.debug("Cluster Overall Avail [ CPU {} MEM {} ] Total [ CPU {} MEM {} ]",
        allResources.availCpuResourcesOverall, allResources.availMemResourcesOverall,
        allResources.totalCpuResourcesOverall, allResources.totalMemResourcesOverall);
    return sortObjectResources(allResources, new ExistingScheduleFunc() {

        @Override
        public int getNumExistingSchedule(String objectId) {
            String rackId = objectId;
            // Get execs already assigned in rack
            Collection<ExecutorDetails> execs = new LinkedList<ExecutorDetails>();
            if (_cluster.getAssignmentById(topoId) != null) {
                for (Map.Entry<ExecutorDetails, WorkerSlot> entry : _cluster.getAssignmentById(topoId).getExecutorToSlot().entrySet()) {
                    String nodeId = entry.getValue().getNodeId();
                    String hostname = idToNode(nodeId).getHostname();
                    ExecutorDetails exec = entry.getKey();
                    if (nodeIdToRackId.get(hostname) != null && nodeIdToRackId.get(hostname).equals(rackId)) {
                        execs.add(exec);
                    }
                }
            }
            // get execs already scheduled in the current scheduling
            for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry : scheduleAssignmentMap.entrySet()) {
                WorkerSlot workerSlot = entry.getKey();
                String nodeId = workerSlot.getNodeId();
                String hostname = idToNode(nodeId).getHostname();
                if (nodeIdToRackId.get(hostname).equals(rackId)) {
                    execs.addAll(entry.getValue());
                }
            }
            return execs.size();
        }
    });
}
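To make the two-criterion ordering in the Javadoc concrete, here is a simplified, hypothetical sketch of the comparison; it is not Storm's sortObjectResources. Racks are compared first by how many of the topology's executors they already host (descending), then by the smaller of their CPU and memory availability percentages relative to the whole cluster (descending), so a rack that is nearly out of one resource sorts late even if it has plenty of the other:

    import java.util.Comparator;

    final class RackSortSketch {

        static final class Rack {
            final String id;
            final int execsAlreadyOnRack;
            final double availCpu;
            final double availMem;

            Rack(String id, int execsAlreadyOnRack, double availCpu, double availMem) {
                this.id = id;
                this.execsAlreadyOnRack = execsAlreadyOnRack;
                this.availCpu = availCpu;
                this.availMem = availMem;
            }
        }

        static Comparator<Rack> comparator(final double clusterAvailCpu, final double clusterAvailMem) {
            return (a, b) -> {
                // Criterion 1: racks already hosting more of the topology's executors come first.
                if (a.execsAlreadyOnRack != b.execsAlreadyOnRack) {
                    return Integer.compare(b.execsAlreadyOnRack, a.execsAlreadyOnRack);
                }
                // Criterion 2: higher "subordinate" (smaller of the two) availability percentage comes first.
                double aPct = Math.min(100.0 * a.availCpu / clusterAvailCpu, 100.0 * a.availMem / clusterAvailMem);
                double bPct = Math.min(100.0 * b.availCpu / clusterAvailCpu, 100.0 * b.availMem / clusterAvailMem);
                return Double.compare(bPct, aPct);
            };
        }
    }

In sortRacks itself, criterion 1 is supplied by the ExistingScheduleFunc callback, which counts executors per rack from both the committed assignment in the Cluster and the in-progress scheduleAssignmentMap.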
Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
The class Node, method freeTopology.
/**
* Frees all the slots for a topology.
* @param topId the topology to free slots for
* @param cluster the cluster to update
*/
public void freeTopology(String topId, Cluster cluster) {
    Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
    if (slots == null || slots.isEmpty())
        return;
    for (WorkerSlot ws : slots) {
        cluster.freeSlot(ws);
        if (_isAlive) {
            _freeSlots.add(ws);
        }
    }
    _topIdToUsedSlots.remove(topId);
}
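Once a slot has been freed in the Cluster it can be handed to another topology. A small sketch of that follow-up step, under the assumption that the scheduler drives the Cluster API directly (the helper class ReassignHelper is ours, not part of Storm):

    import java.util.Collection;

    import org.apache.storm.scheduler.Cluster;
    import org.apache.storm.scheduler.ExecutorDetails;
    import org.apache.storm.scheduler.WorkerSlot;

    public final class ReassignHelper {

        // Frees the slot if something still occupies it, then assigns the given
        // executors of another topology onto it.
        public static void reassign(Cluster cluster, WorkerSlot slot, String newTopologyId, Collection<ExecutorDetails> executors) {
            if (cluster.isSlotOccupied(slot)) {
                cluster.freeSlot(slot);
            }
            cluster.assign(slot, newTopologyId, executors);
        }
    }

This mirrors the bookkeeping in freeTopology above, where Node returns freed slots to its own free list only while the node is alive.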