
Example 11 with WorkerSlot

Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.

The class DefaultPool, method addTopology.

@Override
public void addTopology(TopologyDetails td) {
    String topId = td.getId();
    LOG.debug("Adding in Topology {}", topId);
    _tds.put(topId, td);
    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
    if (assignment != null) {
        for (WorkerSlot ws : assignment.getSlots()) {
            Node n = _nodeIdToNode.get(ws.getNodeId());
            _nodes.add(n);
        }
    }
}
Also used : SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot)
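
For context, a minimal sketch (not part of the Storm sources above) using the same SchedulerAssignment/WorkerSlot API to count how many slots a topology occupies per node; the cluster and topId variables are assumed to be supplied by the surrounding scheduler.

// Illustrative sketch only: count a topology's assigned slots per node id.
// Assumes cluster is an org.apache.storm.scheduler.Cluster and topId a topology id.
Map<String, Integer> slotsPerNode = new HashMap<>();
SchedulerAssignment assignment = cluster.getAssignmentById(topId);
if (assignment != null) {
    for (WorkerSlot ws : assignment.getSlots()) {
        slotsPerNode.merge(ws.getNodeId(), 1, Integer::sum);
    }
}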

Example 12 with WorkerSlot

Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.

The class IsolatedPool, method addTopology.

@Override
public void addTopology(TopologyDetails td) {
    String topId = td.getId();
    LOG.debug("Adding in Topology {}", topId);
    SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
    Set<Node> assignedNodes = new HashSet<>();
    if (assignment != null) {
        for (WorkerSlot ws : assignment.getSlots()) {
            Node n = _nodeIdToNode.get(ws.getNodeId());
            assignedNodes.add(n);
        }
    }
    _usedNodes += assignedNodes.size();
    _topologyIdToNodes.put(topId, assignedNodes);
    _tds.put(topId, td);
    if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
        _isolated.add(topId);
    }
}
Also used : SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) HashSet(java.util.HashSet)
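
As a side note, a hedged sketch of reading the isolation request itself from the topology configuration; the cast to Number below is an assumption about how the Config.TOPOLOGY_ISOLATED_MACHINES value is typed, not something shown in the example above.

// Illustrative sketch: read the requested number of isolated machines, if any.
// The cast to Number is an assumption; td is a TopologyDetails as in the example above.
Object isolatedMachines = td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES);
if (isolatedMachines != null) {
    int machinesRequested = ((Number) isolatedMachines).intValue();
    LOG.debug("Topology {} requests {} isolated machines", td.getId(), machinesRequested);
}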

Example 13 with WorkerSlot

Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.

The class DefaultEvictionStrategy, method evictTopology.

private void evictTopology(TopologyDetails topologyEvict) {
    Collection<WorkerSlot> workersToEvict = this.cluster.getUsedSlotsByTopologyId(topologyEvict.getId());
    User submitter = this.userMap.get(topologyEvict.getTopologySubmitter());
    LOG.info("Evicting Topology {} with workers: {} from user {}", topologyEvict.getName(), workersToEvict, topologyEvict.getTopologySubmitter());
    this.nodes.freeSlots(workersToEvict);
    submitter.moveTopoFromRunningToPending(topologyEvict, this.cluster);
}
Also used : User(org.apache.storm.scheduler.resource.User) WorkerSlot(org.apache.storm.scheduler.WorkerSlot)
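
The freeing step can be sketched on its own, using only Cluster calls visible in these examples (getUsedSlotsByTopologyId and freeSlot); cluster and topologyId are assumed inputs.

// Illustrative sketch: free every slot currently used by a topology.
// cluster and topologyId are assumed to be in scope.
for (WorkerSlot ws : cluster.getUsedSlotsByTopologyId(topologyId)) {
    cluster.freeSlot(ws);
}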

Example 14 with WorkerSlot

Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.

The class DefaultResourceAwareStrategy, method sortRacks.

/**
     * Sort racks
     *
     * @param topoId                topology id
     * @param scheduleAssignmentMap calculated assignments so far
     * @return a sorted list of racks
     * Racks are sorted by two criteria. 1) The number of executors of the topology that are already on the rack, in descending order.
     * Sorting by criterion 1 means the rest of a topology is scheduled on the same racks as its existing executors.
     * 2) The subordinate/subservient resource availability percentage of a rack, in descending order.
     * The resource availability percentage is calculated by dividing the resource availability on the rack by the resource availability of the entire cluster.
     * With this calculation, racks that have exhausted (or have little of) one of the resources mentioned above are ranked after racks with more balanced resource availability,
     * so we are less likely to pick a rack that has a lot of one resource but little of another.
     */
TreeSet<ObjectResources> sortRacks(final String topoId, final Map<WorkerSlot, Collection<ExecutorDetails>> scheduleAssignmentMap) {
    AllResources allResources = new AllResources("Cluster");
    List<ObjectResources> racks = allResources.objectResources;
    final Map<String, String> nodeIdToRackId = new HashMap<String, String>();
    for (Map.Entry<String, List<String>> entry : _clusterInfo.entrySet()) {
        String rackId = entry.getKey();
        List<String> nodeIds = entry.getValue();
        ObjectResources rack = new ObjectResources(rackId);
        racks.add(rack);
        for (String nodeId : nodeIds) {
            RAS_Node node = _nodes.getNodeById(this.NodeHostnameToId(nodeId));
            double availMem = node.getAvailableMemoryResources();
            double availCpu = node.getAvailableCpuResources();
            double totalMem = node.getTotalMemoryResources();
            double totalCpu = node.getTotalCpuResources();
            rack.availMem += availMem;
            rack.totalMem += totalMem;
            rack.availCpu += availCpu;
            rack.totalCpu += totalCpu;
            nodeIdToRackId.put(nodeId, rack.id);
            allResources.availMemResourcesOverall += availMem;
            allResources.availCpuResourcesOverall += availCpu;
            allResources.totalMemResourcesOverall += totalMem;
            allResources.totalCpuResourcesOverall += totalCpu;
        }
    }
    LOG.debug("Cluster Overall Avail [ CPU {} MEM {} ] Total [ CPU {} MEM {} ]", allResources.availCpuResourcesOverall, allResources.availMemResourcesOverall, allResources.totalCpuResourcesOverall, allResources.totalMemResourcesOverall);
    return sortObjectResources(allResources, new ExistingScheduleFunc() {

        @Override
        public int getNumExistingSchedule(String objectId) {
            String rackId = objectId;
            //Get execs already assigned in rack
            Collection<ExecutorDetails> execs = new LinkedList<ExecutorDetails>();
            if (_cluster.getAssignmentById(topoId) != null) {
                for (Map.Entry<ExecutorDetails, WorkerSlot> entry : _cluster.getAssignmentById(topoId).getExecutorToSlot().entrySet()) {
                    String nodeId = entry.getValue().getNodeId();
                    String hostname = idToNode(nodeId).getHostname();
                    ExecutorDetails exec = entry.getKey();
                    if (nodeIdToRackId.get(hostname) != null && nodeIdToRackId.get(hostname).equals(rackId)) {
                        execs.add(exec);
                    }
                }
            }
            // get execs already scheduled in the current scheduling
            for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry : scheduleAssignmentMap.entrySet()) {
                WorkerSlot workerSlot = entry.getKey();
                String nodeId = workerSlot.getNodeId();
                String hostname = idToNode(nodeId).getHostname();
                if (nodeIdToRackId.get(hostname).equals(rackId)) {
                    execs.addAll(entry.getValue());
                }
            }
            return execs.size();
        }
    });
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) HashMap(java.util.HashMap) RAS_Node(org.apache.storm.scheduler.resource.RAS_Node) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Collection(java.util.Collection) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map)
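
The "subordinate resource availability percentage" described in the javadoc can be illustrated with a small, hypothetical helper (not part of DefaultResourceAwareStrategy). It only mirrors the division the comment describes, rack availability over cluster-wide availability, and takes the smaller of the CPU and memory percentages as the rack's effective score, which is one reading of "subordinate".

// Hypothetical helper, for illustration only: availability percentage per the javadoc above.
// Each resource's percentage is rack availability / cluster-wide availability;
// the smaller of the two is treated as the rack's effective (subordinate) score.
static double subordinateAvailability(double rackAvailCpu, double rackAvailMem,
                                      double clusterAvailCpu, double clusterAvailMem) {
    double cpuPct = clusterAvailCpu > 0 ? rackAvailCpu / clusterAvailCpu : 0.0;
    double memPct = clusterAvailMem > 0 ? rackAvailMem / clusterAvailMem : 0.0;
    return Math.min(cpuPct, memPct);
}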

Example 15 with WorkerSlot

Use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.

The class Node, method freeTopology.

/**
   * Frees all the slots for a topology.
   * @param topId the topology to free slots for
   * @param cluster the cluster to update
   */
public void freeTopology(String topId, Cluster cluster) {
    Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
    if (slots == null || slots.isEmpty())
        return;
    for (WorkerSlot ws : slots) {
        cluster.freeSlot(ws);
        if (_isAlive) {
            _freeSlots.add(ws);
        }
    }
    _topIdToUsedSlots.remove(topId);
}
Also used : WorkerSlot(org.apache.storm.scheduler.WorkerSlot)
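
A short usage sketch for freeTopology, assuming nodes is an iterable of Node objects and cluster is the Cluster being updated; the loop simply applies the method above to every node so the topology's slots are released cluster-wide.

// Illustrative usage: release every slot a topology holds on any node.
// nodes (Iterable<Node>) and cluster are assumed to be in scope.
for (Node n : nodes) {
    n.freeTopology(topId, cluster);
}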

Aggregations

WorkerSlot (org.apache.storm.scheduler.WorkerSlot) 45
HashMap (java.util.HashMap) 33
ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails) 23
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment) 23
ArrayList (java.util.ArrayList) 22
Map (java.util.Map) 22
List (java.util.List) 17
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails) 16
TopologyDetails (org.apache.storm.scheduler.TopologyDetails) 15
HashSet (java.util.HashSet) 13
SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl) 11
Topologies (org.apache.storm.scheduler.Topologies) 11
Cluster (org.apache.storm.scheduler.Cluster) 10
LinkedList (java.util.LinkedList) 9
Config (org.apache.storm.Config) 9
INimbus (org.apache.storm.scheduler.INimbus) 9
Test (org.junit.Test) 9
Collection (java.util.Collection) 8
WorkerResources (org.apache.storm.generated.WorkerResources) 8
ImmutableMap (com.google.common.collect.ImmutableMap) 6