Search in sources :

Example 6 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class IsolationScheduler method computeWorkerSpecs.

private Set<Set<ExecutorDetails>> computeWorkerSpecs(TopologyDetails topology) {
    Map<String, List<ExecutorDetails>> compExecutors = Utils.reverseMap(topology.getExecutorToComponent());
    List<ExecutorDetails> allExecutors = new ArrayList<ExecutorDetails>();
    Collection<List<ExecutorDetails>> values = compExecutors.values();
    for (List<ExecutorDetails> eList : values) {
        allExecutors.addAll(eList);
    }
    int numWorkers = topology.getNumWorkers();
    int bucketIndex = 0;
    Map<Integer, Set<ExecutorDetails>> bucketExecutors = new HashMap<Integer, Set<ExecutorDetails>>(numWorkers);
    for (ExecutorDetails executor : allExecutors) {
        Set<ExecutorDetails> executors = bucketExecutors.get(bucketIndex);
        if (executors == null) {
            executors = new HashSet<ExecutorDetails>();
            bucketExecutors.put(bucketIndex, executors);
        }
        executors.add(executor);
        bucketIndex = (bucketIndex + 1) % numWorkers;
    }
    return new HashSet<Set<ExecutorDetails>>(bucketExecutors.values());
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) HashSet(java.util.HashSet)
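The method above spreads a topology's executors over its workers round-robin: executors are gathered per component, then dealt one at a time into numWorkers buckets. Below is a minimal standalone sketch of that bucketing on plain strings; the class and method names (RoundRobinBucketsSketch, roundRobinBuckets) are illustrative and not part of Storm.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RoundRobinBucketsSketch {
    // Distribute items across numBuckets buckets in round-robin order,
    // mirroring how computeWorkerSpecs spreads executors over workers.
    static <T> List<Set<T>> roundRobinBuckets(List<T> items, int numBuckets) {
        List<Set<T>> buckets = new ArrayList<>();
        for (int i = 0; i < numBuckets; i++) {
            buckets.add(new HashSet<>());
        }
        int bucketIndex = 0;
        for (T item : items) {
            buckets.get(bucketIndex).add(item);
            bucketIndex = (bucketIndex + 1) % numBuckets;
        }
        return buckets;
    }

    public static void main(String[] args) {
        // Six "executors" spread over three "workers": each bucket ends up with two.
        List<String> executors = Arrays.asList("e1", "e2", "e3", "e4", "e5", "e6");
        System.out.println(roundRobinBuckets(executors, 3));
    }
}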

Example 7 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class DefaultResourceAwareStrategy method getWorkerScheduledMemoryUse.

/**
     * Get the amount of memory already assigned to a worker
     *
     * @param ws                    the worker slot to get the assigned memory for
     * @param td                    the topology that has executors running on the worker
     * @param scheduleAssignmentMap the schedulings calculated so far
     * @return the amount of memory
     */
private Double getWorkerScheduledMemoryUse(WorkerSlot ws, TopologyDetails td, Map<WorkerSlot, Collection<ExecutorDetails>> scheduleAssignmentMap) {
    Double totalMem = 0.0;
    Collection<ExecutorDetails> execs = scheduleAssignmentMap.get(ws);
    if (execs != null) {
        for (ExecutorDetails exec : execs) {
            totalMem += td.getTotalMemReqTask(exec);
        }
    }
    return totalMem;
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails)
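getWorkerScheduledMemoryUse simply sums getTotalMemReqTask over the executors already mapped to the slot in the in-progress scheduling. A minimal sketch of the same accounting follows, using plain strings for executors and a map standing in for the per-task memory requirements; all names here are illustrative.

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class WorkerMemorySketch {
    // Sum the memory requirement of every executor assigned to one worker.
    // memReqPerExecutor stands in for TopologyDetails.getTotalMemReqTask(exec).
    static double scheduledMemory(Collection<String> executorsOnWorker,
                                  Map<String, Double> memReqPerExecutor) {
        double total = 0.0;
        if (executorsOnWorker != null) {
            for (String exec : executorsOnWorker) {
                total += memReqPerExecutor.getOrDefault(exec, 0.0);
            }
        }
        return total;
    }

    public static void main(String[] args) {
        Map<String, Double> memReq = new HashMap<>();
        memReq.put("spout[1-1]", 128.0);
        memReq.put("bolt[2-2]", 256.0);
        // A worker running both executors is already using 384.0 MB in this scheduling.
        System.out.println(scheduledMemory(Arrays.asList("spout[1-1]", "bolt[2-2]"), memReq));
    }
}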

Example 8 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class DefaultResourceAwareStrategy method sortRacks.

/**
     * Sort racks.
     *
     * @param topoId                topology id
     * @param scheduleAssignmentMap calculated assignments so far
     * @return a sorted list of racks
     * Racks are sorted by two criteria, both in descending order:
     * 1) the number of executors of the topology that are already scheduled on the rack.
     * Sorting on this criterion first means the rest of the topology tends to be scheduled on the same rack as its existing executors.
     * 2) the subordinate/subservient resource availability percentage of the rack, i.e. the availability of its scarcest resource.
     * This percentage is calculated by dividing the resource available on the rack by the resource available in the entire cluster.
     * Racks that have exhausted, or have little of, one of the resources are therefore ranked after racks with more balanced resource availability,
     * so we are less likely to pick a rack that has a lot of one resource but little of another.
     */
TreeSet<ObjectResources> sortRacks(final String topoId, final Map<WorkerSlot, Collection<ExecutorDetails>> scheduleAssignmentMap) {
    AllResources allResources = new AllResources("Cluster");
    List<ObjectResources> racks = allResources.objectResources;
    final Map<String, String> nodeIdToRackId = new HashMap<String, String>();
    for (Map.Entry<String, List<String>> entry : _clusterInfo.entrySet()) {
        String rackId = entry.getKey();
        List<String> nodeIds = entry.getValue();
        ObjectResources rack = new ObjectResources(rackId);
        racks.add(rack);
        for (String nodeId : nodeIds) {
            RAS_Node node = _nodes.getNodeById(this.NodeHostnameToId(nodeId));
            double availMem = node.getAvailableMemoryResources();
            double availCpu = node.getAvailableCpuResources();
            double totalMem = node.getTotalMemoryResources();
            double totalCpu = node.getTotalCpuResources();
            rack.availMem += availMem;
            rack.totalMem += totalMem;
            rack.availCpu += availCpu;
            rack.totalCpu += totalCpu;
            nodeIdToRackId.put(nodeId, rack.id);
            allResources.availMemResourcesOverall += availMem;
            allResources.availCpuResourcesOverall += availCpu;
            allResources.totalMemResourcesOverall += totalMem;
            allResources.totalCpuResourcesOverall += totalCpu;
        }
    }
    LOG.debug("Cluster Overall Avail [ CPU {} MEM {} ] Total [ CPU {} MEM {} ]", allResources.availCpuResourcesOverall, allResources.availMemResourcesOverall, allResources.totalCpuResourcesOverall, allResources.totalMemResourcesOverall);
    return sortObjectResources(allResources, new ExistingScheduleFunc() {

        @Override
        public int getNumExistingSchedule(String objectId) {
            String rackId = objectId;
            //Get execs already assigned in rack
            Collection<ExecutorDetails> execs = new LinkedList<ExecutorDetails>();
            if (_cluster.getAssignmentById(topoId) != null) {
                for (Map.Entry<ExecutorDetails, WorkerSlot> entry : _cluster.getAssignmentById(topoId).getExecutorToSlot().entrySet()) {
                    String nodeId = entry.getValue().getNodeId();
                    String hostname = idToNode(nodeId).getHostname();
                    ExecutorDetails exec = entry.getKey();
                    if (nodeIdToRackId.get(hostname) != null && nodeIdToRackId.get(hostname).equals(rackId)) {
                        execs.add(exec);
                    }
                }
            }
            // get execs already scheduled in the current scheduling
            for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> entry : scheduleAssignmentMap.entrySet()) {
                WorkerSlot workerSlot = entry.getKey();
                String nodeId = workerSlot.getNodeId();
                String hostname = idToNode(nodeId).getHostname();
                if (nodeIdToRackId.get(hostname) != null && nodeIdToRackId.get(hostname).equals(rackId)) {
                    execs.addAll(entry.getValue());
                }
            }
            return execs.size();
        }
    });
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) HashMap(java.util.HashMap) RAS_Node(org.apache.storm.scheduler.resource.RAS_Node) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Collection(java.util.Collection) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map)
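The sort itself is delegated to sortObjectResources, with the callback above supplying criterion 1 (how many of the topology's executors are already on the rack). The sketch below shows the two-level ordering described in the Javadoc: existing executor count first, then the availability fraction of the scarcest resource. The Rack class, rankRacks method and the sample numbers are illustrative, not Storm's ObjectResources.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class RackSortSketch {
    // Minimal stand-in for one rack: its available resources plus how many of the
    // topology's executors already run there.
    static class Rack {
        final String id;
        final double availCpu;
        final double availMem;
        final int existingExecutors;
        Rack(String id, double availCpu, double availMem, int existingExecutors) {
            this.id = id;
            this.availCpu = availCpu;
            this.availMem = availMem;
            this.existingExecutors = existingExecutors;
        }
    }

    static List<Rack> rankRacks(List<Rack> racks, double clusterAvailCpu, double clusterAvailMem) {
        // The "subordinate" availability: the scarcer of the two resources,
        // expressed as a fraction of what the whole cluster has available.
        Comparator<Rack> byScarcestResource = Comparator.comparingDouble(
            r -> Math.min(r.availCpu / clusterAvailCpu, r.availMem / clusterAvailMem));
        List<Rack> sorted = new ArrayList<>(racks);
        // 1) racks that already host executors of this topology first,
        // 2) then racks whose scarcest resource is most plentiful.
        sorted.sort(Comparator.<Rack>comparingInt(r -> r.existingExecutors).reversed()
                .thenComparing(byScarcestResource.reversed()));
        return sorted;
    }

    public static void main(String[] args) {
        List<Rack> racks = Arrays.asList(
            new Rack("rack-1", 400, 8192, 0),   // balanced, nothing scheduled yet
            new Rack("rack-2", 50, 16384, 0),   // CPU nearly exhausted
            new Rack("rack-3", 300, 4096, 4));  // already hosts 4 executors
        rankRacks(racks, 750, 28672).forEach(r -> System.out.println(r.id));
        // Prints rack-3 (existing executors), then rack-1 (balanced), then rack-2 (CPU-starved).
    }
}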

Example 9 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class RAS_Node method intializeResources.

/**
     * Initializes resource usages on the node.
     */
private void intializeResources() {
    for (Entry<String, Map<String, Collection<ExecutorDetails>>> entry : _topIdToUsedSlots.entrySet()) {
        String topoId = entry.getKey();
        Map<String, Collection<ExecutorDetails>> assignment = entry.getValue();
        Map<ExecutorDetails, Double> topoMemoryResourceList = _topologies.getById(topoId).getTotalMemoryResourceList();
        for (Collection<ExecutorDetails> execs : assignment.values()) {
            for (ExecutorDetails exec : execs) {
                if (!_isAlive) {
                    // We do not free the assigned slots (the orphaned slots) on the inactive supervisors.
                    // The inactive node will be treated as a 0-resource node and not available for other unassigned workers.
                    continue;
                }
                if (topoMemoryResourceList.containsKey(exec)) {
                    consumeResourcesforTask(exec, _topologies.getById(topoId));
                } else {
                    throw new IllegalStateException("Executor " + exec + " not found!");
                }
            }
        }
    }
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) Collection(java.util.Collection) HashMap(java.util.HashMap) Map(java.util.Map)
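The loop deliberately skips executors on a supervisor that is no longer alive, so an inactive node keeps its orphaned assignments but contributes nothing to the consumed-resource tally. A toy version of that accounting for a single node is sketched below; the method and map names are illustrative.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class NodeResourceInitSketch {
    // Toy accounting for the "orphaned slots" rule: executors on a dead supervisor
    // are skipped, so the node registers zero consumed resources.
    static double consumedMemory(boolean nodeAlive, List<String> executors,
                                 Map<String, Double> memReqPerExecutor) {
        double consumed = 0.0;
        for (String exec : executors) {
            if (!nodeAlive) {
                continue; // do not free or account for orphaned slots on inactive supervisors
            }
            Double req = memReqPerExecutor.get(exec);
            if (req == null) {
                throw new IllegalStateException("Executor " + exec + " not found!");
            }
            consumed += req;
        }
        return consumed;
    }

    public static void main(String[] args) {
        Map<String, Double> memReq = new HashMap<>();
        memReq.put("bolt[4-4]", 512.0);
        System.out.println(consumedMemory(true, Arrays.asList("bolt[4-4]"), memReq));  // 512.0
        System.out.println(consumedMemory(false, Arrays.asList("bolt[4-4]"), memReq)); // 0.0
    }
}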

Example 10 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class RAS_Node method getExecutors.

public static Collection<ExecutorDetails> getExecutors(WorkerSlot ws, Cluster cluster) {
    Collection<ExecutorDetails> retList = new ArrayList<ExecutorDetails>();
    for (Entry<String, SchedulerAssignment> entry : cluster.getAssignments().entrySet()) {
        Map<ExecutorDetails, WorkerSlot> executorToSlot = entry.getValue().getExecutorToSlot();
        for (Map.Entry<ExecutorDetails, WorkerSlot> execToSlot : executorToSlot.entrySet()) {
            WorkerSlot slot = execToSlot.getValue();
            if (ws.getPort() == slot.getPort() && ws.getNodeId().equals(slot.getNodeId())) {
                ExecutorDetails exec = execToSlot.getKey();
                retList.add(exec);
            }
        }
    }
    return retList;
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) ArrayList(java.util.ArrayList) HashMap(java.util.HashMap) Map(java.util.Map)
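getExecutors identifies a slot by its node id plus port and scans every topology's assignment for executors placed on that slot. A minimal sketch of the same (nodeId, port) matching over a plain executor-to-slot map follows; the Slot class and all names are illustrative.

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class SlotExecutorsSketch {
    // A slot is identified by the supervisor node id plus the worker port.
    static class Slot {
        final String nodeId;
        final int port;
        Slot(String nodeId, int port) {
            this.nodeId = nodeId;
            this.port = port;
        }
    }

    // Collect every executor whose assigned slot matches the given node id and port,
    // mirroring the scan in RAS_Node.getExecutors.
    static Collection<String> executorsOnSlot(Slot ws, Map<String, Slot> executorToSlot) {
        Collection<String> result = new ArrayList<>();
        for (Map.Entry<String, Slot> entry : executorToSlot.entrySet()) {
            Slot slot = entry.getValue();
            if (ws.port == slot.port && ws.nodeId.equals(slot.nodeId)) {
                result.add(entry.getKey());
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, Slot> executorToSlot = new HashMap<>();
        executorToSlot.put("spout[1-1]", new Slot("sup-1", 6700));
        executorToSlot.put("bolt[2-2]", new Slot("sup-1", 6700));
        executorToSlot.put("bolt[3-3]", new Slot("sup-2", 6701));
        // Prints the two executors assigned to sup-1:6700.
        System.out.println(executorsOnSlot(new Slot("sup-1", 6700), executorToSlot));
    }
}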

Aggregations

ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails): 72
HashMap (java.util.HashMap): 50
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 42
WorkerSlot (org.apache.storm.scheduler.WorkerSlot): 41
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 36
ArrayList (java.util.ArrayList): 35
Map (java.util.Map): 34
Cluster (org.apache.storm.scheduler.Cluster): 31
Config (org.apache.storm.Config): 29
HashSet (java.util.HashSet): 28
List (java.util.List): 28
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 28
Topologies (org.apache.storm.scheduler.Topologies): 23
LinkedList (java.util.LinkedList): 21
INimbus (org.apache.storm.scheduler.INimbus): 21
Collection (java.util.Collection): 20
StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 19
StormTopology (org.apache.storm.generated.StormTopology): 18
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 18
ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics): 18