
Example 16 with NormalizedResourceRequest

Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.

The class TopologyDetails, method getApproximateResources.

/**
 * Get approximate resources for the given topology executors. Ignores shared memory.
 *
 * @param execs the executors the inquiry concerns.
 * @return the approximate resources for the executors.
 */
public NormalizedResourceRequest getApproximateResources(Set<ExecutorDetails> execs) {
    NormalizedResourceRequest ret = new NormalizedResourceRequest();
    execs.stream().filter(x -> hasExecInTopo(x)).forEach(x -> ret.add(resourceList.get(x)));
    return ret;
}
Also used: NormalizedResourceRequest(org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest) Logger(org.slf4j.Logger) Collection(java.util.Collection) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) HashMap(java.util.HashMap) Utils(org.apache.storm.utils.Utils) ComponentCommon(org.apache.storm.generated.ComponentCommon) GlobalStreamId(org.apache.storm.generated.GlobalStreamId) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Bolt(org.apache.storm.generated.Bolt) Time(org.apache.storm.utils.Time) List(java.util.List) ObjectReader(org.apache.storm.utils.ObjectReader) SharedMemory(org.apache.storm.generated.SharedMemory) StormTopology(org.apache.storm.generated.StormTopology) Map(java.util.Map) SpoutSpec(org.apache.storm.generated.SpoutSpec) Config(org.apache.storm.Config) ComponentType(org.apache.storm.generated.ComponentType)
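
As a rough usage sketch (not part of the original source), a scheduler component could call getApproximateResources to estimate the CPU and memory footprint of a subset of executors. The helper class and method names below are hypothetical; the getters on NormalizedResourceRequest are the ones already used in the snippets on this page.

import java.util.Set;

import org.apache.storm.scheduler.ExecutorDetails;
import org.apache.storm.scheduler.TopologyDetails;
import org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest;

public class ApproximateResourceReport {

    /**
     * Sums the approximate (non-shared) resources requested by the given executors
     * and renders them as a short human-readable string.
     */
    public static String describe(TopologyDetails topology, Set<ExecutorDetails> executors) {
        NormalizedResourceRequest approx = topology.getApproximateResources(executors);
        double memoryMb = approx.getOnHeapMemoryMb() + approx.getOffHeapMemoryMb();
        return String.format("%d executors request ~%.1f%% CPU and ~%.1f MB memory (shared memory excluded)",
                executors.size(), approx.getTotalCpu(), memoryMb);
    }
}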

Example 17 with NormalizedResourceRequest

Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.

The class ResourceUtils, method getBoltsResources.

public static Map<String, NormalizedResourceRequest> getBoltsResources(StormTopology topology, Map<String, Object> topologyConf) {
    Map<String, NormalizedResourceRequest> boltResources = new HashMap<>();
    if (topology.get_bolts() != null) {
        for (Map.Entry<String, Bolt> bolt : topology.get_bolts().entrySet()) {
            NormalizedResourceRequest topologyResources = new NormalizedResourceRequest(bolt.getValue().get_common(), topologyConf, bolt.getKey());
            if (LOG.isTraceEnabled()) {
                LOG.trace("Turned {} into {}", bolt.getValue().get_common().get_json_conf(), topologyResources);
            }
            boltResources.put(bolt.getKey(), topologyResources);
        }
    }
    return boltResources;
}
Also used: NormalizedResourceRequest(org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest) HashMap(java.util.HashMap) Bolt(org.apache.storm.generated.Bolt) Map(java.util.Map)
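
For illustration, the per-bolt map returned by getBoltsResources can be walked to report what each bolt requests. This is a minimal sketch, assuming ResourceUtils lives under org.apache.storm.scheduler.resource and that the topology and its configuration map are already available; the summary class itself is hypothetical.

import java.util.Map;

import org.apache.storm.generated.StormTopology;
import org.apache.storm.scheduler.resource.ResourceUtils;
import org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest;

public class BoltResourceSummary {

    /** Prints the normalized resource request of every bolt in the topology. */
    public static void printBoltRequests(StormTopology topology, Map<String, Object> topologyConf) {
        Map<String, NormalizedResourceRequest> boltResources =
                ResourceUtils.getBoltsResources(topology, topologyConf);
        for (Map.Entry<String, NormalizedResourceRequest> entry : boltResources.entrySet()) {
            NormalizedResourceRequest request = entry.getValue();
            System.out.printf("bolt %s: cpu=%.1f onHeapMb=%.1f offHeapMb=%.1f%n",
                    entry.getKey(), request.getTotalCpu(),
                    request.getOnHeapMemoryMb(), request.getOffHeapMemoryMb());
        }
    }
}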

Example 18 with NormalizedResourceRequest

Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.

The class Cluster, method updateCachesForWorkerSlot.

/**
 * Updates the ScheduledResources and UsedSlots caches for the given workerSlot.
 */
private void updateCachesForWorkerSlot(WorkerSlot workerSlot, WorkerResources workerResources, String topologyId, Double sharedOffHeapNodeMemory) {
    String nodeId = workerSlot.getNodeId();
    NormalizedResourceRequest normalizedResourceRequest = new NormalizedResourceRequest();
    normalizedResourceRequest.add(workerResources);
    nodeToScheduledResourcesCache.computeIfAbsent(nodeId, Cluster::makeMap).put(workerSlot, normalizedResourceRequest);
    nodeToScheduledOffHeapNodeMemoryCache.computeIfAbsent(nodeId, Cluster::makeMap).put(topologyId, sharedOffHeapNodeMemory);
    nodeToUsedSlotsCache.computeIfAbsent(nodeId, Cluster::makeSet).add(workerSlot);
}
Also used: NormalizedResourceRequest(org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest)
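
The noteworthy part of this private helper is the computeIfAbsent idiom used to keep per-node caches lazily populated. Below is a standalone sketch of that pattern using plain JDK collections, not Storm's actual cache fields, to show the shape of the two-level maps.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PerNodeCache {

    // nodeId -> (slotId -> scheduled CPU), the inner map is created lazily per node
    private final Map<String, Map<String, Double>> scheduledCpuByNode = new HashMap<>();
    // nodeId -> set of used slot ids, also created lazily per node
    private final Map<String, Set<String>> usedSlotsByNode = new HashMap<>();

    /** Records that a slot on the given node now carries the given CPU load. */
    public void recordAssignment(String nodeId, String slotId, double cpu) {
        scheduledCpuByNode.computeIfAbsent(nodeId, k -> new HashMap<>()).put(slotId, cpu);
        usedSlotsByNode.computeIfAbsent(nodeId, k -> new HashSet<>()).add(slotId);
    }
}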

Example 19 with NormalizedResourceRequest

Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.

The class Cluster, method wouldFit.

@Override
public boolean wouldFit(WorkerSlot ws, ExecutorDetails exec, TopologyDetails td, NormalizedResourceOffer resourcesAvailable, double maxHeap) {
    NormalizedResourceRequest requestedResources = td.getTotalResources(exec);
    if (!resourcesAvailable.couldFit(minWorkerCpu, requestedResources)) {
        return false;
    }
    double currentTotal = 0.0;
    double currentCpuTotal = 0.0;
    Set<ExecutorDetails> wouldBeAssigned = new HashSet<>();
    wouldBeAssigned.add(exec);
    SchedulerAssignmentImpl assignment = assignments.get(td.getId());
    if (assignment != null) {
        Collection<ExecutorDetails> currentlyAssigned = assignment.getSlotToExecutors().get(ws);
        if (currentlyAssigned != null) {
            wouldBeAssigned.addAll(currentlyAssigned);
            WorkerResources wrCurrent = calculateWorkerResources(td, currentlyAssigned);
            currentTotal = wrCurrent.get_mem_off_heap() + wrCurrent.get_mem_on_heap();
            currentCpuTotal = wrCurrent.get_cpu();
        }
        currentTotal += calculateSharedOffHeapNodeMemory(ws.getNodeId(), td);
    }
    WorkerResources wrAfter = calculateWorkerResources(td, wouldBeAssigned);
    double afterTotal = wrAfter.get_mem_off_heap() + wrAfter.get_mem_on_heap();
    afterTotal += calculateSharedOffHeapNodeMemory(ws.getNodeId(), td, exec);
    double afterOnHeap = wrAfter.get_mem_on_heap();
    double afterCpuTotal = wrAfter.get_cpu();
    double cpuAdded = afterCpuTotal - currentCpuTotal;
    double cpuAvailable = resourcesAvailable.getTotalCpu();
    if (cpuAdded > cpuAvailable) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Could not schedule {}:{} on {} not enough CPU {} > {}", td.getName(), exec, ws, cpuAdded, cpuAvailable);
        }
        return false;
    }
    double memoryAdded = afterTotal - currentTotal;
    double memoryAvailable = resourcesAvailable.getTotalMemoryMb();
    if (memoryAdded > memoryAvailable) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Could not schedule {}:{} on {} not enough Mem {} > {}", td.getName(), exec, ws, memoryAdded, memoryAvailable);
        }
        return false;
    }
    if (afterOnHeap > maxHeap) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Could not schedule {}:{} on {} HEAP would be too large {} > {}", td.getName(), exec, ws, afterOnHeap, maxHeap);
        }
        return false;
    }
    return true;
}
Also used: NormalizedResourceRequest(org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest) WorkerResources(org.apache.storm.generated.WorkerResources) HashSet(java.util.HashSet)
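
A scheduling strategy might use wouldFit to pre-filter candidate slots before committing an assignment. The sketch below is illustrative only: the filter class is hypothetical, the Cluster, TopologyDetails, resource offer, and max-heap limit are assumed to be supplied by the scheduler, and for simplicity the same resource offer is reused for every candidate, whereas the real scheduler works with per-node offers.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.storm.scheduler.Cluster;
import org.apache.storm.scheduler.ExecutorDetails;
import org.apache.storm.scheduler.TopologyDetails;
import org.apache.storm.scheduler.WorkerSlot;
import org.apache.storm.scheduler.resource.normalization.NormalizedResourceOffer;

public class SlotFilter {

    /** Returns the subset of candidate slots on which the executor would still fit. */
    public static List<WorkerSlot> fittingSlots(Cluster cluster, Collection<WorkerSlot> candidates,
                                                ExecutorDetails exec, TopologyDetails td,
                                                NormalizedResourceOffer available, double maxHeapMb) {
        List<WorkerSlot> fitting = new ArrayList<>();
        for (WorkerSlot slot : candidates) {
            if (cluster.wouldFit(slot, exec, td, available, maxHeapMb)) {
                fitting.add(slot);
            }
        }
        return fitting;
    }
}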

Example 20 with NormalizedResourceRequest

Use of org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest in project storm by apache.

The class Cluster, method calculateWorkerResources.

private WorkerResources calculateWorkerResources(TopologyDetails td, Collection<ExecutorDetails> executors) {
    NormalizedResourceRequest totalResources = new NormalizedResourceRequest();
    Map<String, Double> sharedTotalResources = new HashMap<>();
    for (ExecutorDetails exec : executors) {
        NormalizedResourceRequest allResources = td.getTotalResources(exec);
        if (allResources == null) {
            continue;
        }
        totalResources.add(allResources);
    }
    for (SharedMemory shared : td.getSharedMemoryRequests(executors)) {
        totalResources.addOffHeap(shared.get_off_heap_worker());
        totalResources.addOnHeap(shared.get_on_heap());
        addResource(sharedTotalResources, Constants.COMMON_OFFHEAP_MEMORY_RESOURCE_NAME, shared.get_off_heap_worker());
        addResource(sharedTotalResources, Constants.COMMON_ONHEAP_MEMORY_RESOURCE_NAME, shared.get_on_heap());
    }
    sharedTotalResources = NormalizedResources.RESOURCE_NAME_NORMALIZER.normalizedResourceMap(sharedTotalResources);
    Map<String, Double> totalResourcesMap = totalResources.toNormalizedMap();
    Double cpu = totalResources.getTotalCpu();
    if (cpu < minWorkerCpu) {
        cpu = minWorkerCpu;
        totalResourcesMap.put(Constants.COMMON_CPU_RESOURCE_NAME, cpu);
    }
    WorkerResources ret = new WorkerResources();
    ret.set_resources(totalResourcesMap);
    ret.set_shared_resources(sharedTotalResources);
    ret.set_cpu(cpu);
    ret.set_mem_off_heap(totalResources.getOffHeapMemoryMb());
    ret.set_mem_on_heap(totalResources.getOnHeapMemoryMb());
    ret.set_shared_mem_off_heap(sharedTotalResources.getOrDefault(Constants.COMMON_OFFHEAP_MEMORY_RESOURCE_NAME, 0.0));
    ret.set_shared_mem_on_heap(sharedTotalResources.getOrDefault(Constants.COMMON_ONHEAP_MEMORY_RESOURCE_NAME, 0.0));
    return ret;
}
Also used: NormalizedResourceRequest(org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest) HashMap(java.util.HashMap) WorkerResources(org.apache.storm.generated.WorkerResources) SharedMemory(org.apache.storm.generated.SharedMemory)
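
Downstream code typically reads the resulting WorkerResources back through its thrift-style getters. A small illustrative sketch that derives the non-shared memory of a worker: the getters for the shared fields are assumed to mirror the set_shared_mem_* setters used above, and the helper class is hypothetical.

import org.apache.storm.generated.WorkerResources;

public class WorkerResourceMath {

    /**
     * Memory requested directly by executors: total on-heap plus off-heap,
     * minus the shared portions that calculateWorkerResources folded in.
     */
    public static double nonSharedMemoryMb(WorkerResources wr) {
        double total = wr.get_mem_on_heap() + wr.get_mem_off_heap();
        double shared = wr.get_shared_mem_on_heap() + wr.get_shared_mem_off_heap();
        return total - shared;
    }
}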

Aggregations

NormalizedResourceRequest (org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest): 21
HashMap (java.util.HashMap): 12
Map (java.util.Map): 10
HashSet (java.util.HashSet): 9
ArrayList (java.util.ArrayList): 8
List (java.util.List): 8
Set (java.util.Set): 8
Collection (java.util.Collection): 7
Config (org.apache.storm.Config): 7
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 7
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 7
Logger (org.slf4j.Logger): 7
LoggerFactory (org.slf4j.LoggerFactory): 7
Collections (java.util.Collections): 6
Iterator (java.util.Iterator): 6
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 6
Collectors (java.util.stream.Collectors): 6
DNSToSwitchMapping (org.apache.storm.networktopography.DNSToSwitchMapping): 6
Cluster (org.apache.storm.scheduler.Cluster): 6
ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails): 6