Example 51 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class TestResourceAwareScheduler method testHeterogeneousCluster.

@Test
public void testHeterogeneousCluster() {
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    // strong supervisor node
    Map<String, Number> resourceMap1 = new HashMap<>();
    resourceMap1.put(Config.SUPERVISOR_CPU_CAPACITY, 800.0);
    resourceMap1.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 4096.0);
    // weak supervisor node
    Map<String, Number> resourceMap2 = new HashMap<>();
    resourceMap2.put(Config.SUPERVISOR_CPU_CAPACITY, 200.0);
    resourceMap2.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1024.0);
    Map<String, SupervisorDetails> supMap = new HashMap<String, SupervisorDetails>();
    for (int i = 0; i < 2; i++) {
        List<Number> ports = new LinkedList<Number>();
        for (int j = 0; j < 4; j++) {
            ports.add(j);
        }
        SupervisorDetails sup = new SupervisorDetails("sup-" + i, "host-" + i, null, ports, (Map) (i == 0 ? resourceMap1 : resourceMap2));
        supMap.put(sup.getId(), sup);
    }
    // topo1 has a single huge task that cannot be handled by the weak supervisor node
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout1", new TestWordSpout(), 1).setCPULoad(300.0).setMemoryLoad(2000.0, 48.0);
    StormTopology stormTopology1 = builder1.createTopology();
    Config config1 = new Config();
    config1.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap1 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology1);
    TopologyDetails topology1 = new TopologyDetails("topology1", config1, stormTopology1, 1, executorMap1, 0);
    // topo2 has 4 large tasks
    TopologyBuilder builder2 = new TopologyBuilder();
    builder2.setSpout("wordSpout2", new TestWordSpout(), 4).setCPULoad(100.0).setMemoryLoad(500.0, 12.0);
    StormTopology stormTopology2 = builder2.createTopology();
    Config config2 = new Config();
    config2.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap2 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology2);
    TopologyDetails topology2 = new TopologyDetails("topology2", config2, stormTopology2, 1, executorMap2, 0);
    // topo3 has 4 medium tasks; topos 1-3 together request exactly the cluster's total mem (2048 + 2048 + 1024 = 5120 MB)
    TopologyBuilder builder3 = new TopologyBuilder();
    builder3.setSpout("wordSpout3", new TestWordSpout(), 4).setCPULoad(20.0).setMemoryLoad(200.0, 56.0);
    StormTopology stormTopology3 = builder3.createTopology();
    Config config3 = new Config();
    config3.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap3 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology3);
    TopologyDetails topology3 = new TopologyDetails("topology3", config3, stormTopology3, 1, executorMap3, 0);
    // topo4 has 12 small tasks, whose mem usage does not exactly divide a node's mem capacity
    TopologyBuilder builder4 = new TopologyBuilder();
    builder4.setSpout("wordSpout4", new TestWordSpout(), 12).setCPULoad(30.0).setMemoryLoad(100.0, 0.0);
    StormTopology stormTopology4 = builder4.createTopology();
    Config config4 = new Config();
    config4.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap4 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology4);
    TopologyDetails topology4 = new TopologyDetails("topology4", config4, stormTopology4, 1, executorMap4, 0);
    // topo5 has 40 small tasks; it should exactly use up both the cluster's cpu (40 * 25 = 1000) and mem (40 * 128 = 5120 MB)
    TopologyBuilder builder5 = new TopologyBuilder();
    builder5.setSpout("wordSpout5", new TestWordSpout(), 40).setCPULoad(25.0).setMemoryLoad(100.0, 28.0);
    StormTopology stormTopology5 = builder5.createTopology();
    Config config5 = new Config();
    config5.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap5 = TestUtilsForResourceAwareScheduler.genExecsAndComps(stormTopology5);
    TopologyDetails topology5 = new TopologyDetails("topology5", config5, stormTopology5, 1, executorMap5, 0);
    // Test1: Launch topos 1-3 together; their mem requests exactly divide the cluster's mem, so on each node either mem or cpu should be fully used
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topology1.getId(), topology1);
    topoMap.put(topology2.getId(), topology2);
    topoMap.put(topology3.getId(), topology3);
    Topologies topologies = new Topologies(topoMap);
    rs.prepare(config1);
    rs.schedule(topologies, cluster);
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology1.getId()));
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology2.getId()));
    Assert.assertEquals("Running - Fully Scheduled by DefaultResourceAwareStrategy", cluster.getStatusMap().get(topology3.getId()));
    Map<SupervisorDetails, Double> superToCpu = TestUtilsForResourceAwareScheduler.getSupervisorToCpuUsage(cluster, topologies);
    Map<SupervisorDetails, Double> superToMem = TestUtilsForResourceAwareScheduler.getSupervisorToMemoryUsage(cluster, topologies);
    final Double EPSILON = 0.0001;
    for (SupervisorDetails supervisor : supMap.values()) {
        Double cpuAvailable = supervisor.getTotalCPU();
        Double memAvailable = supervisor.getTotalMemory();
        Double cpuUsed = superToCpu.get(supervisor);
        Double memUsed = superToMem.get(supervisor);
        Assert.assertTrue((Math.abs(memAvailable - memUsed) < EPSILON) || (Math.abs(cpuAvailable - cpuUsed) < EPSILON));
    }
    // end of Test1
    // Test2: Launch topos 1, 2 and 4; together they request 2048 + 2048 + 1200 = 5296 MB, slightly more than the 5120 MB available, so one of the three topologies will not be scheduled
    cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
    topoMap = new HashMap<>();
    topoMap.put(topology1.getId(), topology1);
    topoMap.put(topology2.getId(), topology2);
    topoMap.put(topology4.getId(), topology4);
    topologies = new Topologies(topoMap);
    rs.prepare(config1);
    rs.schedule(topologies, cluster);
    int numTopologiesAssigned = 0;
    if (cluster.getStatusMap().get(topology1.getId()).equals("Running - Fully Scheduled by DefaultResourceAwareStrategy")) {
        numTopologiesAssigned++;
    }
    if (cluster.getStatusMap().get(topology2.getId()).equals("Running - Fully Scheduled by DefaultResourceAwareStrategy")) {
        numTopologiesAssigned++;
    }
    if (cluster.getStatusMap().get(topology4.getId()).equals("Running - Fully Scheduled by DefaultResourceAwareStrategy")) {
        numTopologiesAssigned++;
    }
    Assert.assertEquals(2, numTopologiesAssigned);
    // end of Test2
    // Test3: Launch topo5 only; both mem and cpu should be exactly used up
    cluster = new Cluster(iNimbus, supMap, new HashMap<String, SchedulerAssignmentImpl>(), config1);
    topoMap = new HashMap<>();
    topoMap.put(topology5.getId(), topology5);
    topologies = new Topologies(topoMap);
    rs.prepare(config1);
    rs.schedule(topologies, cluster);
    superToCpu = TestUtilsForResourceAwareScheduler.getSupervisorToCpuUsage(cluster, topologies);
    superToMem = TestUtilsForResourceAwareScheduler.getSupervisorToMemoryUsage(cluster, topologies);
    for (SupervisorDetails supervisor : supMap.values()) {
        Double cpuAvailable = supervisor.getTotalCPU();
        Double memAvailable = supervisor.getTotalMemory();
        Double cpuUsed = superToCpu.get(supervisor);
        Double memUsed = superToMem.get(supervisor);
        Assert.assertEquals(cpuAvailable, cpuUsed, 0.0001);
        Assert.assertEquals(memAvailable, memUsed, 0.0001);
    }
    // end of Test3
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) HashMap(java.util.HashMap) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) LinkedList(java.util.LinkedList) TestWordSpout(org.apache.storm.testing.TestWordSpout) Test(org.junit.Test)
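The Map<ExecutorDetails, String> keys above work because ExecutorDetails is a small value object: it carries the [startTask, endTask] range an executor owns and compares equal by that range. A minimal sketch of that behavior, assuming only the two-argument constructor and the task-range getters of org.apache.storm.scheduler.ExecutorDetails:

import org.apache.storm.scheduler.ExecutorDetails;

public class ExecutorDetailsSketch {
    public static void main(String[] args) {
        // an executor that owns tasks 1 through 4
        ExecutorDetails exec = new ExecutorDetails(1, 4);
        System.out.println(exec.getStartTask()); // 1
        System.out.println(exec.getEndTask());   // 4
        // equality is by task range, which is what lets ExecutorDetails
        // key the executor-to-component maps built by genExecsAndComps
        System.out.println(exec.equals(new ExecutorDetails(1, 4))); // true
    }
}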

Example 52 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class NodeSorterHostProximity method sortObjectResourcesGeneric.

/**
 * Sort objects by the following two criteria.
 *
 * <ol>
 * <li>The number of executors of the topology being scheduled that are already on the
 * object (node or rack), in descending order. Sorting on this criterion first means we try to
 * schedule the rest of a topology on the same object (node or rack) as its existing executors.</li>
 *
 * <li>The subordinate/subservient resource availability percentage of the object, in descending order.
 * We calculate this percentage by dividing the resource availability of the object (node or rack) by
 * the resource availability of the entire rack or cluster, depending on whether the object is a node
 * or a rack. Unlike the DefaultResourceAwareStrategy, this percentage boosts the node or rack if a
 * resource is requested by the executor the sorting is done for, and pulls it down if it is not.
 * As a result, objects (nodes or racks) that have exhausted, or have little of, one of these resources
 * are ranked after objects with more balanced resource availability, and objects whose resources are
 * not requested are ranked below those. We are therefore less likely to pick a rack that has a lot of
 * one resource but little of another, or a lot of resources the executor did not request.</li>
 * </ol>
 *
 * @param allResources         contains all individual ObjectResources as well as cumulative stats
 * @param exec                 executor for which the sorting is done
 * @param existingScheduleFunc a function to get existing executors already scheduled on this object
 * @return an {@link Iterable} of sorted {@link ObjectResourcesItem}
 */
@Deprecated
private Iterable<ObjectResourcesItem> sortObjectResourcesGeneric(final ObjectResourcesSummary allResources, ExecutorDetails exec, final ExistingScheduleFunc existingScheduleFunc) {
    ObjectResourcesSummary affinityBasedAllResources = new ObjectResourcesSummary(allResources);
    final NormalizedResourceOffer availableResourcesOverall = allResources.getAvailableResourcesOverall();
    final NormalizedResourceRequest requestedResources = (exec != null) ? topologyDetails.getTotalResources(exec) : null;
    affinityBasedAllResources.getObjectResources().forEach(x -> {
        if (requestedResources != null) {
            // negate unrequested resources
            x.availableResources.updateForRareResourceAffinity(requestedResources);
        }
        x.minResourcePercent = availableResourcesOverall.calculateMinPercentageUsedBy(x.availableResources);
        x.avgResourcePercent = availableResourcesOverall.calculateAveragePercentageUsedBy(x.availableResources);
        LOG.trace("for {}: minResourcePercent={}, avgResourcePercent={}, numExistingSchedule={}", x.id, x.minResourcePercent, x.avgResourcePercent, existingScheduleFunc.getNumExistingSchedule(x.id));
    });
    Comparator<ObjectResourcesItem> comparator = (o1, o2) -> {
        int execsScheduled1 = existingScheduleFunc.getNumExistingSchedule(o1.id);
        int execsScheduled2 = existingScheduleFunc.getNumExistingSchedule(o2.id);
        if (execsScheduled1 > execsScheduled2) {
            return -1;
        } else if (execsScheduled1 < execsScheduled2) {
            return 1;
        }
        double o1Avg = o1.avgResourcePercent;
        double o2Avg = o2.avgResourcePercent;
        if (o1Avg > o2Avg) {
            return -1;
        } else if (o1Avg < o2Avg) {
            return 1;
        }
        return o1.id.compareTo(o2.id);
    };
    TreeSet<ObjectResourcesItem> sortedObjectResources = new TreeSet<>(comparator);
    sortedObjectResources.addAll(affinityBasedAllResources.getObjectResources());
    LOG.debug("Sorted Object Resources: {}", sortedObjectResources);
    return sortedObjectResources;
}
Also used : NormalizedResourceOffer(org.apache.storm.scheduler.resource.normalization.NormalizedResourceOffer) NormalizedResourceRequest(org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest) RasNode(org.apache.storm.scheduler.resource.RasNode) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) RasNodes(org.apache.storm.scheduler.resource.RasNodes) BaseResourceAwareStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.BaseResourceAwareStrategy) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) VisibleForTesting(org.apache.storm.shade.com.google.common.annotations.VisibleForTesting) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) NoSuchElementException(java.util.NoSuchElementException) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) DNSToSwitchMapping(org.apache.storm.networktopography.DNSToSwitchMapping) ObjectResourcesSummary(org.apache.storm.scheduler.resource.strategies.scheduling.ObjectResourcesSummary) Collection(java.util.Collection) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) Set(java.util.Set) Collectors(java.util.stream.Collectors) Cluster(org.apache.storm.scheduler.Cluster) List(java.util.List) Stream(java.util.stream.Stream) ObjectResourcesItem(org.apache.storm.scheduler.resource.strategies.scheduling.ObjectResourcesItem) Config(org.apache.storm.Config) Comparator(java.util.Comparator) Collections(java.util.Collections) ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails)
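The comparator above applies its criteria strictly in order: executor count first, average availability percentage second, object id as a final tie-break. A self-contained sketch of the same ordering, using a hypothetical Item stand-in rather than the real ObjectResourcesItem and ExistingScheduleFunc:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class TwoTierSortSketch {
    // hypothetical stand-in combining ObjectResourcesItem's fields with the
    // executor count that ExistingScheduleFunc would supply
    static class Item {
        final String id;
        final int execsScheduled;        // criterion 1: executors already on this object
        final double avgResourcePercent; // criterion 2: average availability percentage
        Item(String id, int execsScheduled, double avgResourcePercent) {
            this.id = id;
            this.execsScheduled = execsScheduled;
            this.avgResourcePercent = avgResourcePercent;
        }
    }

    public static void main(String[] args) {
        List<Item> items = new ArrayList<>(List.of(
            new Item("rack-b", 0, 90.0),   // most available resources, but no executors yet
            new Item("rack-a", 3, 40.0),   // already hosts executors, so it wins outright
            new Item("rack-c", 0, 40.0))); // ties rack-b on executors, loses on percentage
        Comparator<Item> cmp = Comparator
            .comparingInt((Item i) -> i.execsScheduled).reversed()
            .thenComparing(Comparator.comparingDouble((Item i) -> i.avgResourcePercent).reversed())
            .thenComparing(i -> i.id);
        items.sort(cmp);
        items.forEach(i -> System.out.println(i.id)); // rack-a, rack-b, rack-c
    }
}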

Example 53 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class ExecSorterByProximity method takeExecutors.

/**
 * Take unscheduled executors from the current component and all of its downstream components in a particular order.
 * First, take one executor from the current component;
 * then, for every child (direct downstream component) of this component:
 *     if there is a shuffle grouping from the current component to this child,
 *         the number of executors to take from this child is the max of
 *         1 and (the child's number of unscheduled executors / the current component's number of unscheduled executors);
 *     otherwise, the number of executors to take is 1;
 *     finally, recurse into this child by calling takeExecutors(...) that many times.
 * @param currComp The current component.
 * @param componentMap The map from component Id to component object.
 * @param compToExecsToSchedule The map from component Id to unscheduled executors.
 * @return The executors to schedule in order.
 */
private List<ExecutorDetails> takeExecutors(Component currComp, final Map<String, Component> componentMap, final Map<String, Queue<ExecutorDetails>> compToExecsToSchedule) {
    List<ExecutorDetails> execsScheduled = new ArrayList<>();
    Queue<ExecutorDetails> currQueue = compToExecsToSchedule.get(currComp.getId());
    int currUnscheduledNumExecs = currQueue.size();
    // Just for defensive programming as this won't actually happen.
    if (currUnscheduledNumExecs == 0) {
        return execsScheduled;
    }
    execsScheduled.add(currQueue.poll());
    Set<String> sortedChildren = getSortedChildren(currComp, componentMap);
    for (String childId : sortedChildren) {
        Component childComponent = componentMap.get(childId);
        Queue<ExecutorDetails> childQueue = compToExecsToSchedule.get(childId);
        int childUnscheduledNumExecs = childQueue.size();
        if (childUnscheduledNumExecs == 0) {
            continue;
        }
        int numExecsToTake = 1;
        if (hasShuffleGroupingFromParentToChild(currComp, childComponent)) {
            // if it's shuffle grouping, take executors from the child proportionally (integer division truncates)
            numExecsToTake = Math.max(1, childUnscheduledNumExecs / currUnscheduledNumExecs);
        }
        // otherwise, one-by-one
        for (int i = 0; i < numExecsToTake; i++) {
            execsScheduled.addAll(takeExecutors(childComponent, componentMap, compToExecsToSchedule));
        }
    }
    return execsScheduled;
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) ArrayList(java.util.ArrayList) Component(org.apache.storm.scheduler.Component)
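The shuffle-grouping branch pulls executors from the child in proportion to the parent's remaining executors. A worked example of just that calculation, with illustrative counts:

public class ProportionalTakeSketch {
    public static void main(String[] args) {
        // suppose the current component has 2 unscheduled executors and its
        // shuffle-grouped child has 5; integer division truncates 5 / 2 to 2
        int currUnscheduledNumExecs = 2;
        int childUnscheduledNumExecs = 5;
        int numExecsToTake = Math.max(1, childUnscheduledNumExecs / currUnscheduledNumExecs);
        // each parent executor therefore drags roughly two child executors with it
        System.out.println(numExecsToTake); // 2
    }
}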

Example 54 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class ExecSorterByProximity method sortExecutors.

/**
 * Order executors by network proximity needs. First add all executors for components in
 * topologically sorted order. Then append the executors not yet accounted for, which are
 * the system executors.
 *
 * @param unassignedExecutors an unmodifiable set of executors that need to be scheduled.
 * @return a list of executors in sorted order for scheduling.
 */
public List<ExecutorDetails> sortExecutors(Set<ExecutorDetails> unassignedExecutors) {
    // excludes system components
    Map<String, Component> componentMap = topologyDetails.getUserTopolgyComponents();
    // in insert order
    LinkedHashSet<ExecutorDetails> orderedExecutorSet = new LinkedHashSet<>();
    Map<String, Queue<ExecutorDetails>> compToExecsToSchedule = new HashMap<>();
    for (Component component : componentMap.values()) {
        compToExecsToSchedule.put(component.getId(), new LinkedList<>());
        for (ExecutorDetails exec : component.getExecs()) {
            if (unassignedExecutors.contains(exec)) {
                compToExecsToSchedule.get(component.getId()).add(exec);
            }
        }
    }
    List<Component> sortedComponents = topologicalSortComponents(componentMap);
    for (Component currComp : sortedComponents) {
        int numExecs = compToExecsToSchedule.get(currComp.getId()).size();
        for (int i = 0; i < numExecs; i++) {
            orderedExecutorSet.addAll(takeExecutors(currComp, componentMap, compToExecsToSchedule));
        }
    }
    // append any executors not already in the ordered set, which may be system executors
    orderedExecutorSet.addAll(unassignedExecutors);
    return new LinkedList<>(orderedExecutorSet);
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) LinkedHashSet(java.util.LinkedHashSet) HashMap(java.util.HashMap) LinkedList(java.util.LinkedList) Component(org.apache.storm.scheduler.Component) Queue(java.util.Queue)
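The closing orderedExecutorSet.addAll(unassignedExecutors) leans on LinkedHashSet semantics: re-adding an element that is already present neither duplicates it nor moves it, so only the executors the topological pass missed (the system executors) are appended at the end. A minimal illustration with string stand-ins for executors:

import java.util.LinkedHashSet;
import java.util.List;

public class AppendLeftoversSketch {
    public static void main(String[] args) {
        LinkedHashSet<String> ordered = new LinkedHashSet<>();
        ordered.add("spout[1-1]"); // user executors, already in proximity order
        ordered.add("bolt[2-2]");
        // re-adding existing elements is a no-op that keeps their position;
        // only the system executor is new, so it lands at the end
        ordered.addAll(List.of("spout[1-1]", "bolt[2-2]", "__acker[3-3]"));
        System.out.println(ordered); // [spout[1-1], bolt[2-2], __acker[3-3]]
    }
}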

Example 55 with ExecutorDetails

use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

the class NodeSorter method sortObjectResourcesGeneric.

/**
 * Sort objects by the following two criteria.
 *
 * <ol>
 * <li>The number of executors of the topology being scheduled that are already on the
 * object (node or rack), in descending order. Sorting on this criterion first means we try to
 * schedule the rest of a topology on the same object (node or rack) as its existing executors.</li>
 *
 * <li>The subordinate/subservient resource availability percentage of the object, in descending order.
 * We calculate this percentage by dividing the resource availability of the object (node or rack) by
 * the resource availability of the entire rack or cluster, depending on whether the object is a node
 * or a rack. Unlike the DefaultResourceAwareStrategy, this percentage boosts the node or rack if a
 * resource is requested by the executor the sorting is done for, and pulls it down if it is not.
 * As a result, objects (nodes or racks) that have exhausted, or have little of, one of these resources
 * are ranked after objects with more balanced resource availability, and objects whose resources are
 * not requested are ranked below those. We are therefore less likely to pick a rack that has a lot of
 * one resource but little of another, or a lot of resources the executor did not request.</li>
 * </ol>
 *
 * @param allResources         contains all individual ObjectResources as well as cumulative stats
 * @param exec                 executor for which the sorting is done
 * @param existingScheduleFunc a function to get existing executors already scheduled on this object
 * @return a sorted list of ObjectResources
 */
@Deprecated
private List<ObjectResourcesItem> sortObjectResourcesGeneric(final ObjectResourcesSummary allResources, ExecutorDetails exec, final ExistingScheduleFunc existingScheduleFunc) {
    ObjectResourcesSummary affinityBasedAllResources = new ObjectResourcesSummary(allResources);
    NormalizedResourceRequest requestedResources = topologyDetails.getTotalResources(exec);
    affinityBasedAllResources.getObjectResources().forEach(x -> x.availableResources.updateForRareResourceAffinity(requestedResources));
    final NormalizedResourceOffer availableResourcesOverall = allResources.getAvailableResourcesOverall();
    List<ObjectResourcesItem> sortedObjectResources = new ArrayList<>();
    Comparator<ObjectResourcesItem> comparator = (o1, o2) -> {
        int execsScheduled1 = existingScheduleFunc.getNumExistingSchedule(o1.id);
        int execsScheduled2 = existingScheduleFunc.getNumExistingSchedule(o2.id);
        if (execsScheduled1 > execsScheduled2) {
            return -1;
        } else if (execsScheduled1 < execsScheduled2) {
            return 1;
        }
        double o1Avg = availableResourcesOverall.calculateAveragePercentageUsedBy(o1.availableResources);
        double o2Avg = availableResourcesOverall.calculateAveragePercentageUsedBy(o2.availableResources);
        if (o1Avg > o2Avg) {
            return -1;
        } else if (o1Avg < o2Avg) {
            return 1;
        }
        return o1.id.compareTo(o2.id);
    };
    sortedObjectResources.addAll(affinityBasedAllResources.getObjectResources());
    sortedObjectResources.sort(comparator);
    LOG.debug("Sorted Object Resources: {}", sortedObjectResources);
    return sortedObjectResources;
}
Also used : NormalizedResourceOffer(org.apache.storm.scheduler.resource.normalization.NormalizedResourceOffer) NormalizedResourceRequest(org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest) RasNode(org.apache.storm.scheduler.resource.RasNode) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) RasNodes(org.apache.storm.scheduler.resource.RasNodes) BaseResourceAwareStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.BaseResourceAwareStrategy) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) NoSuchElementException(java.util.NoSuchElementException) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) DNSToSwitchMapping(org.apache.storm.networktopography.DNSToSwitchMapping) ObjectResourcesSummary(org.apache.storm.scheduler.resource.strategies.scheduling.ObjectResourcesSummary) Collection(java.util.Collection) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) Set(java.util.Set) Collectors(java.util.stream.Collectors) Cluster(org.apache.storm.scheduler.Cluster) List(java.util.List) Stream(java.util.stream.Stream) ObjectResourcesItem(org.apache.storm.scheduler.resource.strategies.scheduling.ObjectResourcesItem) Config(org.apache.storm.Config) Comparator(java.util.Comparator) Collections(java.util.Collections) ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails)
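Compared with Example 52, the difference between these two deprecated implementations is mechanical rather than semantic: NodeSorterHostProximity precomputes minResourcePercent and avgResourcePercent once per item and collects the results in a TreeSet, while this older NodeSorter recomputes the average percentage inside the comparator on every comparison and sorts an ArrayList. Note also that this version calls topologyDetails.getTotalResources(exec) without the null check on exec that NodeSorterHostProximity applies.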

Aggregations

ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails)72 HashMap (java.util.HashMap)50 TopologyDetails (org.apache.storm.scheduler.TopologyDetails)42 WorkerSlot (org.apache.storm.scheduler.WorkerSlot)41 SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment)36 ArrayList (java.util.ArrayList)35 Map (java.util.Map)34 Cluster (org.apache.storm.scheduler.Cluster)31 Config (org.apache.storm.Config)29 HashSet (java.util.HashSet)28 List (java.util.List)28 SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails)28 Topologies (org.apache.storm.scheduler.Topologies)23 LinkedList (java.util.LinkedList)21 INimbus (org.apache.storm.scheduler.INimbus)21 Collection (java.util.Collection)20 StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry)19 StormTopology (org.apache.storm.generated.StormTopology)18 TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler)18 ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics)18