Example 61 with Cluster

use of org.apache.storm.scheduler.Cluster in project storm by apache.

the class BaseResourceAwareStrategy method createSearcherState.

/**
 * Create an instance of {@link SchedulingSearcherState}. This method is called by
 * {@link #prepareForScheduling(Cluster, TopologyDetails)} and relies on variables initialized there beforehand.
 *
 * @return a new instance of {@link SchedulingSearcherState}.
 */
private SchedulingSearcherState createSearcherState() {
    Map<WorkerSlot, Map<String, Integer>> workerCompCnts = new HashMap<>();
    Map<RasNode, Map<String, Integer>> nodeCompCnts = new HashMap<>();
    // populate with existing assignments
    SchedulerAssignment existingAssignment = cluster.getAssignmentById(topologyDetails.getId());
    if (existingAssignment != null) {
        existingAssignment.getExecutorToSlot().forEach((exec, ws) -> {
            String compId = execToComp.get(exec);
            RasNode node = nodes.getNodeById(ws.getNodeId());
            Map<String, Integer> compCnts = nodeCompCnts.computeIfAbsent(node, (k) -> new HashMap<>());
            // increment
            compCnts.put(compId, compCnts.getOrDefault(compId, 0) + 1);
            // populate worker to comp assignments
            compCnts = workerCompCnts.computeIfAbsent(ws, (k) -> new HashMap<>());
            // increment
            compCnts.put(compId, compCnts.getOrDefault(compId, 0) + 1);
        });
    }
    LinkedList<ExecutorDetails> unassignedAckers = new LinkedList<>();
    if (compToExecs.containsKey(Acker.ACKER_COMPONENT_ID)) {
        for (ExecutorDetails acker : compToExecs.get(Acker.ACKER_COMPONENT_ID)) {
            if (unassignedExecutors.contains(acker)) {
                unassignedAckers.add(acker);
            }
        }
    }
    return new SchedulingSearcherState(workerCompCnts, nodeCompCnts, maxStateSearch, maxSchedulingTimeMs, new ArrayList<>(unassignedExecutors), unassignedAckers, topologyDetails, execToComp);
}
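
A side note on the counting idiom above: the computeIfAbsent/getOrDefault pair can be collapsed with Map.merge. A minimal, behavior-equivalent sketch (illustrative only; not how the Storm source writes it):

import java.util.HashMap;
import java.util.Map;

Map<String, Integer> compCnts = new HashMap<>();
String compId = "bolt-1"; // hypothetical component id
// Inserts 1 if compId is absent, otherwise adds 1 to the existing count.
compCnts.merge(compId, 1, Integer::sum);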
Also used : Acker(org.apache.storm.daemon.Acker) IExecSorter(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.IExecSorter) ExecSorterByConnectionCount(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.ExecSorterByConnectionCount) RasNode(org.apache.storm.scheduler.resource.RasNode) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) NodeSorter(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.NodeSorter) RasNodes(org.apache.storm.scheduler.resource.RasNodes) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ExecSorterByProximity(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.ExecSorterByProximity) DaemonConfig(org.apache.storm.DaemonConfig) Map(java.util.Map) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) LinkedList(java.util.LinkedList) NodeSorterHostProximity(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.NodeSorterHostProximity) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Logger(org.slf4j.Logger) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) Set(java.util.Set) INodeSorter(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.INodeSorter) SchedulingStatus(org.apache.storm.scheduler.resource.SchedulingStatus) Cluster(org.apache.storm.scheduler.Cluster) Time(org.apache.storm.utils.Time) SchedulingResult(org.apache.storm.scheduler.resource.SchedulingResult) List(java.util.List) ObjectReader(org.apache.storm.utils.ObjectReader) Config(org.apache.storm.Config) Collections(java.util.Collections) ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails)

Example 62 with Cluster

use of org.apache.storm.scheduler.Cluster in project storm by apache.

the class TestConstraintSolverStrategy method basicFailureTest.

public void basicFailureTest(String confKey, Object confValue, ConstraintSolverStrategy cs) {
    Map<String, Object> config = makeTestTopoConf();
    config.put(confKey, confValue);
    cs.prepare(config);
    TopologyDetails topo = makeTopology(config, NORMAL_BOLT_PARALLEL);
    Topologies topologies = new Topologies(topo);
    Cluster cluster = makeCluster(topologies);
    LOG.info("Scheduling...");
    SchedulingResult result = cs.schedule(cluster, topo);
    LOG.info("Done scheduling {}...", result);
    Assert.assertTrue("Assert scheduling topology success " + result, !result.isSuccess());
}
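
For context, a plausible call site for this helper drives it with a constraint-solver limit too tight for the topology, so scheduling must fail. The exact key/value pair below is an assumption, not copied from the test class:

// Hypothetical invocation: cap the state search so low that the solver gives up.
ConstraintSolverStrategy cs = new ConstraintSolverStrategy();
basicFailureTest(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH, 1, cs);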
Also used : Topologies(org.apache.storm.scheduler.Topologies) Cluster(org.apache.storm.scheduler.Cluster) SchedulingResult(org.apache.storm.scheduler.resource.SchedulingResult) TopologyDetails(org.apache.storm.scheduler.TopologyDetails)

Example 63 with Cluster

use of org.apache.storm.scheduler.Cluster in project storm by apache.

the class TestGenericResourceAwareStrategy method testGenericResourceAwareStrategyWithoutSettingAckerExecutors.

/**
 * Test if the scheduling logic for the GenericResourceAwareStrategy is correct
 * without setting {@link Config#TOPOLOGY_ACKER_EXECUTORS}.
 *
 * For details, see {@link TestDefaultResourceAwareStrategy#testDefaultResourceAwareStrategyWithoutSettingAckerExecutors(int)}.
 */
@ParameterizedTest
@ValueSource(ints = { -1, 0, 1, 2 })
public void testGenericResourceAwareStrategyWithoutSettingAckerExecutors(int numOfAckersPerWorker) throws InvalidTopologyException {
    int spoutParallelism = 1;
    int boltParallelism = 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1").addResource("gpu.count", 1.0);
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2").addResource("gpu.count", 2.0);
    String topoName = "testTopology";
    StormTopology stormTopology = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Config conf = createGrasClusterConfig(50, 500, 0, null, Collections.emptyMap());
    Map<String, Double> genericResourcesMap = new HashMap<>();
    genericResourcesMap.put("gpu.count", 2.0);
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000, genericResourcesMap);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, topoName);
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
    conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
    // Parameterized test on different numOfAckersPerWorker
    if (numOfAckersPerWorker == -1) {
        // Neither Config.TOPOLOGY_ACKER_EXECUTORS nor Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER is set;
        // the defaults are 2 (the estimated number of workers) and 1, respectively.
    } else {
        conf.put(Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER, numOfAckersPerWorker);
    }
    int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRasTopo(conf, stormTopology);
    Nimbus.setUpAckerExecutorConfigs(topoName, conf, conf, estimatedNumWorker);
    conf.put(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB, 250);
    conf.put(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT, 50);
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), currentTime, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    // We need 3 slots on 3 separate hosts. The topology needs 6 GPUs, 3500 MB of memory, and 350% CPU.
    // The bolt-3 instances must be on separate nodes because they each need 2 GPUs.
    // The bolt-2 instances must be on the same node as they each need 1 GPU
    // (this assumes that we are packing the components to avoid fragmentation).
    // The bolt-1 and spout instances fill in the rest.
    // Ordered execs: [[6, 6], [2, 2], [4, 4], [5, 5], [1, 1], [3, 3], [0, 0]]
    // Ackers: [[8, 8], [7, 7]] (+ [[9, 9], [10, 10]] when numOfAckersPerWorker=2)
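    // Arithmetic check of the totals above (derived from the declared resources; ackers add
    // 250 MB and 50% CPU apiece): memory = 7 non-acker executors * 500 MB = 3500 MB;
    // CPU = 7 * 50% = 350%; GPUs = bolt-2 (2 * 1) + bolt-3 (2 * 2) = 6, at 2 per node -> at least 3 nodes.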
    HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
    if (numOfAckersPerWorker == -1 || numOfAckersPerWorker == 1) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(3, 3))));   // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 500 MB, 50% CPU, 2 GPU -> this node has 1500 MB, 150% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(6, 6),      // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(2, 2),      // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(5, 5),      // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(8, 8))));   // acker - 250 MB, 50% CPU, 0 GPU
        // Total 1750 MB, 200% CPU, 2 GPU -> this node has 250 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(4, 4),      // bolt-3 - 500 MB, 50% CPU, 2 GPU
            new ExecutorDetails(1, 1),      // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(0, 0),      // spout - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(7, 7))));   // acker - 250 MB, 50% CPU, 0 GPU
        // Total 1750 MB, 200% CPU, 2 GPU -> this node has 250 MB, 0% CPU, 0 GPU left
    } else if (numOfAckersPerWorker == 0) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(3, 3))));   // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 500 MB, 50% CPU, 2 GPU -> this node has 1500 MB, 150% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(6, 6),      // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(2, 2),      // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(5, 5),      // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(1, 1))));   // bolt-1 - 500 MB, 50% CPU, 0 GPU
        // Total 2000 MB, 200% CPU, 2 GPU -> this node has 0 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(0, 0),      // spout - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(4, 4))));   // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 1000 MB, 100% CPU, 2 GPU -> this node has 1000 MB, 100% CPU, 0 GPU left
    } else if (numOfAckersPerWorker == 2) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(3, 3))));   // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 500 MB, 50% CPU, 2 GPU -> this node has 1500 MB, 150% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(7, 7),      // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(8, 8),      // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(6, 6),      // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(2, 2))));   // bolt-1 - 500 MB, 50% CPU, 0 GPU
        // Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(9, 9),      // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(10, 10),    // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(1, 1),      // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(4, 4))));   // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(0, 0),      // spout - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(5, 5))));   // bolt-2 - 500 MB, 50% CPU, 1 GPU
        // Total 1000 MB, 100% CPU, 2 GPU -> this node has 1000 MB, 100% CPU, 0 GPU left
    }
    HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
    SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        foundScheduling.add(new HashSet<>(execs));
    }
    assertEquals(expectedScheduling, foundScheduling);
}
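
The slot-grouping loop at the end of this test (and the next) is a natural candidate for a small helper. A minimal sketch; the helper name is illustrative and not part of the Storm test code:

import java.util.Collection;
import java.util.HashSet;
import org.apache.storm.scheduler.ExecutorDetails;
import org.apache.storm.scheduler.SchedulerAssignment;

// Groups an assignment's executors by worker slot so schedules can be compared
// order-independently, exactly as the loops above do inline.
static HashSet<HashSet<ExecutorDetails>> groupBySlot(SchedulerAssignment assignment) {
    HashSet<HashSet<ExecutorDetails>> grouped = new HashSet<>();
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        grouped.add(new HashSet<>(execs));
    }
    return grouped;
}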
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashSet(java.util.HashSet) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 64 with Cluster

use of org.apache.storm.scheduler.Cluster in project storm by apache.

the class TestGenericResourceAwareStrategy method testGenericResourceAwareStrategyInFavorOfShuffle.

/**
 * Test that the scheduling logic for the GenericResourceAwareStrategy is correct when scheduling in favor of shuffle.
 */
@Test
public void testGenericResourceAwareStrategyInFavorOfShuffle() throws InvalidTopologyException {
    int spoutParallelism = 1;
    int boltParallelism = 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1").addResource("gpu.count", 1.0);
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2").addResource("gpu.count", 2.0);
    StormTopology stormTopology = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Config conf = createGrasClusterConfig(50, 250, 250, null, Collections.emptyMap());
    Map<String, Double> genericResourcesMap = new HashMap<>();
    genericResourcesMap.put("gpu.count", 2.0);
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000, genericResourcesMap);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, "testTopology");
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
    conf.put(Config.TOPOLOGY_RAS_ORDER_EXECUTORS_BY_PROXIMITY_NEEDS, true);
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), currentTime, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(conf, new StormMetricsRegistry());
    rs.schedule(topologies, cluster);
    // Sorted execs: [[0, 0], [2, 2], [6, 6], [4, 4], [1, 1], [5, 5], [3, 3], [7, 7]]
    // Ackers: [[7, 7]]
    HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(0, 0),      // spout
        new ExecutorDetails(2, 2),      // bolt-1
        new ExecutorDetails(6, 6),      // bolt-2
        new ExecutorDetails(7, 7))));   // acker
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(4, 4),      // bolt-3
        new ExecutorDetails(1, 1))));   // bolt-1
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(5, 5))));   // bolt-2
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(3, 3))));   // bolt-3
    HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
    SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        foundScheduling.add(new HashSet<>(execs));
    }
    assertEquals(expectedScheduling, foundScheduling);
}
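
The behavior under test hinges on a single topology config. A minimal sketch of enabling it outside the test harness (the surrounding submission setup is assumed, not shown):

import org.apache.storm.Config;

Config conf = new Config();
// Order executors by network-proximity needs so shuffle-grouped components land together.
conf.put(Config.TOPOLOGY_RAS_ORDER_EXECUTORS_BY_PROXIMITY_NEEDS, true);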
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashSet(java.util.HashSet) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Test(org.junit.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Example 65 with Cluster

use of org.apache.storm.scheduler.Cluster in project storm by apache.

the class TestGenericResourceAwareStrategy method testGrasRequiringEviction.

/**
 * Test requiring eviction until the generic resource (gpu) is evicted.
 */
@Test
public void testGrasRequiringEviction() {
    int spoutParallelism = 3;
    double cpuPercent = 10;
    double memoryOnHeap = 10;
    double memoryOffHeap = 10;
    // Sufficient CPU/memory, but insufficient GPUs to schedule all three topologies (gpu1, noGpu, gpu2).
    // GPU topology (requires 3 GPUs in total)
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism).addResource("gpu.count", 1.0);
    StormTopology stormTopologyWithGpu = builder.createTopology();
    // non-gpu topology
    builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    StormTopology stormTopologyNoGpu = builder.createTopology();
    Config conf = createGrasClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null, Collections.emptyMap());
    // allow 1 round of evictions
    conf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_MAX_TOPOLOGY_SCHEDULING_ATTEMPTS, 2);
    String gpu1 = "hasGpu1";
    String noGpu = "hasNoGpu";
    String gpu2 = "hasGpu2";
    TopologyDetails[] topo = { createTestStormTopology(stormTopologyWithGpu, 10, gpu1, conf), createTestStormTopology(stormTopologyNoGpu, 10, noGpu, conf), createTestStormTopology(stormTopologyWithGpu, 9, gpu2, conf) };
    Topologies topologies = new Topologies(topo[0], topo[1]);
    Map<String, Double> genericResourcesMap = new HashMap<>();
    genericResourcesMap.put("gpu.count", 1.0);
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000, genericResourcesMap);
    Cluster cluster = new Cluster(new INimbusTest(), new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    // should schedule gpu1 and noGpu successfully
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, gpu1);
    assertTopologiesFullyScheduled(cluster, noGpu);
    // Should evict the gpu1 and noGpu topologies in order to schedule the gpu2 topology,
    // then fail to reschedule gpu1, then reschedule noGpu.
    // Scheduling used to ignore generic (gpu) resources when deciding when to stop evicting, so gpu2 would fail to schedule.
    topologies = new Topologies(topo[0], topo[1], topo[2]);
    cluster = new Cluster(cluster, topologies);
    scheduler.schedule(topologies, cluster);
    assertTopologiesNotScheduled(cluster, gpu1);
    assertTopologiesFullyScheduled(cluster, noGpu);
    assertTopologiesFullyScheduled(cluster, gpu2);
}
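
The GPU arithmetic behind the eviction, inferred from the setup above (an illustrative sketch, not part of the test):

// Each GPU topology: 3 spout executors * gpu.count 1.0 = 3 GPUs.
// Cluster: 4 supervisors * gpu.count 1.0 = 4 GPUs.
double gpusPerTopology = 3 * 1.0;
double clusterGpus = 4 * 1.0;
// Both GPU topologies together would need 6 GPUs, but only 4 exist, so the
// higher-priority gpu2 (priority value 9 outranks 10 here) is placed only after gpu1 is evicted.
boolean bothGpuToposFit = 2 * gpusPerTopology <= clusterGpus; // false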
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Test(org.junit.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)

Aggregations

Cluster (org.apache.storm.scheduler.Cluster): 95 usages
Config (org.apache.storm.Config): 85 usages
Topologies (org.apache.storm.scheduler.Topologies): 83 usages
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 81 usages
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 79 usages
INimbus (org.apache.storm.scheduler.INimbus): 73 usages
HashMap (java.util.HashMap): 69 usages
StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 68 usages
ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics): 62 usages
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 59 usages
Map (java.util.Map): 44 usages
DaemonConfig (org.apache.storm.DaemonConfig): 43 usages
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 40 usages
ResourceAwareScheduler (org.apache.storm.scheduler.resource.ResourceAwareScheduler): 40 usages
HashSet (java.util.HashSet): 39 usages
ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails): 39 usages
Test (org.junit.Test): 37 usages
WorkerSlot (org.apache.storm.scheduler.WorkerSlot): 35 usages
Test (org.junit.jupiter.api.Test): 35 usages
SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl): 33 usages