
Example 16 with StormMetricsRegistry

use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

the class TestResourceAwareScheduler method testLargeClusterSchedulingTiming.

/*
     * Measure the time to schedule a large cluster in the presence of fragmentation
     */
private TimeBlockResult testLargeClusterSchedulingTiming(int numNodes, Config config) {
    // Attempt to schedule multiple copies of two different topologies (topo-t0 and topo-t1) in three blocks.
    // Without fragmentation all of the topologies could be scheduled, but fragmentation prevents the
    // topologies in the last block from being scheduled.
    // Get start/end indices for blocks
    int numTopologyPairs = numNodes;
    int increment = (int) Math.floor(numTopologyPairs * 0.1);
    int[] firstBlockIndices = { 0, increment - 1 };
    int[] midBlockIndices = { increment, numTopologyPairs - increment - 1 };
    int[] lastBlockIndices = { numTopologyPairs - increment, numTopologyPairs - 1 };
    // Memory is the constraining resource.
    // memory required by topo-t0
    double t0Mem = 70;
    // memory required by topo-t1
    double t1Mem = 20;
    double nodeMem = 100;
    // first block (0% - 10%)
    Map<String, TopologyDetails> topologyMap = new HashMap<>();
    addTopologyBlockToMap(topologyMap, "topo_t0-", config, t0Mem, firstBlockIndices);
    addTopologyBlockToMap(topologyMap, "topo_t1-", config, t1Mem, firstBlockIndices);
    Topologies topologies = new Topologies(topologyMap);
    Map<String, SupervisorDetails> supMap = genSupervisors(numNodes, 7, 3500, nodeMem);
    Cluster cluster = new Cluster(new INimbusTest(), new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<String, SchedulerAssignmentImpl>(), topologies, config);
    TimeBlockResult timeBlockResult = new TimeBlockResult();
    // schedule first block (0% - 10%)
    {
        scheduler = new ResourceAwareScheduler();
        scheduler.prepare(config, new StormMetricsRegistry());
        long time = Time.currentTimeMillis();
        scheduler.schedule(topologies, cluster);
        timeBlockResult.firstBlockTime.add(Time.currentTimeMillis() - time);
    }
    // schedule mid block (10% - 90%)
    {
        addTopologyBlockToMap(topologyMap, "topo_t0-", config, t0Mem, midBlockIndices);
        addTopologyBlockToMap(topologyMap, "topo_t1-", config, t1Mem, midBlockIndices);
        topologies = new Topologies(topologyMap);
        cluster = new Cluster(cluster, topologies);
        scheduler.schedule(topologies, cluster);
    }
    // schedule last block (90% to 100%)
    {
        addTopologyBlockToMap(topologyMap, "topo_t0-", config, t0Mem, lastBlockIndices);
        addTopologyBlockToMap(topologyMap, "topo_t1-", config, t1Mem, lastBlockIndices);
        topologies = new Topologies(topologyMap);
        cluster = new Cluster(cluster, topologies);
        long time = Time.currentTimeMillis();
        scheduler.schedule(topologies, cluster);
        timeBlockResult.lastBlockTime.add(Time.currentTimeMillis() - time);
    }
    return timeBlockResult;
}
Also used : HashMap(java.util.HashMap) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails)
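
The TimeBlockResult holder and the addTopologyBlockToMap helper are defined elsewhere in TestResourceAwareScheduler and are not shown on this page. Based only on how they are called above (firstBlockTime and lastBlockTime collect elapsed milliseconds via add), a minimal stand-in plus a hypothetical driver that averages several runs could look like the sketch below; the field types and the runAndAverage method are assumptions for illustration, not the real test helpers.

// Minimal sketch, assuming java.util.ArrayList and java.util.List are imported.
// TimeBlockResult here is a guess at the real helper's shape; runAndAverage is hypothetical.
private static class TimeBlockResult {
    final List<Long> firstBlockTime = new ArrayList<>();
    final List<Long> lastBlockTime = new ArrayList<>();
}

private void runAndAverage(int numNodes, Config config, int runs) {
    TimeBlockResult combined = new TimeBlockResult();
    for (int i = 0; i < runs; i++) {
        TimeBlockResult single = testLargeClusterSchedulingTiming(numNodes, config);
        combined.firstBlockTime.addAll(single.firstBlockTime);
        combined.lastBlockTime.addAll(single.lastBlockTime);
    }
    double avgFirstMs = combined.firstBlockTime.stream().mapToLong(Long::longValue).average().orElse(0);
    double avgLastMs = combined.lastBlockTime.stream().mapToLong(Long::longValue).average().orElse(0);
    System.out.printf("first block avg: %.1f ms, last block avg: %.1f ms%n", avgFirstMs, avgLastMs);
}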

Example 17 with StormMetricsRegistry

use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

the class TestResourceAwareScheduler method testSubmitUsersWithNoGuarantees.

@Test
public void testSubmitUsersWithNoGuarantees() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 100, 1000);
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(userRes("jerry", 200, 2000));
    Config config = createClusterConfig(100, 500, 500, resourceUserPool);
    Topologies topologies = new Topologies(genTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 10, "jerry"), genTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"), genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"), genTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 20, "bobby"));
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-2", "topo-3", "topo-4");
    assertTopologiesNotScheduled(cluster, "topo-5");
}
Also used : DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) Test(org.junit.jupiter.api.Test) PerformanceTest(org.apache.storm.testing.PerformanceTest)
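
The userRes and userResourcePool helpers come from TestUtilsForResourceAwareScheduler and build the per-user guaranteed resource pool handed to createClusterConfig. A hand-built equivalent might look like the sketch below; only the outer Map<String, Map<String, Number>> shape is taken from the call above, while the "cpu" and "memory" key names are an assumption made for illustration.

// Hedged sketch: build the guarantee pool for user "jerry" by hand instead of via the helpers.
// The inner key names are assumed; the real mapping is defined by the test utilities.
Map<String, Map<String, Number>> resourceUserPool = new HashMap<>();
Map<String, Number> jerryGuarantee = new HashMap<>();
jerryGuarantee.put("cpu", 200);      // guaranteed CPU for "jerry" (assumed key name)
jerryGuarantee.put("memory", 2000);  // guaranteed memory in MB for "jerry" (assumed key name)
resourceUserPool.put("jerry", jerryGuarantee);
Config config = createClusterConfig(100, 500, 500, resourceUserPool);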

Example 18 with StormMetricsRegistry

use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

the class TestResourceAwareScheduler method testResourceLimitation.

@Test
public void testResourceLimitation() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(2, 2, 400, 2000);
    // a topology with a multi-executor spout and a single bolt
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout", new TestWordSpout(), 2).setCPULoad(250.0).setMemoryLoad(1000.0, 200.0);
    builder1.setBolt("wordCountBolt", new TestWordCounter(), 1).shuffleGrouping("wordSpout").setCPULoad(100.0).setMemoryLoad(500.0, 100.0);
    StormTopology stormTopology1 = builder1.createTopology();
    Config config = new Config();
    config.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap1 = genExecsAndComps(stormTopology1);
    TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 2, executorMap1, 0, "user");
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    scheduler = rs;
    Topologies topologies = new Topologies(topology1);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    rs.prepare(config, new StormMetricsRegistry());
    rs.schedule(topologies, cluster);
    SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
    Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
    Set<String> nodesIDs1 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots1) {
        nodesIDs1.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
    List<Double> assignedExecutorMemory = new ArrayList<>();
    List<Double> assignedExecutorCpu = new ArrayList<>();
    for (ExecutorDetails executor : executors1) {
        assignedExecutorMemory.add(topology1.getTotalMemReqTask(executor));
        assignedExecutorCpu.add(topology1.getTotalCpuReqTask(executor));
    }
    Collections.sort(assignedExecutorCpu);
    Collections.sort(assignedExecutorMemory);
    Map<ExecutorDetails, SupervisorDetails> executorToSupervisor = new HashMap<>();
    Map<SupervisorDetails, List<ExecutorDetails>> supervisorToExecutors = new HashMap<>();
    Map<Double, Double> cpuAvailableToUsed = new HashMap<>();
    Map<Double, Double> memoryAvailableToUsed = new HashMap<>();
    for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment1.getExecutorToSlot().entrySet()) {
        executorToSupervisor.put(entry.getKey(), cluster.getSupervisorById(entry.getValue().getNodeId()));
    }
    for (Map.Entry<ExecutorDetails, SupervisorDetails> entry : executorToSupervisor.entrySet()) {
        supervisorToExecutors.computeIfAbsent(entry.getValue(), k -> new ArrayList<>()).add(entry.getKey());
    }
    for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
        Double supervisorTotalCpu = entry.getKey().getTotalCpu();
        Double supervisorTotalMemory = entry.getKey().getTotalMemory();
        Double supervisorUsedCpu = 0.0;
        Double supervisorUsedMemory = 0.0;
        for (ExecutorDetails executor : entry.getValue()) {
            // accumulate the CPU and memory this topology's executors request on the supervisor
            supervisorUsedCpu += topology1.getTotalCpuReqTask(executor);
            supervisorUsedMemory += topology1.getTotalMemReqTask(executor);
        }
        cpuAvailableToUsed.put(supervisorTotalCpu, supervisorUsedCpu);
        memoryAvailableToUsed.put(supervisorTotalMemory, supervisorUsedMemory);
    }
    // executor0 resides on one worker (on one node); executor1 and executor2 share another worker (on the other node)
    assertEquals(2, assignedSlots1.size());
    assertEquals(2, nodesIDs1.size());
    assertEquals(3, executors1.size());
    assertEquals(100.0, assignedExecutorCpu.get(0), 0.001);
    assertEquals(250.0, assignedExecutorCpu.get(1), 0.001);
    assertEquals(250.0, assignedExecutorCpu.get(2), 0.001);
    assertEquals(600.0, assignedExecutorMemory.get(0), 0.001);
    assertEquals(1200.0, assignedExecutorMemory.get(1), 0.001);
    assertEquals(1200.0, assignedExecutorMemory.get(2), 0.001);
    for (Map.Entry<Double, Double> entry : memoryAvailableToUsed.entrySet()) {
        assertTrue(entry.getKey() - entry.getValue() >= 0);
    }
    for (Map.Entry<Double, Double> entry : cpuAvailableToUsed.entrySet()) {
        assertTrue(entry.getKey() - entry.getValue() >= 0);
    }
    assertFalse(cluster.needsSchedulingRas(topology1));
    assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) Arrays(java.util.Arrays) ConstraintSolverStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.ConstraintSolverStrategy) DefaultResourceAwareStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy) LoggerFactory(org.slf4j.LoggerFactory) INimbus(org.apache.storm.scheduler.INimbus) DaemonConfig(org.apache.storm.DaemonConfig) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Map(java.util.Map) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) After(org.junit.After) Duration(java.time.Duration) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) TestWordSpout(org.apache.storm.testing.TestWordSpout) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) DisallowedStrategyException(org.apache.storm.utils.DisallowedStrategyException) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Collection(java.util.Collection) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) Set(java.util.Set) NormalizedResources(org.apache.storm.scheduler.resource.normalization.NormalizedResources) TestWordCounter(org.apache.storm.testing.TestWordCounter) Time(org.apache.storm.utils.Time) Test(org.junit.jupiter.api.Test) WorkerResources(org.apache.storm.generated.WorkerResources) List(java.util.List) ConfigUtils(org.apache.storm.utils.ConfigUtils) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) Entry(java.util.Map.Entry) Config(org.apache.storm.Config) IScheduler(org.apache.storm.scheduler.IScheduler) HashMap(java.util.HashMap) BaseResourceAwareStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.BaseResourceAwareStrategy) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Topologies(org.apache.storm.scheduler.Topologies) ReflectionUtils(org.apache.storm.utils.ReflectionUtils) StormTopology(org.apache.storm.generated.StormTopology) DefaultResourceAwareStrategyOld(org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategyOld) LinkedList(java.util.LinkedList) ConfigValidation(org.apache.storm.validation.ConfigValidation) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) GenericResourceAwareStrategy(org.apache.storm.scheduler.resource.strategies.scheduling.GenericResourceAwareStrategy) Logger(org.slf4j.Logger) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Utils(org.apache.storm.utils.Utils) PerformanceTest(org.apache.storm.testing.PerformanceTest) Cluster(org.apache.storm.scheduler.Cluster) AtomicLong(java.util.concurrent.atomic.AtomicLong) TreeMap(java.util.TreeMap) Assertions(org.junit.jupiter.api.Assertions) Assert(org.junit.Assert) Collections(java.util.Collections)
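
The per-supervisor bookkeeping above (mapping executors to supervisors, summing the CPU and memory they request, and checking that neither exceeds what the supervisor offers) can be folded into a small helper. The sketch below is not part of the Storm test class; it uses only accessors that already appear in this example (getExecutorToSlot, getSupervisorById, getNodeId, getTotalCpu, getTotalMemory, getTotalCpuReqTask, getTotalMemReqTask).

// Illustrative helper: assert that no supervisor is asked for more CPU or memory than it has.
private static void assertSupervisorsNotOverCommitted(Cluster cluster,
                                                      SchedulerAssignment assignment,
                                                      TopologyDetails topology) {
    // Per supervisor: [0] = requested CPU, [1] = requested memory
    Map<SupervisorDetails, double[]> usedBySupervisor = new HashMap<>();
    for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
        SupervisorDetails sup = cluster.getSupervisorById(entry.getValue().getNodeId());
        double[] used = usedBySupervisor.computeIfAbsent(sup, k -> new double[2]);
        used[0] += topology.getTotalCpuReqTask(entry.getKey());
        used[1] += topology.getTotalMemReqTask(entry.getKey());
    }
    for (Map.Entry<SupervisorDetails, double[]> entry : usedBySupervisor.entrySet()) {
        assertTrue(entry.getKey().getTotalCpu() >= entry.getValue()[0]);
        assertTrue(entry.getKey().getTotalMemory() >= entry.getValue()[1]);
    }
}

With such a helper, the headroom assertions in the test body reduce to assertSupervisorsNotOverCommitted(cluster, assignment1, topology1).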

Example 19 with StormMetricsRegistry

use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

the class TestResourceAwareScheduler method testStrategyTakingTooLong.

@Test
public void testStrategyTakingTooLong() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(8, 4, 100, 1000);
    Config config = createClusterConfig(100, 500, 500, null);
    List<String> allowedSchedulerStrategies = new ArrayList<>();
    allowedSchedulerStrategies.add(getDefaultResourceAwareStrategyClass().getName());
    allowedSchedulerStrategies.add(DefaultResourceAwareStrategyOld.class.getName());
    allowedSchedulerStrategies.add(NeverEndingSchedulingStrategy.class.getName());
    config.put(Config.NIMBUS_SCHEDULER_STRATEGY_CLASS_WHITELIST, allowedSchedulerStrategies);
    config.put(DaemonConfig.SCHEDULING_TIMEOUT_SECONDS_PER_TOPOLOGY, 30);
    TopologyDetails topo1 = genTopology("topo-1", config, 1, 0, 2, 0, currentTime - 2, 10, "jerry");
    TopologyDetails topo3 = genTopology("topo-3", config, 1, 2, 1, 1, currentTime - 2, 20, "jerry");
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, NeverEndingSchedulingStrategy.class.getName());
    TopologyDetails topo2 = genTopology("topo-2", config, 2, 0, 2, 0, currentTime - 2, 20, "jerry");
    Topologies topologies = new Topologies(topo1, topo2, topo3);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertFalse(cluster.needsSchedulingRas(topo1));
    assertTrue(cluster.needsSchedulingRas(topo2));
    assertFalse(cluster.needsSchedulingRas(topo3));
    assertTrue("Topo-1 scheduled?", cluster.getAssignmentById(topo1.getId()) != null);
    assertEquals("Topo-1 all executors scheduled?", 2, cluster.getAssignmentById(topo1.getId()).getExecutorToSlot().size());
    assertTrue("Topo-2 not scheduled", cluster.getAssignmentById(topo2.getId()) == null);
    assertEquals("Scheduling took too long for " + topo2.getId() + " using strategy " + NeverEndingSchedulingStrategy.class.getName() + " timeout after 30 seconds using config scheduling.timeout.seconds.per.topology.", cluster.getStatusMap().get(topo2.getId()));
    assertTrue("Topo-3 scheduled?", cluster.getAssignmentById(topo3.getId()) != null);
    assertEquals("Topo-3 all executors scheduled?", 3, cluster.getAssignmentById(topo3.getId()).getExecutorToSlot().size());
}
Also used : DefaultResourceAwareStrategyOld(org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategyOld) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) ArrayList(java.util.ArrayList) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Test(org.junit.jupiter.api.Test) PerformanceTest(org.apache.storm.testing.PerformanceTest)
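
NeverEndingSchedulingStrategy is a helper in TestUtilsForResourceAwareScheduler that deliberately never produces an assignment, which is what triggers the scheduling.timeout.seconds.per.topology handling asserted above. A rough sketch of such a strategy follows; the IStrategy interface, its prepare/schedule signatures, and the interrupt-based cancellation are assumptions about Storm's resource-aware scheduling API, so treat this as an illustration rather than the real helper.

// Hedged sketch of a never-terminating strategy (assumed IStrategy shape, not the real class).
public static class NeverReturningStrategy implements IStrategy {
    @Override
    public void prepare(Map<String, Object> config) {
        // nothing to prepare
    }

    @Override
    public SchedulingResult schedule(Cluster cluster, TopologyDetails td) {
        // Spin until the scheduling timeout machinery interrupts this thread; this is what
        // causes the "Scheduling took too long ..." status checked in the test.
        while (!Thread.currentThread().isInterrupted()) {
            // intentionally never computes an assignment
        }
        return null; // placeholder: a real strategy would return a SchedulingResult
    }
}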

Example 20 with StormMetricsRegistry

use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

the class TestResourceAwareScheduler method testTopologyWithMultipleSpouts.

@Test
public void testTopologyWithMultipleSpouts() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(2, 4, 400, 2000);
    // a topology with multiple spouts
    TopologyBuilder builder1 = new TopologyBuilder();
    builder1.setSpout("wordSpout1", new TestWordSpout(), 1);
    builder1.setSpout("wordSpout2", new TestWordSpout(), 1);
    builder1.setBolt("wordCountBolt1", new TestWordCounter(), 1).shuffleGrouping("wordSpout1").shuffleGrouping("wordSpout2");
    builder1.setBolt("wordCountBolt2", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
    builder1.setBolt("wordCountBolt3", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt1");
    builder1.setBolt("wordCountBolt4", new TestWordCounter(), 1).shuffleGrouping("wordCountBolt2");
    builder1.setBolt("wordCountBolt5", new TestWordCounter(), 1).shuffleGrouping("wordSpout2");
    StormTopology stormTopology1 = builder1.createTopology();
    Config config = new Config();
    config.putAll(defaultTopologyConf);
    Map<ExecutorDetails, String> executorMap1 = genExecsAndComps(stormTopology1);
    TopologyDetails topology1 = new TopologyDetails("topology1", config, stormTopology1, 0, executorMap1, 0, "user");
    // a topology with two unconnected partitions
    TopologyBuilder builder2 = new TopologyBuilder();
    builder2.setSpout("wordSpoutX", new TestWordSpout(), 1);
    builder2.setSpout("wordSpoutY", new TestWordSpout(), 1);
    StormTopology stormTopology2 = builder2.createTopology();
    Map<ExecutorDetails, String> executorMap2 = genExecsAndComps(stormTopology2);
    TopologyDetails topology2 = new TopologyDetails("topology2", config, stormTopology2, 0, executorMap2, 0, "user");
    scheduler = new ResourceAwareScheduler();
    Topologies topologies = new Topologies(topology1, topology2);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    SchedulerAssignment assignment1 = cluster.getAssignmentById(topology1.getId());
    Set<WorkerSlot> assignedSlots1 = assignment1.getSlots();
    Set<String> nodesIDs1 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots1) {
        nodesIDs1.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors1 = assignment1.getExecutors();
    assertEquals(1, assignedSlots1.size());
    assertEquals(1, nodesIDs1.size());
    assertEquals(7, executors1.size());
    assertFalse(cluster.needsSchedulingRas(topology1));
    assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
    SchedulerAssignment assignment2 = cluster.getAssignmentById(topology2.getId());
    Set<WorkerSlot> assignedSlots2 = assignment2.getSlots();
    Set<String> nodesIDs2 = new HashSet<>();
    for (WorkerSlot slot : assignedSlots2) {
        nodesIDs2.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors2 = assignment2.getExecutors();
    assertEquals(1, assignedSlots2.size());
    assertEquals(1, nodesIDs2.size());
    assertEquals(2, executors2.size());
    assertFalse(cluster.needsSchedulingRas(topology2));
    assertTrue(cluster.getStatusMap().get(topology2.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashSet(java.util.HashSet) TestWordCounter(org.apache.storm.testing.TestWordCounter) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) TestWordSpout(org.apache.storm.testing.TestWordSpout) Test(org.junit.jupiter.api.Test) PerformanceTest(org.apache.storm.testing.PerformanceTest)
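
Examples 18 and 20 both end with the same pair of checks: the topology no longer needs scheduling and its status reports "Running - Fully Scheduled by DefaultResourceAwareStrategy". The illustrative helper below (not part of the Storm test class) bundles those checks, using only calls already shown in these examples.

// Illustrative helper that wraps the repeated "fully scheduled" assertions.
private static void assertFullyScheduledBy(Cluster cluster, TopologyDetails topology, String strategyName) {
    assertFalse(cluster.needsSchedulingRas(topology));
    assertNotNull(cluster.getAssignmentById(topology.getId()));
    assertTrue(cluster.getStatusMap().get(topology.getId())
                      .startsWith("Running - Fully Scheduled by " + strategyName));
}

For instance, the final checks for topology2 above become assertFullyScheduledBy(cluster, topology2, "DefaultResourceAwareStrategy").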

Aggregations

StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 123
Cluster (org.apache.storm.scheduler.Cluster): 67
Topologies (org.apache.storm.scheduler.Topologies): 66
Config (org.apache.storm.Config): 64
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 64
HashMap (java.util.HashMap): 63
Test (org.junit.Test): 62
ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics): 61
INimbus (org.apache.storm.scheduler.INimbus): 60
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 54
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 53
DaemonConfig (org.apache.storm.DaemonConfig): 41
Test (org.junit.jupiter.api.Test): 40
ResourceAwareScheduler (org.apache.storm.scheduler.resource.ResourceAwareScheduler): 34
HashSet (java.util.HashSet): 29
Map (java.util.Map): 29
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 27
TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 27
ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails): 26
StormTopology (org.apache.storm.generated.StormTopology): 24