
Example 86 with StormMetricsRegistry

Use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

From the class TestGenericResourceAwareStrategy, the method testGrasRequiringEviction.

/*
 * Test requiring eviction until the generic resource (gpu) is evicted.
 */
@Test
public void testGrasRequiringEviction() {
    int spoutParallelism = 3;
    double cpuPercent = 10;
    double memoryOnHeap = 10;
    double memoryOffHeap = 10;
    // Sufficient CPU/memory, but insufficient GPU capacity to schedule all topologies (gpu1, noGpu, gpu2).
    // GPU topology (requires 3 GPUs in total: 1 per spout executor)
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism).addResource("gpu.count", 1.0);
    StormTopology stormTopologyWithGpu = builder.createTopology();
    // non-gpu topology
    builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    StormTopology stormTopologyNoGpu = builder.createTopology();
    Config conf = createGrasClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null, Collections.emptyMap());
    // allow 1 round of evictions
    conf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_MAX_TOPOLOGY_SCHEDULING_ATTEMPTS, 2);
    String gpu1 = "hasGpu1";
    String noGpu = "hasNoGpu";
    String gpu2 = "hasGpu2";
    TopologyDetails[] topo = { createTestStormTopology(stormTopologyWithGpu, 10, gpu1, conf), createTestStormTopology(stormTopologyNoGpu, 10, noGpu, conf), createTestStormTopology(stormTopologyWithGpu, 9, gpu2, conf) };
    Topologies topologies = new Topologies(topo[0], topo[1]);
    Map<String, Double> genericResourcesMap = new HashMap<>();
    genericResourcesMap.put("gpu.count", 1.0);
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000, genericResourcesMap);
    Cluster cluster = new Cluster(new INimbusTest(), new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    // should schedule gpu1 and noGpu successfully
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, gpu1);
    assertTopologiesFullyScheduled(cluster, noGpu);
    // should evict gpu1 and noGpu topologies in order to schedule gpu2 topology; then fail to reschedule gpu1 topology;
    // then schedule noGpu topology.
    // Scheduling used to ignore gpu resource when deciding when to stop evicting, and gpu2 would fail to schedule.
    topologies = new Topologies(topo[0], topo[1], topo[2]);
    cluster = new Cluster(cluster, topologies);
    scheduler.schedule(topologies, cluster);
    assertTopologiesNotScheduled(cluster, gpu1);
    assertTopologiesFullyScheduled(cluster, noGpu);
    assertTopologiesFullyScheduled(cluster, gpu2);
}
Also used : TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Test(org.junit.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
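
The scaffolding shared by these examples is the same: a StormMetricsRegistry is handed both to the Cluster (wrapped in a ResourceMetrics) and to the scheduler's prepare call. Below is a minimal sketch of that wiring, assuming the helpers used in these tests (genSupervisors, createGrasClusterConfig, createTestStormTopology, INimbusTest from TestUtilsForResourceAwareScheduler) and the stormTopologyWithGpu topology built above are in scope.

Map<String, Double> genericResources = new HashMap<>();
// advertise one GPU per supervisor as a generic resource
genericResources.put("gpu.count", 1.0);
Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000, genericResources);

Config conf = createGrasClusterConfig(10, 10, 10, null, Collections.emptyMap());
Topologies topologies = new Topologies(createTestStormTopology(stormTopologyWithGpu, 10, "hasGpu1", conf));

// the registry feeds both the cluster's resource metrics and the scheduler itself
Cluster cluster = new Cluster(new INimbusTest(), new ResourceMetrics(new StormMetricsRegistry()),
        supMap, new HashMap<>(), topologies, conf);
ResourceAwareScheduler scheduler = new ResourceAwareScheduler();
scheduler.prepare(conf, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);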

Example 87 with StormMetricsRegistry

Use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

From the class TestGenericResourceAwareStrategy, the method testAntiAffinityWithMultipleTopologies.

@Test
public void testAntiAffinityWithMultipleTopologies() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisorsWithRacks(1, 40, 66, 0, 0, 4700, 226200, new HashMap<>());
    HashMap<String, Double> extraResources = new HashMap<>();
    extraResources.put("my.gpu", 1.0);
    supMap.putAll(genSupervisorsWithRacks(1, 40, 66, 1, 0, 4700, 226200, extraResources));
    Config config = new Config();
    config.putAll(createGrasClusterConfig(88, 775, 25, null, null));
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    TopologyDetails tdSimple = genTopology("topology-simple", config, 1, 5, 100, 300, 0, 0, "user", 8192);
    // Schedule the simple topology first
    Topologies topologies = new Topologies(tdSimple);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler.schedule(topologies, cluster);
    TopologyBuilder builder = topologyBuilder(1, 5, 100, 300);
    builder.setBolt("gpu-bolt", new TestBolt(), 40).addResource("my.gpu", 1.0).shuffleGrouping("spout-0");
    TopologyDetails tdGpu = topoToTopologyDetails("topology-gpu", config, builder.createTopology(), 0, 0, "user", 8192);
    // Now schedule GPU but with the simple topology in place.
    topologies = new Topologies(tdSimple, tdGpu);
    cluster = new Cluster(cluster, topologies);
    scheduler.schedule(topologies, cluster);
    Map<String, SchedulerAssignment> assignments = new TreeMap<>(cluster.getAssignments());
    assertEquals(2, assignments.size());
    Map<String, Map<String, AtomicLong>> topoPerRackCount = new HashMap<>();
    for (Entry<String, SchedulerAssignment> entry : assignments.entrySet()) {
        SchedulerAssignment sa = entry.getValue();
        Map<String, AtomicLong> slotsPerRack = new TreeMap<>();
        for (WorkerSlot slot : sa.getSlots()) {
            String nodeId = slot.getNodeId();
            String rack = supervisorIdToRackName(nodeId);
            slotsPerRack.computeIfAbsent(rack, (r) -> new AtomicLong(0)).incrementAndGet();
        }
        LOG.info("{} => {}", entry.getKey(), slotsPerRack);
        topoPerRackCount.put(entry.getKey(), slotsPerRack);
    }
    Map<String, AtomicLong> simpleCount = topoPerRackCount.get("topology-simple-0");
    assertNotNull(simpleCount);
    // Because the simple topology was scheduled first we want to be sure that it didn't put anything on
    // the GPU nodes.
    // Only 1 rack is in use
    assertEquals(1, simpleCount.size());
    // r001 is the second rack with GPUs
    assertFalse(simpleCount.containsKey("r001"));
    // r000 is the first rack with no GPUs
    assertTrue(simpleCount.containsKey("r000"));
    // We don't really care too much about the scheduling of topology-gpu-0, because it was scheduled.
}
Also used : Arrays(java.util.Arrays) IScheduler(org.apache.storm.scheduler.IScheduler) SharedOffHeapWithinNode(org.apache.storm.topology.SharedOffHeapWithinNode) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) INimbus(org.apache.storm.scheduler.INimbus) HashSet(java.util.HashSet) SupervisorResources(org.apache.storm.scheduler.SupervisorResources) Topologies(org.apache.storm.scheduler.Topologies) DaemonConfig(org.apache.storm.DaemonConfig) ServerUtils(org.apache.storm.utils.ServerUtils) StormTopology(org.apache.storm.generated.StormTopology) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Map(java.util.Map) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) After(org.junit.After) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) ValueSource(org.junit.jupiter.params.provider.ValueSource) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Logger(org.slf4j.Logger) Collection(java.util.Collection) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SharedOffHeapWithinWorker(org.apache.storm.topology.SharedOffHeapWithinWorker) Set(java.util.Set) Test(org.junit.Test) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) SharedOnHeap(org.apache.storm.topology.SharedOnHeap) Cluster(org.apache.storm.scheduler.Cluster) AtomicLong(java.util.concurrent.atomic.AtomicLong) WorkerResources(org.apache.storm.generated.WorkerResources) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) Nimbus(org.apache.storm.daemon.nimbus.Nimbus) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) TreeMap(java.util.TreeMap) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) Entry(java.util.Map.Entry) Config(org.apache.storm.Config) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) Assert(org.junit.Assert) Collections(java.util.Collections) StormCommon(org.apache.storm.daemon.StormCommon) ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails)
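
The per-rack tally in the loop above is a pattern worth isolating. Here is a sketch of it factored into a helper; the helper name slotsPerRack is mine, while supervisorIdToRackName is the same test utility called in the example.

// Count assigned worker slots per rack for a single topology's assignment.
private Map<String, AtomicLong> slotsPerRack(SchedulerAssignment assignment) {
    Map<String, AtomicLong> counts = new TreeMap<>();
    for (WorkerSlot slot : assignment.getSlots()) {
        String rack = supervisorIdToRackName(slot.getNodeId());
        counts.computeIfAbsent(rack, r -> new AtomicLong(0)).incrementAndGet();
    }
    return counts;
}

With that helper in place, the assertion reduces to checking that the simple topology's map contains r000 and not r001.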

Example 88 with StormMetricsRegistry

Use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

From the class TestLargeCluster, the method testLargeCluster.

/**
 * Create a large cluster, read topologies and configuration from resource directory and schedule.
 *
 * @throws Exception upon error.
 */
@Test
public void testLargeCluster() throws Exception {
    for (TEST_CLUSTER_NAME testClusterName : TEST_CLUSTER_NAME.values()) {
        LOG.info("********************************************");
        LOG.info("testLargeCluster: Start Processing cluster {}", testClusterName.getClusterName());
        String resourcePath = testClusterName.getResourcePath();
        Map<String, SupervisorDetails> supervisors = createSupervisors(testClusterName, 0);
        TopologyDetails[] topoDetailsArray = createTopoDetailsArray(resourcePath, false);
        Assert.assertTrue("No topologies found for cluster " + testClusterName.getClusterName(), topoDetailsArray.length > 0);
        Topologies topologies = new Topologies(topoDetailsArray);
        Config confWithDefaultStrategy = new Config();
        confWithDefaultStrategy.putAll(topoDetailsArray[0].getConf());
        confWithDefaultStrategy.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, DefaultResourceAwareStrategy.class.getName());
        confWithDefaultStrategy.put(Config.STORM_NETWORK_TOPOGRAPHY_PLUGIN, TestUtilsForResourceAwareScheduler.GenSupervisorsDnsToSwitchMapping.class.getName());
        INimbus iNimbus = new INimbusTest();
        Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supervisors, new HashMap<>(), topologies, confWithDefaultStrategy);
        scheduler = new ResourceAwareScheduler();
        List<Class> classesToDebug = Arrays.asList(DefaultResourceAwareStrategy.class, GenericResourceAwareStrategy.class, ResourceAwareScheduler.class, Cluster.class);
        // switch to Level.DEBUG for verbose output; otherwise keep Level.INFO
        Level logLevel = Level.INFO;
        classesToDebug.forEach(x -> Configurator.setLevel(x.getName(), logLevel));
        long startTime = System.currentTimeMillis();
        scheduler.prepare(confWithDefaultStrategy, new StormMetricsRegistry());
        scheduler.schedule(topologies, cluster);
        long endTime = System.currentTimeMillis();
        LOG.info("Cluster={} Scheduling Time: {} topologies in {} seconds", testClusterName.getClusterName(), topoDetailsArray.length, (endTime - startTime) / 1000.0);
        for (TopologyDetails td : topoDetailsArray) {
            TestUtilsForResourceAwareScheduler.assertTopologiesFullyScheduled(cluster, td.getName());
        }
        // Remove topology and reschedule it
        for (int i = 0; i < topoDetailsArray.length; i++) {
            startTime = System.currentTimeMillis();
            TopologyDetails topoDetails = topoDetailsArray[i];
            cluster.unassign(topoDetails.getId());
            LOG.info("Cluster={},  ({}) Removed topology {}", testClusterName.getClusterName(), i, topoDetails.getName());
            IScheduler rescheduler = new ResourceAwareScheduler();
            rescheduler.prepare(confWithDefaultStrategy, new StormMetricsRegistry());
            rescheduler.schedule(topologies, cluster);
            TestUtilsForResourceAwareScheduler.assertTopologiesFullyScheduled(cluster, topoDetails.getName());
            endTime = System.currentTimeMillis();
            LOG.info("Cluster={}, ({}) Scheduling Time: Removed topology {} and rescheduled in {} seconds", testClusterName.getClusterName(), i, topoDetails.getName(), (endTime - startTime) / 1000.0);
        }
        classesToDebug.forEach(x -> Configurator.setLevel(x.getName(), Level.INFO));
        LOG.info("testLargeCluster: End Processing cluster {}", testClusterName.getClusterName());
        LOG.info("********************************************");
    }
}
Also used : DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) Level(org.apache.logging.log4j.Level) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) IScheduler(org.apache.storm.scheduler.IScheduler) Test(org.junit.jupiter.api.Test)
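
The strategy under test is selected purely through configuration, so the same wiring can exercise a different strategy class. A sketch of swapping in the generic-resource-aware strategy (not part of this test; GenericResourceAwareStrategy is only referenced above for log-level control, and topoDetailsArray, topologies, and cluster are the objects built in the test):

Config conf = new Config();
conf.putAll(topoDetailsArray[0].getConf());
// swap in the generic-resource-aware strategy; everything else stays the same
conf.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, GenericResourceAwareStrategy.class.getName());
conf.put(Config.STORM_NETWORK_TOPOGRAPHY_PLUGIN,
        TestUtilsForResourceAwareScheduler.GenSupervisorsDnsToSwitchMapping.class.getName());
ResourceAwareScheduler scheduler = new ResourceAwareScheduler();
scheduler.prepare(conf, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);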

Example 89 with StormMetricsRegistry

Use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

From the class TestDefaultEvictionStrategy, the method testEvictTopologyFromItself.

/**
 * If topologies from other users cannot be evicted to make space,
 * check whether the current user has a lower-priority topology that can be evicted instead.
 */
@Test
public void testEvictTopologyFromItself() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 100, 1000);
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(userRes("jerry", 200, 2000), userRes("bobby", 100, 1000), userRes("derek", 100, 1000));
    Config config = createClusterConfig(100, 500, 500, resourceUserPool);
    Topologies topologies = new Topologies(genTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"), genTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"), genTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-6", config, 1, 0, 1, 0, currentTime - 2, 29, "derek"));
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    LOG.info("\n\n\t\tScheduling topos 1,2,5,6");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone Scheduling...");
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-2", "topo-5", "topo-6");
    // user jerry submits another topology into a full cluster;
    // topo-3 should not be able to be scheduled
    topologies = addTopologies(topologies, genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 29, "jerry"));
    cluster = new Cluster(cluster, topologies);
    LOG.info("\n\n\t\tScheduling topos 1,2,3,5,6");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone Scheduling...");
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-2", "topo-5", "topo-6");
    assertTopologiesNotScheduled(cluster, "topo-3");
    // user jerry submits another topology, but this one should be scheduled since it has higher priority than the
    // rest of jerry's running topologies
    topologies = addTopologies(topologies, genTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 10, "jerry"));
    cluster = new Cluster(cluster, topologies);
    LOG.info("\n\n\t\tScheduling topos 1-6");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone Scheduling...");
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-4", "topo-5", "topo-6");
    assertTopologiesNotScheduled(cluster, "topo-2", "topo-3");
}
Also used : Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
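
Between rounds the test reuses the Cluster copy constructor, so assignments from the previous round carry over and the scheduler only has to place (or evict for) the newly added topology. A minimal sketch of that round-to-round pattern, using the same helpers and variables as the test above:

// Round 1: schedule the initial topologies.
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()),
        supMap, new HashMap<>(), topologies, config);
scheduler.schedule(topologies, cluster);

// Round 2: add one topology; the copy constructor carries round-1 assignments forward.
topologies = addTopologies(topologies, genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 29, "jerry"));
cluster = new Cluster(cluster, topologies);
scheduler.schedule(topologies, cluster);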

Example 90 with StormMetricsRegistry

Use of org.apache.storm.metric.StormMetricsRegistry in project storm by apache.

From the class TestDefaultEvictionStrategy, the method testEvictMultipleTopologies.

@Test
public void testEvictMultipleTopologies() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 100, 1000);
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(userRes("jerry", 200, 2000), userRes("derek", 100, 1000));
    Config config = createClusterConfig(100, 500, 500, resourceUserPool);
    Topologies topologies = new Topologies(genTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 20, "bobby"), genTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 29, "derek"), genTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 29, "derek"));
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    LOG.info("\n\n\t\tScheduling topos 2 to 5...");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone scheduling...");
    assertTopologiesFullyScheduled(cluster, "topo-2", "topo-3", "topo-4", "topo-5");
    // user jerry submits another topology
    topologies = addTopologies(topologies, genTopology("topo-1", config, 2, 0, 1, 0, currentTime - 2, 10, "jerry"));
    cluster = new Cluster(cluster, topologies);
    LOG.info("\n\n\t\tScheduling topos 1 to 5");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone scheduling...");
    // bobby has no guarantee, so topo-2 and topo-3 are evicted
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-4", "topo-5");
    assertTopologiesNotScheduled(cluster, "topo-2", "topo-3");
}
Also used : Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
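
The eviction order here is driven by the per-user guarantees handed to createClusterConfig: bobby has no entry in the pool, so his topologies are the first candidates for eviction. A sketch of granting bobby a guarantee as well (illustrative values only, not part of this test), which would change which topologies get evicted:

Map<String, Map<String, Number>> resourceUserPool = userResourcePool(
        userRes("jerry", 200, 2000),
        // giving bobby a guarantee would keep his topologies from being evicted first
        userRes("bobby", 100, 1000),
        userRes("derek", 100, 1000));
Config config = createClusterConfig(100, 500, 500, resourceUserPool);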

Aggregations

StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry): 123 usages
Cluster (org.apache.storm.scheduler.Cluster): 67 usages
Topologies (org.apache.storm.scheduler.Topologies): 66 usages
Config (org.apache.storm.Config): 64 usages
SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails): 64 usages
HashMap (java.util.HashMap): 63 usages
Test (org.junit.Test): 62 usages
ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics): 61 usages
INimbus (org.apache.storm.scheduler.INimbus): 60 usages
TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler): 54 usages
TopologyDetails (org.apache.storm.scheduler.TopologyDetails): 53 usages
DaemonConfig (org.apache.storm.DaemonConfig): 41 usages
Test (org.junit.jupiter.api.Test): 40 usages
ResourceAwareScheduler (org.apache.storm.scheduler.resource.ResourceAwareScheduler): 34 usages
HashSet (java.util.HashSet): 29 usages
Map (java.util.Map): 29 usages
SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment): 27 usages
TopologyBuilder (org.apache.storm.topology.TopologyBuilder): 27 usages
ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails): 26 usages
StormTopology (org.apache.storm.generated.StormTopology): 24 usages