
Example 36 with Topologies

use of org.apache.storm.scheduler.Topologies in project storm by apache.

the class TestDefaultEvictionStrategy method testOverGuaranteeEviction.

/**
 * If a user is above his or her guarantee, check that topology eviction works correctly.
 */
@Test
public void testOverGuaranteeEviction() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 100, 1000);
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(userRes("jerry", 70, 700), userRes("bobby", 100, 1000), userRes("derek", 25, 250));
    Config config = createClusterConfig(100, 500, 500, resourceUserPool);
    Topologies topologies = new Topologies(genTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"), genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 29, "derek"));
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    LOG.info("\n\n\t\tScheduling topos 1,3,4,5");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone scheduling...");
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-3", "topo-4", "topo-5");
    // user derek submits another topology into a full cluster
    // topo-6 cannot be scheduled initially, but since topo-6 has a higher priority than topo-5,
    // topo-5 will be evicted so that topo-6 can be scheduled
    topologies = addTopologies(topologies, genTopology("topo-6", config, 1, 0, 1, 0, currentTime - 2, 10, "derek"));
    cluster = new Cluster(cluster, topologies);
    LOG.info("\n\n\t\tScheduling topos 1,3,4,5,6");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone scheduling...");
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-3", "topo-4", "topo-6");
    assertTopologiesNotScheduled(cluster, "topo-5");
    // user jerry submits topo2
    topologies = addTopologies(topologies, genTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"));
    cluster = new Cluster(cluster, topologies);
    LOG.info("\n\n\t\tScheduling topos 1-6");
    scheduler.schedule(topologies, cluster);
    LOG.info("\n\n\t\tDone scheduling...");
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-3", "topo-4", "topo-6");
    assertTopologiesNotScheduled(cluster, "topo-2", "topo-5");
}
Also used : Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
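
The test above relies on helpers from TestUtilsForResourceAwareScheduler whose bodies are not shown on this page. As a rough guide to what they express, here is a hypothetical sketch of what userRes and userResourcePool might look like: per-user CPU and memory guarantees collected into the Map<String, Map<String, Number>> that createClusterConfig consumes. The inner map keys ("cpu", "memory") and the Map.Entry return type are assumptions made for this sketch, not the actual helper code.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for the userRes/userResourcePool helpers used above; key names are assumed.
final class UserPoolSketch {

    // One user's guarantee: CPU (in % of a core) and memory (in MB).
    static Map.Entry<String, Map<String, Number>> userRes(String user, double cpu, double memoryMb) {
        Map<String, Number> res = new HashMap<>();
        res.put("cpu", cpu);         // assumed key name
        res.put("memory", memoryMb); // assumed key name
        return Map.entry(user, res);
    }

    // Collect per-user guarantees into the map passed to createClusterConfig(...).
    @SafeVarargs
    static Map<String, Map<String, Number>> userResourcePool(Map.Entry<String, Map<String, Number>>... entries) {
        Map<String, Map<String, Number>> pool = new HashMap<>();
        for (Map.Entry<String, Map<String, Number>> e : entries) {
            pool.put(e.getKey(), e.getValue());
        }
        return pool;
    }
}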

Example 37 with Topologies

use of org.apache.storm.scheduler.Topologies in project storm by apache.

the class TestDefaultEvictionStrategy method testEvictMultipleTopologiesFromMultipleUsersInCorrectOrder.

@Test
public void testEvictMultipleTopologiesFromMultipleUsersInCorrectOrder() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 100, 1000);
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(userRes("jerry", 300, 3000), userRes("derek", 100, 1000));
    Config config = createClusterConfig(100, 500, 500, resourceUserPool);
    Topologies topologies = new Topologies(genTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 20, "bobby"), genTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 29, "derek"), genTopology("topo-5", config, 1, 0, 1, 0, currentTime - 15, 29, "derek"));
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, "topo-2", "topo-3", "topo-4", "topo-5");
    // user jerry submits another topology
    topologies = addTopologies(topologies, genTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 10, "jerry"));
    cluster = new Cluster(cluster, topologies);
    scheduler.schedule(topologies, cluster);
    // topo-3 is evicted since user bobby doesn't have any resource guarantees and topo-3 is bobby's lowest-priority topology
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-2", "topo-4", "topo-5");
    assertTopologiesNotScheduled(cluster, "topo-3");
    topologies = addTopologies(topologies, genTopology("topo-6", config, 1, 0, 1, 0, currentTime - 2, 10, "jerry"));
    cluster = new Cluster(cluster, topologies);
    scheduler.schedule(topologies, cluster);
    // topo-2 is evicted since user bobby doesn't have any resource guarantees and topo-2 is bobby's next lowest-priority topology
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-4", "topo-5");
    assertTopologiesNotScheduled(cluster, "topo-2", "topo-3");
    topologies = addTopologies(topologies, genTopology("topo-7", config, 1, 0, 1, 0, currentTime - 2, 10, "jerry"));
    cluster = new Cluster(cluster, topologies);
    scheduler.schedule(topologies, cluster);
    // since user derek has exceeded his resource guarantee while user jerry has not, either topo-5 or topo-4 could be evicted because they have the same priority,
    // but topo-4 was submitted later (it has less up-time), so we choose that one to evict (somewhat arbitrary)
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-5", "topo-7");
    assertTopologiesNotScheduled(cluster, "topo-2", "topo-3", "topo-4");
}
Also used : Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
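
For context on the priority numbers passed to genTopology above, here is a minimal sketch (not taken from the test, and not the scheduler's internal logic) of how a submitter would set the per-topology priority and per-component resource defaults on a real topology config. With the resource aware scheduler a smaller TOPOLOGY_PRIORITY value means a more important topology, which is why, within user bobby's pool, the priority-20 topo-3 is evicted before the priority-10 topo-2.

import org.apache.storm.Config;

// Minimal sketch of setting topology priority and per-component resource defaults.
public class TopologyPrioritySketch {
    public static void main(String[] args) {
        Config conf = new Config();
        conf.put(Config.TOPOLOGY_PRIORITY, 10);                                 // smaller number = higher priority
        conf.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);           // CPU per executor, % of a core
        conf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500.0);  // on-heap memory per executor, MB
        // StormSubmitter.submitTopology("my-topo", conf, builder.createTopology()) would follow here.
    }
}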

Example 38 with Topologies

use of org.apache.storm.scheduler.Topologies in project storm by apache.

the class TestGenericResourceAwareSchedulingPriorityStrategy method testGenericSchedulingPriorityStrategyEvicting.

/*
     * GenericResourceAwareSchedulingPriorityStrategy extends the scoring formula to accommodate generic resources.
     *
     *   Same setup as testDefaultSchedulingPriorityStrategyEvicting, but this time the new scoring system takes generic resources into account,
     *   so the score of Rui's new topology is higher than that of all of Ethan's topologies due to its outsized generic resource request.
     *   In the end, none of Ethan's topologies is evicted, as expected.
     */
@Test
public void testGenericSchedulingPriorityStrategyEvicting() {
    Map<String, Double> requestedgenericResourcesMap = new HashMap<>();
    requestedgenericResourcesMap.put("generic.resource.1", 40.0);
    Config ruiConf = createGrasClusterConfig(10, 10, 10, null, requestedgenericResourcesMap);
    Config ethanConf = createGrasClusterConfig(60, 200, 300, null, Collections.emptyMap());
    Topologies topologies = new Topologies(genTopology("ethan-topo-1", ethanConf, 1, 0, 1, 0, currentTime - 2, 10, "ethan"), genTopology("ethan-topo-2", ethanConf, 1, 0, 1, 0, currentTime - 2, 20, "ethan"), genTopology("ethan-topo-3", ethanConf, 1, 0, 1, 0, currentTime - 2, 28, "ethan"), genTopology("ethan-topo-4", ethanConf, 1, 0, 1, 0, currentTime - 2, 29, "ethan"));
    Topologies withNewTopo = addTopologies(topologies, genTopology("rui-topo-1", ruiConf, 1, 0, 5, 0, currentTime - 2, 10, "rui"));
    Config config = mkClusterConfig(GenericResourceAwareSchedulingPriorityStrategy.class.getName());
    Cluster cluster = mkTestCluster(topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    cluster = new Cluster(cluster, withNewTopo);
    scheduler.schedule(withNewTopo, cluster);
    Map<String, Set<String>> evictedTopos = ((ResourceAwareScheduler) scheduler).getEvictedTopologiesMap();
    assertTopologiesFullyScheduled(cluster, "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    assertTopologiesNotBeenEvicted(cluster, collectMapValues(evictedTopos), "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    assertTopologiesNotScheduled(cluster, "rui-topo-1");
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Topologies(org.apache.storm.scheduler.Topologies) TestUtilsForResourceAwareScheduler.addTopologies(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler.addTopologies) Cluster(org.apache.storm.scheduler.Cluster) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) Test(org.junit.Test)
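
mkClusterConfig above is another helper that is not shown on this page. A plausible sketch of what it does with the strategy class name is given below: plugging the priority strategy into the scheduler configuration. The DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY key and the strategy's package path are assumptions based on the class name passed in, so verify them against the Storm version in use.

import org.apache.storm.Config;
import org.apache.storm.DaemonConfig;

// Assumed wiring of a scheduling priority strategy; key name and package path are not confirmed by this page.
public class PriorityStrategyConfigSketch {
    public static void main(String[] args) {
        Config config = new Config();
        config.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY,
                   "org.apache.storm.scheduler.resource.strategies.priority.GenericResourceAwareSchedulingPriorityStrategy");
    }
}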

Example 39 with Topologies

use of org.apache.storm.scheduler.Topologies in project storm by apache.

the class TestGenericResourceAwareSchedulingPriorityStrategy method testDefaultSchedulingPriorityStrategyNotEvicting.

/*
     * DefaultSchedulingPriorityStrategy will not evict a topology as long as the resource request can be met.
     *
     *  Ethan asks for heavy cpu and memory while Rui asks for little cpu and memory but a heavy generic resource.
     *  Since Rui's requests for all resource types can be met, no eviction happens.
     */
@Test
public void testDefaultSchedulingPriorityStrategyNotEvicting() {
    Map<String, Double> requestedgenericResourcesMap = new HashMap<>();
    requestedgenericResourcesMap.put("generic.resource.1", 40.0);
    // Use full memory and cpu of the cluster capacity
    Config ruiConf = createGrasClusterConfig(20, 50, 50, null, requestedgenericResourcesMap);
    Config ethanConf = createGrasClusterConfig(80, 400, 500, null, Collections.emptyMap());
    Topologies topologies = new Topologies(genTopology("ethan-topo-1", ethanConf, 1, 0, 1, 0, currentTime - 2, 10, "ethan"), genTopology("ethan-topo-2", ethanConf, 1, 0, 1, 0, currentTime - 2, 20, "ethan"), genTopology("ethan-topo-3", ethanConf, 1, 0, 1, 0, currentTime - 2, 28, "ethan"), genTopology("ethan-topo-4", ethanConf, 1, 0, 1, 0, currentTime - 2, 29, "ethan"));
    Topologies withNewTopo = addTopologies(topologies, genTopology("rui-topo-1", ruiConf, 1, 0, 4, 0, currentTime - 2, 10, "rui"));
    Config config = mkClusterConfig(DefaultSchedulingPriorityStrategy.class.getName());
    Cluster cluster = mkTestCluster(topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    cluster = new Cluster(cluster, withNewTopo);
    scheduler.schedule(withNewTopo, cluster);
    Map<String, Set<String>> evictedTopos = ((ResourceAwareScheduler) scheduler).getEvictedTopologiesMap();
    assertTopologiesFullyScheduled(cluster, "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    assertTopologiesNotBeenEvicted(cluster, collectMapValues(evictedTopos), "ethan-topo-1", "ethan-topo-2", "ethan-topo-3", "ethan-topo-4");
    assertTopologiesFullyScheduled(cluster, "rui-topo-1");
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Topologies(org.apache.storm.scheduler.Topologies) TestUtilsForResourceAwareScheduler.addTopologies(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler.addTopologies) Cluster(org.apache.storm.scheduler.Cluster) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) Test(org.junit.Test)
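
The comment in this test boils down to a simple feasibility check: a new topology only forces eviction when some resource type it requests exceeds what is still free, and here every type Rui asks for (cpu, memory, generic.resource.1) still fits. The helper below is purely illustrative plain Java, not Storm API, and the numbers are hypothetical, chosen only to mirror the shape of the scenario.

import java.util.Map;

// Illustrative feasibility check: schedule without eviction only if every requested resource type fits.
final class FitsCheckSketch {

    static boolean fits(Map<String, Double> freeByResource, Map<String, Double> requestedByResource) {
        return requestedByResource.entrySet().stream()
            .allMatch(e -> freeByResource.getOrDefault(e.getKey(), 0.0) >= e.getValue());
    }

    public static void main(String[] args) {
        Map<String, Double> free = Map.of("cpu", 20.0, "memory", 100.0, "generic.resource.1", 50.0);
        Map<String, Double> wanted = Map.of("cpu", 20.0, "memory", 100.0, "generic.resource.1", 40.0);
        System.out.println(fits(free, wanted)); // true -> no eviction needed
    }
}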

Example 40 with Topologies

use of org.apache.storm.scheduler.Topologies in project storm by apache.

the class TestConstraintSolverStrategy method testScheduleLargeExecutorConstraintCount.

private void testScheduleLargeExecutorConstraintCount(int parallelismMultiplier) {
    if (parallelismMultiplier > 1 && !consolidatedConfigFlag) {
        Assert.assertFalse("Large parallelism test requires new consolidated constraint format with maxCoLocationCnt=" + parallelismMultiplier, consolidatedConfigFlag);
        return;
    }
    // Add 1 topology with a large number of executors and constraints. Too many constraints can cause a java.lang.StackOverflowError.
    Config config = createCSSClusterConfig(10, 10, 0, null);
    config.put(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH, 50000);
    config.put(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_TIME_SECS, 120);
    config.put(DaemonConfig.SCHEDULING_TIMEOUT_SECONDS_PER_TOPOLOGY, 120);
    List<List<String>> constraints = new LinkedList<>();
    addConstraints("spout-0", "spout-0", constraints);
    addConstraints("bolt-1", "bolt-1", constraints);
    addConstraints("spout-0", "bolt-0", constraints);
    addConstraints("bolt-2", "spout-0", constraints);
    addConstraints("bolt-1", "bolt-2", constraints);
    addConstraints("bolt-1", "bolt-0", constraints);
    addConstraints("bolt-1", "spout-0", constraints);
    Map<String, Integer> spreads = new HashMap<>();
    spreads.put("spout-0", parallelismMultiplier);
    spreads.put("bolt-1", parallelismMultiplier);
    setConstraintConfig(constraints, spreads, config);
    TopologyDetails topo = genTopology("testTopo-" + parallelismMultiplier, config, 10, 10, 30 * parallelismMultiplier, 30 * parallelismMultiplier, 31414, 0, "user");
    Topologies topologies = new Topologies(topo);
    Map<String, SupervisorDetails> supMap = genSupervisors(30 * parallelismMultiplier, 30, 3500, 35000);
    Cluster cluster = makeCluster(topologies, supMap);
    ResourceAwareScheduler scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    boolean scheduleSuccess = isStatusSuccess(cluster.getStatus(topo.getId()));
    LOG.info("testScheduleLargeExecutorCount scheduling {} with {}x executor multiplier, consolidatedConfigFlag={}", scheduleSuccess ? "succeeds" : "fails", parallelismMultiplier, consolidatedConfigFlag);
    Assert.assertTrue(scheduleSuccess);
}
Also used : HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) LinkedList(java.util.LinkedList) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) Topologies(org.apache.storm.scheduler.Topologies) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) List(java.util.List) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails)
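
addConstraints and setConstraintConfig are helpers defined in TestConstraintSolverStrategy and not shown here. As a rough sketch of the data they build: each call appears to record a pair of component names whose executors the constraint solver must keep apart, accumulated into the List<List<String>> that setConstraintConfig later writes into the topology config. The implementation below is a hypothetical stand-in, not the actual helper.

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

// Hypothetical stand-in for the addConstraints(...) helper used above.
final class ConstraintListSketch {

    static void addConstraints(String comp1, String comp2, List<List<String>> constraints) {
        constraints.add(Arrays.asList(comp1, comp2)); // one constraint = one pair of component names
    }

    public static void main(String[] args) {
        List<List<String>> constraints = new LinkedList<>();
        addConstraints("spout-0", "bolt-0", constraints);
        System.out.println(constraints); // [[spout-0, bolt-0]]
    }
}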

Aggregations

Topologies (org.apache.storm.scheduler.Topologies)89 Cluster (org.apache.storm.scheduler.Cluster)82 Config (org.apache.storm.Config)77 SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails)77 TopologyDetails (org.apache.storm.scheduler.TopologyDetails)76 INimbus (org.apache.storm.scheduler.INimbus)71 StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry)66 HashMap (java.util.HashMap)61 ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics)60 TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler)59 DaemonConfig (org.apache.storm.DaemonConfig)41 ResourceAwareScheduler (org.apache.storm.scheduler.resource.ResourceAwareScheduler)39 Test (org.junit.Test)36 Map (java.util.Map)35 Test (org.junit.jupiter.api.Test)35 SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment)31 SchedulerAssignmentImpl (org.apache.storm.scheduler.SchedulerAssignmentImpl)31 ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails)30 TopologyBuilder (org.apache.storm.topology.TopologyBuilder)30 HashSet (java.util.HashSet)29