Use of org.apache.storm.scheduler.INimbus in project storm by apache.
Class TestResourceAwareScheduler, method TestTopologySortedInCorrectOrder.
@Test
public void TestTopologySortedInCorrectOrder() {
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1024.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(20, 4, resourceMap);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 10.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 128.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 0.0);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, TOPOLOGY_SUBMITTER);
    Map<String, Map<String, Number>> resourceUserPool = new HashMap<>();
    resourceUserPool.put("jerry", new HashMap<>());
    resourceUserPool.get("jerry").put("cpu", 1000.0);
    resourceUserPool.get("jerry").put("memory", 8192.0);
    resourceUserPool.put("bobby", new HashMap<>());
    resourceUserPool.get("bobby").put("cpu", 10000.0);
    resourceUserPool.get("bobby").put("memory", 32768.0);
    resourceUserPool.put("derek", new HashMap<>());
    resourceUserPool.get("derek").put("cpu", 5000.0);
    resourceUserPool.get("derek").put("memory", 16384.0);
    config.put(Config.RESOURCE_AWARE_SCHEDULER_USER_POOLS, resourceUserPool);
    // The last two getTopology arguments are launch time and priority.
    TopologyDetails topo1 = TestUtilsForResourceAwareScheduler.getTopology("topo-1", config, 5, 15, 1, 1, currentTime - 2, 20);
    TopologyDetails topo2 = TestUtilsForResourceAwareScheduler.getTopology("topo-2", config, 5, 15, 1, 1, currentTime - 8, 30);
    TopologyDetails topo3 = TestUtilsForResourceAwareScheduler.getTopology("topo-3", config, 5, 15, 1, 1, currentTime - 16, 30);
    TopologyDetails topo4 = TestUtilsForResourceAwareScheduler.getTopology("topo-4", config, 5, 15, 1, 1, currentTime - 16, 20);
    TopologyDetails topo5 = TestUtilsForResourceAwareScheduler.getTopology("topo-5", config, 5, 15, 1, 1, currentTime - 24, 30);
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topo1.getId(), topo1);
    topoMap.put(topo2.getId(), topo2);
    topoMap.put(topo3.getId(), topo3);
    topoMap.put(topo4.getId(), topo4);
    topoMap.put(topo5.getId(), topo5);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<>(), config);
    Topologies topologies = new Topologies(topoMap);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    Set<TopologyDetails> queue = rs.getUser("jerry").getTopologiesPending();
    Assert.assertEquals("check size", 0, queue.size());
    queue = rs.getUser("jerry").getTopologiesRunning();
    Iterator<TopologyDetails> itr = queue.iterator();
    TopologyDetails topo = itr.next();
    LOG.info("{} - {}", topo.getName(), queue);
    Assert.assertEquals("check order", "topo-4", topo.getName());
    topo = itr.next();
    LOG.info("{} - {}", topo.getName(), queue);
    Assert.assertEquals("check order", "topo-1", topo.getName());
    topo = itr.next();
    LOG.info("{} - {}", topo.getName(), queue);
    Assert.assertEquals("check order", "topo-5", topo.getName());
    topo = itr.next();
    LOG.info("{} - {}", topo.getName(), queue);
    Assert.assertEquals("check order", "topo-3", topo.getName());
    topo = itr.next();
    LOG.info("{} - {}", topo.getName(), queue);
    Assert.assertEquals("check order", "topo-2", topo.getName());
    // Submit a sixth topology with the highest priority (lowest value); after
    // rescheduling it should move to the front of jerry's running queue.
    TopologyDetails topo6 = TestUtilsForResourceAwareScheduler.getTopology("topo-6", config, 5, 15, 1, 1, currentTime - 30, 10);
    topoMap.put(topo6.getId(), topo6);
    topologies = new Topologies(topoMap);
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    queue = rs.getUser("jerry").getTopologiesRunning();
    itr = queue.iterator();
    topo = itr.next();
    Assert.assertEquals("check order", "topo-6", topo.getName());
    topo = itr.next();
    Assert.assertEquals("check order", "topo-4", topo.getName());
    topo = itr.next();
    Assert.assertEquals("check order", "topo-1", topo.getName());
    topo = itr.next();
    Assert.assertEquals("check order", "topo-5", topo.getName());
    topo = itr.next();
    Assert.assertEquals("check order", "topo-3", topo.getName());
    topo = itr.next();
    Assert.assertEquals("check order", "topo-2", topo.getName());
    queue = rs.getUser("jerry").getTopologiesPending();
    Assert.assertEquals("check size", 0, queue.size());
}
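The asserted order is consistent with the default scheduling priority rule: a lower Config.TOPOLOGY_PRIORITY value is served first, and ties are broken in favor of the topology that has been up longer. A minimal, self-contained sketch of that ordering, using the priorities and up-times from this test (TopoStub and its fields are illustrative stand-ins, not Storm's API):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class PrioritySortSketch {
    // Illustrative stand-in for a running topology; not Storm's TopologyDetails.
    record TopoStub(String name, int priority, long upTimeSecs) {}

    public static void main(String[] args) {
        List<TopoStub> topos = new ArrayList<>(List.of(
            new TopoStub("topo-1", 20, 2),
            new TopoStub("topo-2", 30, 8),
            new TopoStub("topo-3", 30, 16),
            new TopoStub("topo-4", 20, 16),
            new TopoStub("topo-5", 30, 24),
            new TopoStub("topo-6", 10, 30)));

        // Lower priority value first; ties broken by longer up-time.
        topos.sort(Comparator.comparingInt(TopoStub::priority)
            .thenComparing(Comparator.comparingLong(TopoStub::upTimeSecs).reversed()));

        // Prints topo-6, topo-4, topo-1, topo-5, topo-3, topo-2 -
        // the same sequence the assertions above expect once topo-6 is added.
        topos.forEach(t -> System.out.println(t.name()));
    }
}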
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
Class TestResourceAwareScheduler, method TestSubmitUsersWithNoGuarantees.
@Test
public void TestSubmitUsersWithNoGuarantees() {
    INimbus iNimbus = new TestUtilsForResourceAwareScheduler.INimbusTest();
    Map<String, Number> resourceMap = new HashMap<>();
    resourceMap.put(Config.SUPERVISOR_CPU_CAPACITY, 100.0);
    resourceMap.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, 1000.0);
    Map<String, SupervisorDetails> supMap = TestUtilsForResourceAwareScheduler.genSupervisors(4, 4, resourceMap);
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_EVICTION_STRATEGY, org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy.class.getName());
    config.put(Config.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY, org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy.class.getName());
    config.put(Config.TOPOLOGY_SCHEDULER_STRATEGY, org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.class.getName());
    config.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, 100.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, 500.0);
    config.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, 500.0);
    // Only jerry gets a resource guarantee; bobby must make do with whatever is left.
    Map<String, Map<String, Number>> resourceUserPool = new HashMap<>();
    resourceUserPool.put("jerry", new HashMap<>());
    resourceUserPool.get("jerry").put("cpu", 200.0);
    resourceUserPool.get("jerry").put("memory", 2000.0);
    config.put(Config.RESOURCE_AWARE_SCHEDULER_USER_POOLS, resourceUserPool);
    Cluster cluster = new Cluster(iNimbus, supMap, new HashMap<>(), config);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "jerry");
    TopologyDetails topo1 = TestUtilsForResourceAwareScheduler.getTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 10);
    TopologyDetails topo2 = TestUtilsForResourceAwareScheduler.getTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 20);
    TopologyDetails topo3 = TestUtilsForResourceAwareScheduler.getTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 20);
    config.put(Config.TOPOLOGY_SUBMITTER_USER, "bobby");
    TopologyDetails topo4 = TestUtilsForResourceAwareScheduler.getTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 10);
    TopologyDetails topo5 = TestUtilsForResourceAwareScheduler.getTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 20);
    Map<String, TopologyDetails> topoMap = new HashMap<>();
    topoMap.put(topo1.getId(), topo1);
    topoMap.put(topo2.getId(), topo2);
    topoMap.put(topo3.getId(), topo3);
    topoMap.put(topo4.getId(), topo4);
    topoMap.put(topo5.getId(), topo5);
    Topologies topologies = new Topologies(topoMap);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(config);
    rs.schedule(topologies, cluster);
    for (TopologyDetails topo : rs.getUser("jerry").getTopologiesRunning()) {
        Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
    }
    Assert.assertEquals("# of running topologies", 3, rs.getUser("jerry").getTopologiesRunning().size());
    Assert.assertEquals("# of pending topologies", 0, rs.getUser("jerry").getTopologiesPending().size());
    Assert.assertEquals("# of attempted topologies", 0, rs.getUser("jerry").getTopologiesAttempted().size());
    Assert.assertEquals("# of invalid topologies", 0, rs.getUser("jerry").getTopologiesInvalid().size());
    for (TopologyDetails topo : rs.getUser("bobby").getTopologiesRunning()) {
        Assert.assertTrue("assert topology success", TestUtilsForResourceAwareScheduler.assertStatusSuccess(cluster.getStatusMap().get(topo.getId())));
    }
    // The cluster only has room for one of bobby's two topologies; the other is merely attempted.
    Assert.assertEquals("# of running topologies", 1, rs.getUser("bobby").getTopologiesRunning().size());
    Assert.assertEquals("# of pending topologies", 0, rs.getUser("bobby").getTopologiesPending().size());
    Assert.assertEquals("# of attempted topologies", 1, rs.getUser("bobby").getTopologiesAttempted().size());
    Assert.assertEquals("# of invalid topologies", 0, rs.getUser("bobby").getTopologiesInvalid().size());
}
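The counts asserted above follow from simple capacity arithmetic (ignoring ackers): the cluster can hold four of these single-executor topologies at once, jerry's three all fit (his guarantee covers two and spare capacity absorbs the third), and bobby, with no guarantee, can run only one of his two. A standalone sketch of that arithmetic, using the numbers from this test:

public class CapacitySketch {
    public static void main(String[] args) {
        // Cluster from the test: 4 supervisors, each with 100% CPU and 1000 MB.
        double totalCpu = 4 * 100.0;
        double totalMem = 4 * 1000.0;
        // Each topology is a single spout executor needing 100% CPU and
        // 500 MB on-heap + 500 MB off-heap memory.
        double topoCpu = 100.0;
        double topoMem = 500.0 + 500.0;
        int maxConcurrent = (int) Math.min(totalCpu / topoCpu, totalMem / topoMem);
        System.out.println(maxConcurrent); // 4
        // jerry's guarantee (200% CPU / 2000 MB) covers two of his topologies,
        // spare capacity absorbs the third, and the remaining slot's worth of
        // resources goes to one of bobby's topologies; his second is "attempted".
    }
}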
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
Class TestGenericResourceAwareStrategy, method testGenericResourceAwareStrategyWithoutSettingAckerExecutors.
/**
* Test if the scheduling logic for the GenericResourceAwareStrategy is correct
* without setting {@link Config#TOPOLOGY_ACKER_EXECUTORS}.
*
* For test details, refer to {@link TestDefaultResourceAwareStrategy#testDefaultResourceAwareStrategyWithoutSettingAckerExecutors(int)}.
*/
@ParameterizedTest
@ValueSource(ints = { -1, 0, 1, 2 })
public void testGenericResourceAwareStrategyWithoutSettingAckerExecutors(int numOfAckersPerWorker) throws InvalidTopologyException {
    int spoutParallelism = 1;
    int boltParallelism = 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1").addResource("gpu.count", 1.0);
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2").addResource("gpu.count", 2.0);
    String topoName = "testTopology";
    StormTopology stormTopology = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Config conf = createGrasClusterConfig(50, 500, 0, null, Collections.emptyMap());
    Map<String, Double> genericResourcesMap = new HashMap<>();
    genericResourcesMap.put("gpu.count", 2.0);
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000, genericResourcesMap);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, topoName);
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
    conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
    // Parameterized test on different numOfAckersPerWorker values.
    if (numOfAckersPerWorker == -1) {
        // Neither Config.TOPOLOGY_ACKER_EXECUTORS nor Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER is set.
        // They default to 2 (the estimated number of workers) and 1, respectively.
    } else {
        conf.put(Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER, numOfAckersPerWorker);
    }
    int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRasTopo(conf, stormTopology);
    Nimbus.setUpAckerExecutorConfigs(topoName, conf, conf, estimatedNumWorker);
    conf.put(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB, 250);
    conf.put(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT, 50);
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), currentTime, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    // We need 3 slots on 3 separate hosts. The topology needs 6 GPUs, 3500 MB of memory, and 350% CPU.
    // The bolt-3 instances must be on separate nodes because each needs 2 GPUs.
    // The bolt-2 instances must be on the same node because each needs 1 GPU
    // (this assumes that we are packing the components to avoid fragmentation).
    // The bolt-1 and spout instances fill in the rest.
    // Ordered execs: [[6, 6], [2, 2], [4, 4], [5, 5], [1, 1], [3, 3], [0, 0]]
    // Ackers: [[8, 8], [7, 7]] (plus [[9, 9], [10, 10]] when numOfAckersPerWorker == 2)
    HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
    if (numOfAckersPerWorker == -1 || numOfAckersPerWorker == 1) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(3, 3))));  // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 500 MB, 50% CPU, 2 GPU -> this node has 1500 MB, 150% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(6, 6),     // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(2, 2),     // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(5, 5),     // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(8, 8))));  // acker - 250 MB, 50% CPU, 0 GPU
        // Total 1750 MB, 200% CPU, 2 GPU -> this node has 250 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(4, 4),     // bolt-3 - 500 MB, 50% CPU, 2 GPU
            new ExecutorDetails(1, 1),     // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(0, 0),     // spout - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(7, 7))));  // acker - 250 MB, 50% CPU, 0 GPU
        // Total 1750 MB, 200% CPU, 2 GPU -> this node has 250 MB, 0% CPU, 0 GPU left
    } else if (numOfAckersPerWorker == 0) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(3, 3))));  // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 500 MB, 50% CPU, 2 GPU -> this node has 1500 MB, 150% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(6, 6),     // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(2, 2),     // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(5, 5),     // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(1, 1))));  // bolt-1 - 500 MB, 50% CPU, 0 GPU
        // Total 2000 MB, 200% CPU, 2 GPU -> this node has 0 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(0, 0),     // spout - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(4, 4))));  // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 1000 MB, 100% CPU, 2 GPU -> this node has 1000 MB, 100% CPU, 0 GPU left
    } else if (numOfAckersPerWorker == 2) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(3, 3))));  // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 500 MB, 50% CPU, 2 GPU -> this node has 1500 MB, 150% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(7, 7),     // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(8, 8),     // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(6, 6),     // bolt-2 - 500 MB, 50% CPU, 1 GPU
            new ExecutorDetails(2, 2))));  // bolt-1 - 500 MB, 50% CPU, 0 GPU
        // Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(9, 9),     // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(10, 10),   // acker - 250 MB, 50% CPU, 0 GPU
            new ExecutorDetails(1, 1),     // bolt-1 - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(4, 4))));  // bolt-3 - 500 MB, 50% CPU, 2 GPU
        // Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(0, 0),     // spout - 500 MB, 50% CPU, 0 GPU
            new ExecutorDetails(5, 5))));  // bolt-2 - 500 MB, 50% CPU, 1 GPU
        // Total 1000 MB, 100% CPU, 2 GPU -> this node has 1000 MB, 100% CPU, 0 GPU left
    }
    HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
    SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        foundScheduling.add(new HashSet<>(execs));
    }
    assertEquals(expectedScheduling, foundScheduling);
}
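Each per-worker total in the comments above can be checked against the supervisor capacity that genSupervisors(4, 4, 200, 2000, ...) provides: 200% CPU and 2000 MB per node. A standalone sketch for the fullest worker of the numOfAckersPerWorker == 1 case (the numbers are copied from the comments, not computed by Storm):

public class NodeFitSketch {
    public static void main(String[] args) {
        // Supervisor capacity: 200% CPU and 2000 MB of memory per node.
        double nodeCpu = 200.0;
        double nodeMem = 2000.0;
        // Executors in one worker of the numOfAckersPerWorker == 1 case:
        // bolt-2, bolt-1, bolt-2, acker as { memory MB, CPU % } pairs.
        double[][] execs = { { 500, 50 }, { 500, 50 }, { 500, 50 }, { 250, 50 } };
        double mem = 0;
        double cpu = 0;
        for (double[] e : execs) {
            mem += e[0];
            cpu += e[1];
        }
        // Prints: used 1750 MB / 200% CPU -> 250 MB / 0% CPU left
        System.out.printf("used %.0f MB / %.0f%% CPU -> %.0f MB / %.0f%% CPU left%n",
                mem, cpu, nodeMem - mem, nodeCpu - cpu);
    }
}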
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
Class TestGenericResourceAwareStrategy, method testGenericResourceAwareStrategyInFavorOfShuffle.
/**
* Test if the scheduling logic for the GenericResourceAwareStrategy is correct when executors are ordered in favor of shuffle.
*/
@Test
public void testGenericResourceAwareStrategyInFavorOfShuffle() throws InvalidTopologyException {
    int spoutParallelism = 1;
    int boltParallelism = 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1").addResource("gpu.count", 1.0);
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2").addResource("gpu.count", 2.0);
    StormTopology stormTopology = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Config conf = createGrasClusterConfig(50, 250, 250, null, Collections.emptyMap());
    Map<String, Double> genericResourcesMap = new HashMap<>();
    genericResourcesMap.put("gpu.count", 2.0);
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000, genericResourcesMap);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, "testTopology");
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
    conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
    conf.put(Config.TOPOLOGY_RAS_ORDER_EXECUTORS_BY_PROXIMITY_NEEDS, true);
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), currentTime, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    ResourceAwareScheduler rs = new ResourceAwareScheduler();
    rs.prepare(conf, new StormMetricsRegistry());
    rs.schedule(topologies, cluster);
    // Sorted execs: [[0, 0], [2, 2], [6, 6], [4, 4], [1, 1], [5, 5], [3, 3], [7, 7]]
    // Ackers: [[7, 7]]
    HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(0, 0),     // spout
        new ExecutorDetails(2, 2),     // bolt-1
        new ExecutorDetails(6, 6),     // bolt-2
        new ExecutorDetails(7, 7))));  // acker
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(4, 4),     // bolt-3
        new ExecutorDetails(1, 1))));  // bolt-1
    // bolt-2
    expectedScheduling.add(new HashSet<>(Arrays.asList(new ExecutorDetails(5, 5))));
    // bolt-3
    expectedScheduling.add(new HashSet<>(Arrays.asList(new ExecutorDetails(3, 3))));
    HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
    SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        foundScheduling.add(new HashSet<>(execs));
    }
    assertEquals(expectedScheduling, foundScheduling);
}
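Setting Config.TOPOLOGY_RAS_ORDER_EXECUTORS_BY_PROXIMITY_NEEDS to true makes the strategy order executors by walking the shuffle chain (spout -> bolt-1 -> bolt-2 -> bolt-3) rather than component by component, so directly connected executors tend to land in the same worker. A rough, illustrative approximation of that ordering (the round-robin walk below is a simplification, not the actual implementation; the executor-id-to-component mapping is taken from the comments above):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public class ProximityOrderSketch {
    public static void main(String[] args) {
        // Executor ids per component, matching the comments above
        // (assumed mapping; the real one comes from genExecsAndComps).
        Map<String, Queue<Integer>> comps = Map.of(
            "spout", new ArrayDeque<>(List.of(0)),
            "bolt-1", new ArrayDeque<>(List.of(2, 1)),
            "bolt-2", new ArrayDeque<>(List.of(6, 5)),
            "bolt-3", new ArrayDeque<>(List.of(4, 3)));
        List<String> chain = List.of("spout", "bolt-1", "bolt-2", "bolt-3");

        // Round-robin down the shuffle chain so directly connected executors
        // end up adjacent in the order (and hence tend to share workers).
        List<Integer> order = new ArrayList<>();
        boolean took = true;
        while (took) {
            took = false;
            for (String c : chain) {
                Integer e = comps.get(c).poll();
                if (e != null) {
                    order.add(e);
                    took = true;
                }
            }
        }
        // Prints [0, 2, 6, 4, 1, 5, 3]; the test's "Sorted execs" comment
        // shows the same sequence with the acker (7) appended last.
        System.out.println(order);
    }
}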
Use of org.apache.storm.scheduler.INimbus in project storm by apache.
Class TestGenericResourceAwareStrategy, method testAntiAffinityWithMultipleTopologies.
@Test
public void testAntiAffinityWithMultipleTopologies() {
    INimbus iNimbus = new INimbusTest();
    // One rack (r000) of supervisors with no generic resources.
    Map<String, SupervisorDetails> supMap = genSupervisorsWithRacks(1, 40, 66, 0, 0, 4700, 226200, new HashMap<>());
    // A second rack (r001) whose supervisors each also provide one "my.gpu".
    HashMap<String, Double> extraResources = new HashMap<>();
    extraResources.put("my.gpu", 1.0);
    supMap.putAll(genSupervisorsWithRacks(1, 40, 66, 1, 0, 4700, 226200, extraResources));
    Config config = new Config();
    config.putAll(createGrasClusterConfig(88, 775, 25, null, null));
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    TopologyDetails tdSimple = genTopology("topology-simple", config, 1, 5, 100, 300, 0, 0, "user", 8192);
    // Schedule the simple topology first.
    Topologies topologies = new Topologies(tdSimple);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler.schedule(topologies, cluster);
    TopologyBuilder builder = topologyBuilder(1, 5, 100, 300);
    builder.setBolt("gpu-bolt", new TestBolt(), 40).addResource("my.gpu", 1.0).shuffleGrouping("spout-0");
    TopologyDetails tdGpu = topoToTopologyDetails("topology-gpu", config, builder.createTopology(), 0, 0, "user", 8192);
    // Now schedule the GPU topology with the simple topology already in place.
    topologies = new Topologies(tdSimple, tdGpu);
    cluster = new Cluster(cluster, topologies);
    scheduler.schedule(topologies, cluster);
    Map<String, SchedulerAssignment> assignments = new TreeMap<>(cluster.getAssignments());
    assertEquals(2, assignments.size());
    // Count, per topology, how many slots landed on each rack.
    Map<String, Map<String, AtomicLong>> topoPerRackCount = new HashMap<>();
    for (Entry<String, SchedulerAssignment> entry : assignments.entrySet()) {
        SchedulerAssignment sa = entry.getValue();
        Map<String, AtomicLong> slotsPerRack = new TreeMap<>();
        for (WorkerSlot slot : sa.getSlots()) {
            String nodeId = slot.getNodeId();
            String rack = supervisorIdToRackName(nodeId);
            slotsPerRack.computeIfAbsent(rack, (r) -> new AtomicLong(0)).incrementAndGet();
        }
        LOG.info("{} => {}", entry.getKey(), slotsPerRack);
        topoPerRackCount.put(entry.getKey(), slotsPerRack);
    }
    Map<String, AtomicLong> simpleCount = topoPerRackCount.get("topology-simple-0");
    assertNotNull(simpleCount);
    // Because the simple topology was scheduled first, make sure it didn't put
    // anything on the GPU nodes.
    // Only one rack is in use:
    assertEquals(1, simpleCount.size());
    // r001 is the second rack, the one with GPUs:
    assertFalse(simpleCount.containsKey("r001"));
    // r000 is the first rack, with no GPUs:
    assertTrue(simpleCount.containsKey("r000"));
    // We don't care much about where topology-gpu-0 landed, only that it was scheduled.
}
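The "r000"/"r001" keys asserted above reflect the rack-naming convention of these test utilities; a sketch of the assumed format (the rackName helper is illustrative, not part of Storm's API):

public class RackNameSketch {
    // Assumed convention behind the "r000"/"r001" assertions above:
    // the rack index zero-padded to three digits.
    static String rackName(int rackIndex) {
        return String.format("r%03d", rackIndex);
    }

    public static void main(String[] args) {
        System.out.println(rackName(0)); // r000 - the rack without GPUs
        System.out.println(rackName(1)); // r001 - the rack whose supervisors add "my.gpu"
    }
}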