use of org.apache.storm.scheduler.resource.normalization.ResourceMetrics in project storm by apache.
the class TestDefaultResourceAwareStrategy method testMultipleSharedMemoryWithOneExecutorPerWorker.
/*
* Test assigned memory with the different shared memory types when one executor runs per worker.
*/
@ParameterizedTest
@EnumSource(SharedMemoryType.class)
public void testMultipleSharedMemoryWithOneExecutorPerWorker(SharedMemoryType memoryType) {
int spoutParallelism = 4;
double cpuPercent = 10;
double memoryOnHeap = 10;
double memoryOffHeap = 10;
double sharedOnHeapWithinWorker = 450;
double sharedOffHeapWithinNode = 600;
double sharedOffHeapWithinWorker = 400;
TopologyBuilder builder = new TopologyBuilder();
switch(memoryType) {
case SHARED_OFF_HEAP_NODE:
builder.setSpout("spout", new TestSpout(), spoutParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "spout shared off heap within node"));
break;
case SHARED_OFF_HEAP_WORKER:
builder.setSpout("spout", new TestSpout(), spoutParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "spout shared off heap within worker"));
break;
case SHARED_ON_HEAP_WORKER:
builder.setSpout("spout", new TestSpout(), spoutParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "spout shared on heap within worker"));
break;
}
StormTopology stormTopology = builder.createTopology();
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 1000);
Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
conf.put(Config.TOPOLOGY_PRIORITY, 0);
conf.put(Config.TOPOLOGY_NAME, "testTopology");
conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(stormTopology), CURRENT_TIME, "user");
Topologies topologies = new Topologies(topo);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
scheduler = new ResourceAwareScheduler();
scheduler.prepare(conf, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
long numNodes = assignment.getSlotToExecutors().keySet().stream().map(ws -> ws.getNodeId()).distinct().count();
switch(memoryType) {
case SHARED_OFF_HEAP_NODE:
// 4 workers on a single node. OffHeapNode memory is shared
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap + sharedOffHeapWithinNode, 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(sharedOffHeapWithinNode, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(numNodes, is(1L));
assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
break;
case SHARED_OFF_HEAP_WORKER:
// 4 workers on 2 nodes. OffHeapWorker memory is not shared -- consumed 4x, once for each worker
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * (memoryOffHeap + sharedOffHeapWithinWorker), 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(spoutParallelism * sharedOffHeapWithinWorker, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(numNodes, is(2L));
assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
break;
case SHARED_ON_HEAP_WORKER:
// 4 workers on 2 nodes. OnHeapWorker memory is not shared -- consumed 4x, once for each worker
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * (memoryOnHeap + sharedOnHeapWithinWorker), 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(spoutParallelism * sharedOnHeapWithinWorker, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(0, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(numNodes, is(2L));
assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
break;
}
}
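The expected values asserted above are plain arithmetic over the constants declared at the top of the test. A minimal standalone sketch of that arithmetic for the SHARED_OFF_HEAP_NODE case (the class name is illustrative; the constants are taken from the test) is:
public class SharedMemoryMathSketch {
    public static void main(String[] args) {
        int spoutParallelism = 4;              // four executors, one worker each
        double memoryOnHeap = 10;              // per-executor on-heap MB
        double memoryOffHeap = 10;             // per-executor off-heap MB
        double sharedOffHeapWithinNode = 600;  // node-level shared memory, charged once per node
        // All four single-executor workers land on one node, so the node-level
        // shared off-heap memory is counted exactly once in the topology totals.
        double assignedOnHeap = spoutParallelism * memoryOnHeap;                              // 40
        double assignedOffHeap = spoutParallelism * memoryOffHeap + sharedOffHeapWithinNode;  // 640
        System.out.printf("on-heap=%.1f off-heap=%.1f%n", assignedOnHeap, assignedOffHeap);
    }
}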
use of org.apache.storm.scheduler.resource.normalization.ResourceMetrics in project storm by apache.
the class TestDefaultResourceAwareStrategy method testDefaultResourceAwareStrategyInFavorOfShuffle.
/**
* Test that the scheduling logic of DefaultResourceAwareStrategy is correct when executor ordering is driven by network proximity needs.
*/
@Test
public void testDefaultResourceAwareStrategyInFavorOfShuffle() throws InvalidTopologyException {
int spoutParallelism = 1;
int boltParallelism = 2;
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", new TestSpout(), spoutParallelism);
builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1");
builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2");
StormTopology stormTopology = builder.createTopology();
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000);
Config conf = createClusterConfig(50, 250, 250, null);
conf.put(Config.TOPOLOGY_PRIORITY, 0);
conf.put(Config.TOPOLOGY_NAME, "testTopology");
conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, Double.MAX_VALUE);
conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
conf.put(Config.TOPOLOGY_RAS_ORDER_EXECUTORS_BY_PROXIMITY_NEEDS, true);
TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), CURRENT_TIME, "user");
Topologies topologies = new Topologies(topo);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
ResourceAwareScheduler rs = new ResourceAwareScheduler();
rs.prepare(conf, new StormMetricsRegistry());
rs.schedule(topologies, cluster);
// Sorted execs: [[0, 0], [2, 2], [6, 6], [4, 4], [1, 1], [5, 5], [3, 3], [7, 7]]
// Ackers: [[7, 7]]
HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
expectedScheduling.add(new HashSet<>(Arrays.asList(// spout
new ExecutorDetails(0, 0), // bolt-2
new ExecutorDetails(6, 6), // bolt-1
new ExecutorDetails(2, 2), // acker
new ExecutorDetails(7, 7))));
expectedScheduling.add(new HashSet<>(Arrays.asList(// bolt-3
new ExecutorDetails(3, 3), // bolt-2
new ExecutorDetails(5, 5), // bolt-3
new ExecutorDetails(4, 4), // bolt-1
new ExecutorDetails(1, 1))));
HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
foundScheduling.add(new HashSet<>(execs));
}
Assert.assertEquals(expectedScheduling, foundScheduling);
}
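Outside the test harness, the same shuffle-favoring behavior is enabled through a single config key on the submitted topology. A minimal sketch (the topology name and components are illustrative; the config constant is the one exercised above) might look like:
import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.testing.TestWordSpout;
import org.apache.storm.topology.TopologyBuilder;

public class ProximityOrderingSubmitSketch {
    public static void main(String[] args) throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new TestWordSpout(), 1);
        // ... add bolts chained with shuffleGrouping, as in the test above ...
        Config conf = new Config();
        // Ask the resource-aware strategy to order executors by network proximity
        // needs, so shuffle-connected executors tend to be packed together.
        conf.put(Config.TOPOLOGY_RAS_ORDER_EXECUTORS_BY_PROXIMITY_NEEDS, true);
        StormSubmitter.submitTopology("proximity-demo", conf, builder.createTopology());
    }
}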
use of org.apache.storm.scheduler.resource.normalization.ResourceMetrics in project storm by apache.
the class TestResourceAwareScheduler method testTopologyWorkerMaxHeapSize.
@Test
public void testTopologyWorkerMaxHeapSize() {
// Test1: Check that RAS spreads executors across multiple workers based on the per-worker heap limit set by the topology
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(2, 2, 400, 2000);
TopologyBuilder builder1 = new TopologyBuilder();
builder1.setSpout("wordSpout1", new TestWordSpout(), 4);
StormTopology stormTopology1 = builder1.createTopology();
Config config1 = new Config();
config1.putAll(defaultTopologyConf);
config1.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 128.0);
Map<ExecutorDetails, String> executorMap1 = genExecsAndComps(stormTopology1);
TopologyDetails topology1 = new TopologyDetails("topology1", config1, stormTopology1, 1, executorMap1, 0, "user");
ResourceAwareScheduler rs = new ResourceAwareScheduler();
Topologies topologies = new Topologies(topology1);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config1);
rs.prepare(config1, new StormMetricsRegistry());
try {
rs.schedule(topologies, cluster);
assertFalse(cluster.needsSchedulingRas(topology1));
assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
assertEquals(4, cluster.getAssignedNumWorkers(topology1));
} finally {
rs.cleanup();
}
// Test2: Test the case when no more workers are available due to the topology worker max heap size limit, even though memory is still available.
// wordSpout2 contains 5 executors that need scheduling, each with a memory requirement of 128.0 MB.
// The cluster contains 4 free WorkerSlots, and for this topology each worker is limited to a max heap size of 128.0 MB.
// Thus one executor cannot be scheduled, which fails the scheduling of this topology, and no executors of this
// topology will be scheduled.
TopologyBuilder builder2 = new TopologyBuilder();
builder2.setSpout("wordSpout2", new TestWordSpout(), 5);
StormTopology stormTopology2 = builder2.createTopology();
Config config2 = new Config();
config2.putAll(defaultTopologyConf);
config2.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 128.0);
Map<ExecutorDetails, String> executorMap2 = genExecsAndComps(stormTopology2);
TopologyDetails topology2 = new TopologyDetails("topology2", config2, stormTopology2, 1, executorMap2, 0, "user");
topologies = new Topologies(topology2);
cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config2);
rs.prepare(config2, new StormMetricsRegistry());
try {
rs.schedule(topologies, cluster);
assertTrue(cluster.needsSchedulingRas(topology2));
String status = cluster.getStatusMap().get(topology2.getId());
assert status.startsWith("Not enough resources to schedule") : status;
// assert status.endsWith("5 executors not scheduled") : status;
assertEquals(5, cluster.getUnassignedExecutors(topology2).size());
} finally {
rs.cleanup();
}
}
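The worker counts asserted in both halves of this test follow from the per-worker heap cap: as the comment above notes, each executor requests 128.0 MB on-heap, so a 128 MB worker can hold exactly one executor. A small sketch of that arithmetic (class name illustrative) is:
public class WorkerMaxHeapMathSketch {
    public static void main(String[] args) {
        double workerMaxHeapMb = 128.0;    // Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB
        double executorOnHeapMb = 128.0;   // per-executor on-heap request, per the comment above
        int executorsPerWorker = (int) (workerMaxHeapMb / executorOnHeapMb);  // 1
        int freeSlots = 4;                 // 2 supervisors x 2 ports, from genSupervisors(2, 2, ...)
        int test1Workers = 4 / executorsPerWorker;  // 4 workers needed, fits in the 4 slots
        int test2Workers = 5 / executorsPerWorker;  // 5 workers needed, exceeds the 4 free slots
        System.out.println("Test1 schedulable: " + (test1Workers <= freeSlots));  // true
        System.out.println("Test2 schedulable: " + (test2Workers <= freeSlots));  // false
    }
}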
use of org.apache.storm.scheduler.resource.normalization.ResourceMetrics in project storm by apache.
the class TestResourceAwareScheduler method testFaultTolerance.
/**
* Test correct behavior when a supervisor dies. Check that the scheduler handles it correctly and evicts the correct
* topology when rescheduling the executors from the failed supervisor.
*/
@Test
public void testFaultTolerance() {
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(6, 4, 100, 1000);
Map<String, Map<String, Number>> resourceUserPool = userResourcePool(userRes("jerry", 50, 500), userRes("bobby", 200, 2_000), userRes("derek", 100, 1_000));
Config config = createClusterConfig(100, 500, 500, resourceUserPool);
Topologies topologies = new Topologies(genTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 21, "jerry"), genTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"), genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"), genTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 29, "derek"), genTopology("topo-6", config, 1, 0, 1, 0, currentTime - 2, 10, "derek"));
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
scheduler = new ResourceAwareScheduler();
scheduler.prepare(config, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
assertTopologiesFullyScheduled(cluster, "topo-1", "topo-2", "topo-3", "topo-4", "topo-5", "topo-6");
// fail supervisor
SupervisorDetails supFailed = cluster.getSupervisors().values().iterator().next();
LOG.info("/***** failing supervisor: {} ****/", supFailed.getHost());
supMap.remove(supFailed.getId());
Map<String, SchedulerAssignmentImpl> newAssignments = new HashMap<>();
for (Map.Entry<String, SchedulerAssignment> topoToAssignment : cluster.getAssignments().entrySet()) {
String topoId = topoToAssignment.getKey();
SchedulerAssignment assignment = topoToAssignment.getValue();
Map<ExecutorDetails, WorkerSlot> executorToSlots = new HashMap<>();
for (Map.Entry<ExecutorDetails, WorkerSlot> execToWorker : assignment.getExecutorToSlot().entrySet()) {
ExecutorDetails exec = execToWorker.getKey();
WorkerSlot ws = execToWorker.getValue();
if (!ws.getNodeId().equals(supFailed.getId())) {
executorToSlots.put(exec, ws);
}
}
newAssignments.put(topoId, new SchedulerAssignmentImpl(topoId, executorToSlots, null, null));
}
Map<String, String> statusMap = cluster.getStatusMap();
LOG.warn("Rescheduling with removed Supervisor....");
cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, newAssignments, topologies, config);
cluster.setStatusMap(statusMap);
scheduler.schedule(topologies, cluster);
assertTopologiesFullyScheduled(cluster, "topo-2", "topo-3", "topo-4", "topo-5", "topo-6");
assertTopologiesNotScheduled(cluster, "topo-1");
}
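The eviction asserted at the end hinges on Config.TOPOLOGY_PRIORITY, where a larger number means lower priority: within jerry's pool, topo-1 (priority 21) ranks below topo-2 (priority 20), so it is the one left unscheduled after the supervisor is removed. A simplified sketch of that comparison (the selection logic here is an illustration, not the scheduler's full eviction algorithm) is:
public class EvictionPrioritySketch {
    public static void main(String[] args) {
        // Priorities passed to genTopology above; a larger value means lower priority.
        int topo1Priority = 21;
        int topo2Priority = 20;
        // Simplified stand-in for the choice within jerry's pool: when resources
        // run short, the lower-priority (larger-numbered) topology gives way first.
        String evicted = topo1Priority > topo2Priority ? "topo-1" : "topo-2";
        System.out.println("Evicted: " + evicted);  // topo-1
    }
}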
use of org.apache.storm.scheduler.resource.normalization.ResourceMetrics in project storm by apache.
the class TestResourceAwareScheduler method sanityTestOfScheduling.
@Test
public void sanityTestOfScheduling() {
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(1, 2, 400, 2000);
Config config = new Config();
config.putAll(defaultTopologyConf);
scheduler = new ResourceAwareScheduler();
TopologyDetails topology1 = genTopology("topology1", config, 1, 1, 1, 1, 0, 0, "user");
Topologies topologies = new Topologies(topology1);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
scheduler.prepare(config, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
SchedulerAssignment assignment = cluster.getAssignmentById(topology1.getId());
Set<WorkerSlot> assignedSlots = assignment.getSlots();
Set<String> nodesIDs = new HashSet<>();
for (WorkerSlot slot : assignedSlots) {
nodesIDs.add(slot.getNodeId());
}
Collection<ExecutorDetails> executors = assignment.getExecutors();
assertEquals(1, assignedSlots.size());
assertEquals(1, nodesIDs.size());
assertEquals(2, executors.size());
assertFalse(cluster.needsSchedulingRas(topology1));
assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
}
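Every example on this page builds its Cluster the same way: a fresh ResourceMetrics wrapping a new StormMetricsRegistry, an empty assignment map, and the topology config. A small helper capturing that recurring pattern (the class and method names are illustrative; the constructor signature is the one used throughout) could look like:
import java.util.HashMap;
import java.util.Map;
import org.apache.storm.Config;
import org.apache.storm.metric.StormMetricsRegistry;
import org.apache.storm.scheduler.Cluster;
import org.apache.storm.scheduler.INimbus;
import org.apache.storm.scheduler.SupervisorDetails;
import org.apache.storm.scheduler.Topologies;
import org.apache.storm.scheduler.resource.normalization.ResourceMetrics;

public final class ClusterFactorySketch {
    // Builds a Cluster with a fresh ResourceMetrics, mirroring the setup repeated
    // in each test above; no existing scheduler assignments are carried over.
    static Cluster newCluster(INimbus nimbus, Map<String, SupervisorDetails> supervisors,
                              Topologies topologies, Config conf) {
        return new Cluster(nimbus, new ResourceMetrics(new StormMetricsRegistry()),
                supervisors, new HashMap<>(), topologies, conf);
    }
}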