Example 36 with ExecutorDetails

Use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

The class TestResourceAwareScheduler, method testFaultTolerance.

/**
 * Test correct behavior when a supervisor dies. Check that the scheduler handles the failure correctly and evicts
 * the correct topology when rescheduling the executors from the dead supervisor.
 */
@Test
public void testFaultTolerance() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(6, 4, 100, 1000);
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(
        userRes("jerry", 50, 500),
        userRes("bobby", 200, 2_000),
        userRes("derek", 100, 1_000));
    Config config = createClusterConfig(100, 500, 500, resourceUserPool);
    Topologies topologies = new Topologies(
        genTopology("topo-1", config, 1, 0, 1, 0, currentTime - 2, 21, "jerry"),
        genTopology("topo-2", config, 1, 0, 1, 0, currentTime - 2, 20, "jerry"),
        genTopology("topo-3", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"),
        genTopology("topo-4", config, 1, 0, 1, 0, currentTime - 2, 10, "bobby"),
        genTopology("topo-5", config, 1, 0, 1, 0, currentTime - 2, 29, "derek"),
        genTopology("topo-6", config, 1, 0, 1, 0, currentTime - 2, 10, "derek"));
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, "topo-1", "topo-2", "topo-3", "topo-4", "topo-5", "topo-6");
    // fail supervisor
    SupervisorDetails supFailed = cluster.getSupervisors().values().iterator().next();
    LOG.info("/***** failing supervisor: {} ****/", supFailed.getHost());
    supMap.remove(supFailed.getId());
    Map<String, SchedulerAssignmentImpl> newAssignments = new HashMap<>();
    for (Map.Entry<String, SchedulerAssignment> topoToAssignment : cluster.getAssignments().entrySet()) {
        String topoId = topoToAssignment.getKey();
        SchedulerAssignment assignment = topoToAssignment.getValue();
        Map<ExecutorDetails, WorkerSlot> executorToSlots = new HashMap<>();
        for (Map.Entry<ExecutorDetails, WorkerSlot> execToWorker : assignment.getExecutorToSlot().entrySet()) {
            ExecutorDetails exec = execToWorker.getKey();
            WorkerSlot ws = execToWorker.getValue();
            if (!ws.getNodeId().equals(supFailed.getId())) {
                executorToSlots.put(exec, ws);
            }
        }
        newAssignments.put(topoId, new SchedulerAssignmentImpl(topoId, executorToSlots, null, null));
    }
    Map<String, String> statusMap = cluster.getStatusMap();
    LOG.warn("Rescheduling with removed Supervisor....");
    cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, newAssignments, topologies, config);
    cluster.setStatusMap(statusMap);
    scheduler.schedule(topologies, cluster);
    assertTopologiesFullyScheduled(cluster, "topo-2", "topo-3", "topo-4", "topo-5", "topo-6");
    assertTopologiesNotScheduled(cluster, "topo-1");
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) SchedulerAssignmentImpl(org.apache.storm.scheduler.SchedulerAssignmentImpl) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) Test(org.junit.jupiter.api.Test) PerformanceTest(org.apache.storm.testing.PerformanceTest)
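
The loop that rebuilds the assignment map without the failed supervisor's slots is the core of this test. A minimal sketch of pulling it out into a reusable test helper follows; the class and method names here are hypothetical and not part of the Storm code base, but the calls mirror the inline loop above.

import java.util.HashMap;
import java.util.Map;

import org.apache.storm.scheduler.ExecutorDetails;
import org.apache.storm.scheduler.SchedulerAssignment;
import org.apache.storm.scheduler.SchedulerAssignmentImpl;
import org.apache.storm.scheduler.WorkerSlot;

public final class AssignmentTestUtils {

    private AssignmentTestUtils() {
    }

    /**
     * Rebuild the given assignments with every slot on the failed supervisor dropped,
     * mirroring the inline loop in testFaultTolerance.
     */
    public static Map<String, SchedulerAssignmentImpl> withoutSupervisor(
            Map<String, SchedulerAssignment> assignments, String failedSupervisorId) {
        Map<String, SchedulerAssignmentImpl> surviving = new HashMap<>();
        for (Map.Entry<String, SchedulerAssignment> topoToAssignment : assignments.entrySet()) {
            Map<ExecutorDetails, WorkerSlot> executorToSlots = new HashMap<>();
            for (Map.Entry<ExecutorDetails, WorkerSlot> execToWorker
                    : topoToAssignment.getValue().getExecutorToSlot().entrySet()) {
                // Keep only executors whose worker slot is not hosted on the failed supervisor.
                if (!execToWorker.getValue().getNodeId().equals(failedSupervisorId)) {
                    executorToSlots.put(execToWorker.getKey(), execToWorker.getValue());
                }
            }
            surviving.put(topoToAssignment.getKey(),
                new SchedulerAssignmentImpl(topoToAssignment.getKey(), executorToSlots, null, null));
        }
        return surviving;
    }
}

With such a helper the test would reduce the rebuild step to newAssignments = AssignmentTestUtils.withoutSupervisor(cluster.getAssignments(), supFailed.getId());.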

Example 37 with ExecutorDetails

Use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

The class TestResourceAwareScheduler, method sanityTestOfScheduling.

@Test
public void sanityTestOfScheduling() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(1, 2, 400, 2000);
    Config config = new Config();
    config.putAll(defaultTopologyConf);
    scheduler = new ResourceAwareScheduler();
    TopologyDetails topology1 = genTopology("topology1", config, 1, 1, 1, 1, 0, 0, "user");
    Topologies topologies = new Topologies(topology1);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    scheduler.prepare(config, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    SchedulerAssignment assignment = cluster.getAssignmentById(topology1.getId());
    Set<WorkerSlot> assignedSlots = assignment.getSlots();
    Set<String> nodesIDs = new HashSet<>();
    for (WorkerSlot slot : assignedSlots) {
        nodesIDs.add(slot.getNodeId());
    }
    Collection<ExecutorDetails> executors = assignment.getExecutors();
    assertEquals(1, assignedSlots.size());
    assertEquals(1, nodesIDs.size());
    assertEquals(2, executors.size());
    assertFalse(cluster.needsSchedulingRas(topology1));
    assertTrue(cluster.getStatusMap().get(topology1.getId()).startsWith("Running - Fully Scheduled by DefaultResourceAwareStrategy"));
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test) PerformanceTest(org.apache.storm.testing.PerformanceTest)
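
If this sanity test should also check resource accounting, the per-supervisor usage helpers shown in Examples 38 and 39 below can be compared against supervisor capacity. The following is a hedged sketch, assuming the test statically imports those helpers from TestUtilsForResourceAwareScheduler (as it does the other utilities) and that SupervisorDetails exposes its configured capacity via getTotalMemory() and getTotalCpu().

    // Sketch: after scheduling, no supervisor should be over-committed.
    Map<SupervisorDetails, Double> memUsage = getSupervisorToMemoryUsage(cluster, topologies);
    Map<SupervisorDetails, Double> cpuUsage = getSupervisorToCpuUsage(cluster, topologies);
    for (SupervisorDetails sup : cluster.getSupervisors().values()) {
        assertTrue(memUsage.get(sup) <= sup.getTotalMemory(), "memory over-committed on " + sup.getId());
        assertTrue(cpuUsage.get(sup) <= sup.getTotalCpu(), "CPU over-committed on " + sup.getId());
    }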

Example 38 with ExecutorDetails

Use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

The class TestUtilsForResourceAwareScheduler, method getSupervisorToMemoryUsage.

public static Map<SupervisorDetails, Double> getSupervisorToMemoryUsage(ISchedulingState cluster, Topologies topologies) {
    Map<SupervisorDetails, Double> superToMem = new HashMap<>();
    Collection<SchedulerAssignment> assignments = cluster.getAssignments().values();
    Collection<SupervisorDetails> supervisors = cluster.getSupervisors().values();
    for (SupervisorDetails supervisor : supervisors) {
        superToMem.put(supervisor, 0.0);
    }
    for (SchedulerAssignment assignment : assignments) {
        Map<ExecutorDetails, SupervisorDetails> executorToSupervisor = new HashMap<>();
        Map<SupervisorDetails, List<ExecutorDetails>> supervisorToExecutors = new HashMap<>();
        TopologyDetails topology = topologies.getById(assignment.getTopologyId());
        for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
            executorToSupervisor.put(entry.getKey(), cluster.getSupervisorById(entry.getValue().getNodeId()));
        }
        for (Map.Entry<ExecutorDetails, SupervisorDetails> entry : executorToSupervisor.entrySet()) {
            List<ExecutorDetails> executorsOnSupervisor = supervisorToExecutors.get(entry.getValue());
            if (executorsOnSupervisor == null) {
                executorsOnSupervisor = new ArrayList<>();
                supervisorToExecutors.put(entry.getValue(), executorsOnSupervisor);
            }
            executorsOnSupervisor.add(entry.getKey());
        }
        for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
            Double supervisorUsedMemory = 0.0;
            for (ExecutorDetails executor : entry.getValue()) {
                supervisorUsedMemory += topology.getTotalMemReqTask(executor);
            }
            superToMem.put(entry.getKey(), superToMem.get(entry.getKey()) + supervisorUsedMemory);
        }
    }
    return superToMem;
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap)
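
The two intermediate maps (executorToSupervisor and supervisorToExecutors) exist only to group executors before summing. A behavior-equivalent sketch of the main loop that accumulates each supervisor's total directly in a single pass with Map.merge:

    for (SchedulerAssignment assignment : assignments) {
        TopologyDetails topology = topologies.getById(assignment.getTopologyId());
        for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
            SupervisorDetails supervisor = cluster.getSupervisorById(entry.getValue().getNodeId());
            // Add this executor's memory request straight onto its supervisor's running total.
            superToMem.merge(supervisor, topology.getTotalMemReqTask(entry.getKey()), Double::sum);
        }
    }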

Example 39 with ExecutorDetails

Use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

The class TestUtilsForResourceAwareScheduler, method getSupervisorToCpuUsage.

public static Map<SupervisorDetails, Double> getSupervisorToCpuUsage(ISchedulingState cluster, Topologies topologies) {
    Map<SupervisorDetails, Double> superToCpu = new HashMap<>();
    Collection<SchedulerAssignment> assignments = cluster.getAssignments().values();
    Collection<SupervisorDetails> supervisors = cluster.getSupervisors().values();
    for (SupervisorDetails supervisor : supervisors) {
        superToCpu.put(supervisor, 0.0);
    }
    for (SchedulerAssignment assignment : assignments) {
        Map<ExecutorDetails, SupervisorDetails> executorToSupervisor = new HashMap<>();
        Map<SupervisorDetails, List<ExecutorDetails>> supervisorToExecutors = new HashMap<>();
        TopologyDetails topology = topologies.getById(assignment.getTopologyId());
        for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
            executorToSupervisor.put(entry.getKey(), cluster.getSupervisorById(entry.getValue().getNodeId()));
        }
        for (Map.Entry<ExecutorDetails, SupervisorDetails> entry : executorToSupervisor.entrySet()) {
            List<ExecutorDetails> executorsOnSupervisor = supervisorToExecutors.get(entry.getValue());
            if (executorsOnSupervisor == null) {
                executorsOnSupervisor = new ArrayList<>();
                supervisorToExecutors.put(entry.getValue(), executorsOnSupervisor);
            }
            executorsOnSupervisor.add(entry.getKey());
        }
        for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
            Double supervisorUsedCpu = 0.0;
            for (ExecutorDetails executor : entry.getValue()) {
                supervisorUsedCpu += topology.getTotalCpuReqTask(executor);
            }
            superToCpu.put(entry.getKey(), superToCpu.get(entry.getKey()) + supervisorUsedCpu);
        }
    }
    return superToCpu;
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap)
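
getSupervisorToCpuUsage is a line-for-line copy of the memory variant above, differing only in which per-executor requirement is summed. A hedged deduplication sketch that parameterizes the walk over assignments on a resource getter (the helper name is hypothetical, and it needs an import of java.util.function.ToDoubleBiFunction):

    private static Map<SupervisorDetails, Double> getSupervisorToResourceUsage(
            ISchedulingState cluster, Topologies topologies,
            ToDoubleBiFunction<TopologyDetails, ExecutorDetails> perExecutorResource) {
        Map<SupervisorDetails, Double> superToUsage = new HashMap<>();
        for (SupervisorDetails supervisor : cluster.getSupervisors().values()) {
            superToUsage.put(supervisor, 0.0);
        }
        for (SchedulerAssignment assignment : cluster.getAssignments().values()) {
            TopologyDetails topology = topologies.getById(assignment.getTopologyId());
            for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
                SupervisorDetails supervisor = cluster.getSupervisorById(entry.getValue().getNodeId());
                // Attribute this executor's requirement to the supervisor hosting its slot.
                superToUsage.merge(supervisor,
                    perExecutorResource.applyAsDouble(topology, entry.getKey()), Double::sum);
            }
        }
        return superToUsage;
    }

The existing methods would then delegate, e.g. getSupervisorToCpuUsage(cluster, topologies) becomes getSupervisorToResourceUsage(cluster, topologies, TopologyDetails::getTotalCpuReqTask).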

Example 40 with ExecutorDetails

Use of org.apache.storm.scheduler.ExecutorDetails in project storm by apache.

The class ResourceUtils, method printScheduling.

/**
 * Print the current scheduling for debugging purposes.
 *
 * @param cluster    the cluster whose assignments should be printed
 * @param topologies the topologies known to the scheduler
 * @return a human-readable node -> topology -> slot -> executors layout
 */
public static String printScheduling(Cluster cluster, Topologies topologies) {
    StringBuilder str = new StringBuilder();
    Map<String, Map<String, Map<WorkerSlot, Collection<ExecutorDetails>>>> schedulingMap = new HashMap<>();
    for (TopologyDetails topo : topologies.getTopologies()) {
        if (cluster.getAssignmentById(topo.getId()) != null) {
            for (Map.Entry<ExecutorDetails, WorkerSlot> entry : cluster.getAssignmentById(topo.getId()).getExecutorToSlot().entrySet()) {
                WorkerSlot slot = entry.getValue();
                String nodeId = slot.getNodeId();
                ExecutorDetails exec = entry.getKey();
                schedulingMap
                        .computeIfAbsent(nodeId, node -> new HashMap<>())
                        .computeIfAbsent(topo.getId(), topoId -> new HashMap<>())
                        .computeIfAbsent(slot, s -> new LinkedList<>())
                        .add(exec);
            }
        }
    }
    for (Map.Entry<String, Map<String, Map<WorkerSlot, Collection<ExecutorDetails>>>> entry : schedulingMap.entrySet()) {
        if (cluster.getSupervisorById(entry.getKey()) != null) {
            str.append("/** Node: " + cluster.getSupervisorById(entry.getKey()).getHost() + "-" + entry.getKey() + " **/\n");
        } else {
            str.append("/** Node: Unknown may be dead -" + entry.getKey() + " **/\n");
        }
        for (Map.Entry<String, Map<WorkerSlot, Collection<ExecutorDetails>>> topo_sched : schedulingMap.get(entry.getKey()).entrySet()) {
            str.append("\t-->Topology: " + topo_sched.getKey() + "\n");
            for (Map.Entry<WorkerSlot, Collection<ExecutorDetails>> ws : topo_sched.getValue().entrySet()) {
                str.append("\t\t->Slot [" + ws.getKey().getPort() + "] -> " + ws.getValue() + "\n");
            }
        }
    }
    return str.toString();
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) HashMap(java.util.HashMap) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Collection(java.util.Collection) HashMap(java.util.HashMap) Map(java.util.Map)
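
A typical call site simply dumps the layout at debug level after a scheduling round. A minimal usage sketch, assuming an SLF4J LOG field as in the test examples above:

    // Log the node -> topology -> slot -> executors layout produced by the scheduler.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Current scheduling:\n{}", ResourceUtils.printScheduling(cluster, topologies));
    }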

Aggregations

ExecutorDetails (org.apache.storm.scheduler.ExecutorDetails)72 HashMap (java.util.HashMap)50 TopologyDetails (org.apache.storm.scheduler.TopologyDetails)42 WorkerSlot (org.apache.storm.scheduler.WorkerSlot)41 SchedulerAssignment (org.apache.storm.scheduler.SchedulerAssignment)36 ArrayList (java.util.ArrayList)35 Map (java.util.Map)34 Cluster (org.apache.storm.scheduler.Cluster)31 Config (org.apache.storm.Config)29 HashSet (java.util.HashSet)28 List (java.util.List)28 SupervisorDetails (org.apache.storm.scheduler.SupervisorDetails)28 Topologies (org.apache.storm.scheduler.Topologies)23 LinkedList (java.util.LinkedList)21 INimbus (org.apache.storm.scheduler.INimbus)21 Collection (java.util.Collection)20 StormMetricsRegistry (org.apache.storm.metric.StormMetricsRegistry)19 StormTopology (org.apache.storm.generated.StormTopology)18 TestUtilsForResourceAwareScheduler (org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler)18 ResourceMetrics (org.apache.storm.scheduler.resource.normalization.ResourceMetrics)18