use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
In class TestUtilsForResourceAwareScheduler, method getSupervisorToCpuUsage:
public static Map<SupervisorDetails, Double> getSupervisorToCpuUsage(Cluster cluster, Topologies topologies) {
    Map<SupervisorDetails, Double> superToCpu = new HashMap<>();
    Collection<SchedulerAssignment> assignments = cluster.getAssignments().values();
    Collection<SupervisorDetails> supervisors = cluster.getSupervisors().values();
    for (SupervisorDetails supervisor : supervisors) {
        superToCpu.put(supervisor, 0.0);
    }
    for (SchedulerAssignment assignment : assignments) {
        Map<ExecutorDetails, SupervisorDetails> executorToSupervisor = new HashMap<>();
        Map<SupervisorDetails, List<ExecutorDetails>> supervisorToExecutors = new HashMap<>();
        TopologyDetails topology = topologies.getById(assignment.getTopologyId());
        for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
            executorToSupervisor.put(entry.getKey(), cluster.getSupervisorById(entry.getValue().getNodeId()));
        }
        for (Map.Entry<ExecutorDetails, SupervisorDetails> entry : executorToSupervisor.entrySet()) {
            List<ExecutorDetails> executorsOnSupervisor = supervisorToExecutors.get(entry.getValue());
            if (executorsOnSupervisor == null) {
                executorsOnSupervisor = new ArrayList<>();
                supervisorToExecutors.put(entry.getValue(), executorsOnSupervisor);
            }
            executorsOnSupervisor.add(entry.getKey());
        }
        for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
            Double supervisorUsedCpu = 0.0;
            for (ExecutorDetails executor : entry.getValue()) {
                supervisorUsedCpu += topology.getTotalCpuReqTask(executor);
            }
            superToCpu.put(entry.getKey(), superToCpu.get(entry.getKey()) + supervisorUsedCpu);
        }
    }
    return superToCpu;
}
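A minimal sketch of how a scheduler test might consume this helper, assuming JUnit's assertTrue is statically imported and that the test setup defines a per-supervisor CPU budget of 400.0; the method name and the budget are assumptions, not taken from the real test class:

// Hypothetical test fragment: verify no supervisor is scheduled beyond an assumed CPU budget.
public void assertCpuNotOverCommitted(Cluster cluster, Topologies topologies) {
    double supervisorCpuCapacity = 400.0; // assumed capacity defined by the test setup
    Map<SupervisorDetails, Double> cpuUsage =
        TestUtilsForResourceAwareScheduler.getSupervisorToCpuUsage(cluster, topologies);
    for (Map.Entry<SupervisorDetails, Double> entry : cpuUsage.entrySet()) {
        assertTrue("CPU over-committed on supervisor " + entry.getKey().getId(),
                   entry.getValue() <= supervisorCpuCapacity);
    }
}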
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
In class TestUtilsForResourceAwareScheduler, method getSupervisorToMemoryUsage:
public static Map<SupervisorDetails, Double> getSupervisorToMemoryUsage(Cluster cluster, Topologies topologies) {
    Map<SupervisorDetails, Double> superToMem = new HashMap<>();
    Collection<SchedulerAssignment> assignments = cluster.getAssignments().values();
    Collection<SupervisorDetails> supervisors = cluster.getSupervisors().values();
    for (SupervisorDetails supervisor : supervisors) {
        superToMem.put(supervisor, 0.0);
    }
    for (SchedulerAssignment assignment : assignments) {
        Map<ExecutorDetails, SupervisorDetails> executorToSupervisor = new HashMap<>();
        Map<SupervisorDetails, List<ExecutorDetails>> supervisorToExecutors = new HashMap<>();
        TopologyDetails topology = topologies.getById(assignment.getTopologyId());
        for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
            executorToSupervisor.put(entry.getKey(), cluster.getSupervisorById(entry.getValue().getNodeId()));
        }
        for (Map.Entry<ExecutorDetails, SupervisorDetails> entry : executorToSupervisor.entrySet()) {
            List<ExecutorDetails> executorsOnSupervisor = supervisorToExecutors.get(entry.getValue());
            if (executorsOnSupervisor == null) {
                executorsOnSupervisor = new ArrayList<>();
                supervisorToExecutors.put(entry.getValue(), executorsOnSupervisor);
            }
            executorsOnSupervisor.add(entry.getKey());
        }
        for (Map.Entry<SupervisorDetails, List<ExecutorDetails>> entry : supervisorToExecutors.entrySet()) {
            Double supervisorUsedMemory = 0.0;
            for (ExecutorDetails executor : entry.getValue()) {
                supervisorUsedMemory += topology.getTotalMemReqTask(executor);
            }
            superToMem.put(entry.getKey(), superToMem.get(entry.getKey()) + supervisorUsedMemory);
        }
    }
    return superToMem;
}
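The two helpers above differ only in the per-executor quantity they accumulate. A possible consolidation, sketched under the assumption that a shared helper is acceptable in this test utility (getSupervisorToResourceUsage is a hypothetical name, and java.util.function.BiFunction would need to be imported):

// Hypothetical shared helper: resourcePerExecutor maps (topology, executor) to the quantity
// being accumulated, e.g. TopologyDetails::getTotalCpuReqTask or TopologyDetails::getTotalMemReqTask.
private static Map<SupervisorDetails, Double> getSupervisorToResourceUsage(
        Cluster cluster, Topologies topologies,
        BiFunction<TopologyDetails, ExecutorDetails, Double> resourcePerExecutor) {
    Map<SupervisorDetails, Double> usage = new HashMap<>();
    for (SupervisorDetails supervisor : cluster.getSupervisors().values()) {
        usage.put(supervisor, 0.0);
    }
    for (SchedulerAssignment assignment : cluster.getAssignments().values()) {
        TopologyDetails topology = topologies.getById(assignment.getTopologyId());
        for (Map.Entry<ExecutorDetails, WorkerSlot> entry : assignment.getExecutorToSlot().entrySet()) {
            SupervisorDetails supervisor = cluster.getSupervisorById(entry.getValue().getNodeId());
            usage.merge(supervisor, resourcePerExecutor.apply(topology, entry.getKey()), Double::sum);
        }
    }
    return usage;
}

The CPU and memory variants would then reduce to one-line delegations, e.g. getSupervisorToResourceUsage(cluster, topologies, TopologyDetails::getTotalCpuReqTask).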
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
In class TestStatsUtil, method makeTopoInfo:
public void makeTopoInfo() {
    List<Object> hostPort = new ArrayList<Object>();
    hostPort.add("node1");
    hostPort.add(new Long(1));
    exec2NodePort.put(makeExecutorId(1, 1), hostPort);
    nodeHost.put("node1", "host1");
    nodeHost.put("node2", "host2");
    nodeHost.put("node3", "host3");
    List<Integer> exec1 = new ArrayList<Integer>();
    exec1.add(1);
    exec1.add(1);
    HashMap<String, Object> exec1Beat = new HashMap<String, Object>();
    exec1Beat.put("uptime", 100);
    // should not be returned since this executor is not part of the topology's assignment
    List<Integer> exec2 = new ArrayList<Integer>();
    exec2.add(2);
    exec2.add(4);
    HashMap<String, Object> exec2Beat = new HashMap<String, Object>();
    exec2Beat.put("uptime", 200);
    Map<String, Object> beat1 = new HashMap<String, Object>();
    beat1.put("heartbeat", exec1Beat);
    Map<String, Object> beat2 = new HashMap<String, Object>();
    beat2.put("heartbeat", exec2Beat);
    beats.put(exec1, beat1);
    beats.put(exec2, beat2);
    task2Component.put(1, "my-component");
    task2Component.put(2, "__sys1");
    task2Component.put(3, "__sys2");
    task2Component.put(4, "__sys3");
    task2Component.put(5, "__sys4");
    task2Component.put(6, "__sys4");
    task2Component.put(7, "my-component2");
    WorkerResources ws1 = new WorkerResources();
    ws1.set_mem_on_heap(1);
    ws1.set_mem_off_heap(2);
    ws1.set_cpu(3);
    worker2Resources.put(new WorkerSlot("node1", 1), ws1);
    WorkerResources ws2 = new WorkerResources();
    ws2.set_mem_on_heap(4);
    ws2.set_mem_off_heap(8);
    ws2.set_cpu(12);
    worker2Resources.put(new WorkerSlot("node2", 2), ws2);
    WorkerResources ws3 = new WorkerResources();
    ws3.set_mem_on_heap(16);
    ws3.set_mem_off_heap(32);
    ws3.set_cpu(48);
    worker2Resources.put(new WorkerSlot("node3", 3), ws3);
}
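A small cross-check over the worker2Resources fixture built above, assuming the Thrift-generated getters on WorkerResources (get_mem_on_heap, get_mem_off_heap, get_cpu); with ws1, ws2 and ws3 as defined, the totals come out to 21 on-heap, 42 off-heap and 63 CPU:

// Sum the resources assigned across all worker slots in the fixture.
double totalOnHeap = 0;
double totalOffHeap = 0;
double totalCpu = 0;
for (WorkerResources resources : worker2Resources.values()) {
    totalOnHeap += resources.get_mem_on_heap();
    totalOffHeap += resources.get_mem_off_heap();
    totalCpu += resources.get_cpu();
}
// Expected with the three workers above: totalOnHeap == 21, totalOffHeap == 42, totalCpu == 63.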
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
In class IsolationScheduler, method hostAssignableSlots:
// returns list of list of slots, reverse sorted by number of slots
private LinkedList<HostAssignableSlots> hostAssignableSlots(Cluster cluster) {
    List<WorkerSlot> assignableSlots = cluster.getAssignableSlots();
    Map<String, List<WorkerSlot>> hostAssignableSlots = new HashMap<String, List<WorkerSlot>>();
    for (WorkerSlot slot : assignableSlots) {
        String host = cluster.getHost(slot.getNodeId());
        List<WorkerSlot> slots = hostAssignableSlots.get(host);
        if (slots == null) {
            slots = new ArrayList<WorkerSlot>();
            hostAssignableSlots.put(host, slots);
        }
        slots.add(slot);
    }
    List<HostAssignableSlots> sortHostAssignSlots = new ArrayList<HostAssignableSlots>();
    for (Map.Entry<String, List<WorkerSlot>> entry : hostAssignableSlots.entrySet()) {
        sortHostAssignSlots.add(new HostAssignableSlots(entry.getKey(), entry.getValue()));
    }
    Collections.sort(sortHostAssignSlots, new Comparator<HostAssignableSlots>() {
        @Override
        public int compare(HostAssignableSlots o1, HostAssignableSlots o2) {
            return o2.getWorkerSlots().size() - o1.getWorkerSlots().size();
        }
    });
    Collections.shuffle(sortHostAssignSlots);
    return new LinkedList<HostAssignableSlots>(sortHostAssignSlots);
}
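A minimal consumption sketch, assuming the caller simply wants free slots spread across hosts; the workersNeeded demand and variable names are made up for illustration. Because Collections.shuffle runs after the sort in the method above, the returned queue is not strictly in largest-first order, and this sketch does not depend on one:

// Hypothetical caller: take at most one free slot per host until the demand is met.
LinkedList<HostAssignableSlots> hostSlots = hostAssignableSlots(cluster);
List<WorkerSlot> chosen = new ArrayList<WorkerSlot>();
int workersNeeded = 4; // assumed demand for the example
while (workersNeeded > 0 && !hostSlots.isEmpty()) {
    HostAssignableSlots candidate = hostSlots.poll();
    List<WorkerSlot> freeSlots = candidate.getWorkerSlots();
    if (!freeSlots.isEmpty()) {
        chosen.add(freeSlots.remove(0));
        workersNeeded--;
    }
}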
use of org.apache.storm.scheduler.WorkerSlot in project storm by apache.
In class IsolationScheduler, method hostAssignments:
private Map<String, List<AssignmentInfo>> hostAssignments(Cluster cluster) {
    Collection<SchedulerAssignment> assignmentValues = cluster.getAssignments().values();
    Map<String, List<AssignmentInfo>> hostAssignments = new HashMap<String, List<AssignmentInfo>>();
    for (SchedulerAssignment sa : assignmentValues) {
        Map<WorkerSlot, List<ExecutorDetails>> slotExecutors = Utils.reverseMap(sa.getExecutorToSlot());
        Set<Map.Entry<WorkerSlot, List<ExecutorDetails>>> entries = slotExecutors.entrySet();
        for (Map.Entry<WorkerSlot, List<ExecutorDetails>> entry : entries) {
            WorkerSlot slot = entry.getKey();
            List<ExecutorDetails> executors = entry.getValue();
            String host = cluster.getHost(slot.getNodeId());
            AssignmentInfo ass = new AssignmentInfo(slot, sa.getTopologyId(), new HashSet<ExecutorDetails>(executors));
            List<AssignmentInfo> executorList = hostAssignments.get(host);
            if (executorList == null) {
                executorList = new ArrayList<AssignmentInfo>();
                hostAssignments.put(host, executorList);
            }
            executorList.add(ass);
        }
    }
    return hostAssignments;
}
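The method relies on Utils.reverseMap to turn each assignment's executor-to-slot map into slot-to-executors groups before wrapping every occupied slot in an AssignmentInfo. A short hypothetical consumer of the result, counting how many occupied slots each host carries (not part of IsolationScheduler):

// Hypothetical consumer: number of occupied worker slots per host
// (one AssignmentInfo per occupied slot in the map returned above).
Map<String, Integer> usedSlotsPerHost = new HashMap<String, Integer>();
for (Map.Entry<String, List<AssignmentInfo>> entry : hostAssignments(cluster).entrySet()) {
    usedSlotsPerHost.put(entry.getKey(), entry.getValue().size());
}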