Use of org.apache.storm.generated.WorkerResources in project storm by apache.
Example: class Nimbus, method computeNewSchedulerAssignments.
private Map<String, SchedulerAssignment> computeNewSchedulerAssignments(Map<String, Assignment> existingAssignments,
        Topologies topologies, Map<String, StormBase> bases, String scratchTopologyId)
        throws KeyNotFoundException, AuthorizationException, InvalidTopologyException, IOException {
    // Executors each topology should be running, keyed by topology id.
    Map<String, Set<List<Integer>>> topoToExec = computeTopologyToExecutors(bases);
    // Refresh executor heartbeat state for the existing assignments.
    updateAllHeartbeats(existingAssignments, topoToExec);
    // Executors confirmed alive, and supervisor ports whose workers have died.
    Map<String, Set<List<Integer>>> topoToAliveExecutors = computeTopologyToAliveExecutors(existingAssignments, topologies, topoToExec, scratchTopologyId);
    Map<String, Set<Long>> supervisorToDeadPorts = computeSupervisorToDeadPorts(existingAssignments, topoToExec, topoToAliveExecutors);
    Map<String, SchedulerAssignmentImpl> topoToSchedAssignment = computeTopologyToSchedulerAssignment(existingAssignments, topoToAliveExecutors);
    // Topologies that are missing executors or workers and therefore need (re)scheduling.
    Set<String> missingAssignmentTopologies = new HashSet<>();
    for (TopologyDetails topo : topologies.getTopologies()) {
        String id = topo.getId();
        Set<List<Integer>> allExecs = topoToExec.get(id);
        Set<List<Integer>> aliveExecs = topoToAliveExecutors.get(id);
        int numDesiredWorkers = topo.getNumWorkers();
        int numAssignedWorkers = numUsedWorkers(topoToSchedAssignment.get(id));
        if (allExecs == null || allExecs.isEmpty() || !allExecs.equals(aliveExecs) || numDesiredWorkers > numAssignedWorkers) {
            // We have something to schedule...
            missingAssignmentTopologies.add(id);
        }
    }
    // Build the cluster view and run the pluggable scheduler over it.
    Map<String, SupervisorDetails> supervisors = readAllSupervisorDetails(supervisorToDeadPorts, topologies, missingAssignmentTopologies);
    Cluster cluster = new Cluster(inimbus, supervisors, topoToSchedAssignment, conf);
    cluster.setStatusMap(idToSchedStatus.get());
    scheduler.schedule(topologies, cluster);
    // Merge with existing statuses.
    idToSchedStatus.set(merge(idToSchedStatus.get(), cluster.getStatusMap()));
    nodeIdToResources.set(cluster.getSupervisorsResourcesMap());
    if (!Utils.getBoolean(conf.get(Config.SCHEDULER_DISPLAY_RESOURCE), false)) {
        cluster.updateAssignedMemoryForTopologyAndSupervisor(topologies);
    }
    // This is a hack for non-ras scheduler topology and worker resources:
    // convert the raw Double[] arrays from the cluster into typed TopologyResources objects.
    Map<String, TopologyResources> resources = new HashMap<>();
    for (Entry<String, Double[]> uglyResources : cluster.getTopologyResourcesMap().entrySet()) {
        // r holds the topology's requested and assigned memory/CPU totals.
        Double[] r = uglyResources.getValue();
        resources.put(uglyResources.getKey(), new TopologyResources(r[0], r[1], r[2], r[3], r[4], r[5]));
    }
    idToResources.getAndAccumulate(resources, (orig, update) -> merge(orig, update));
    // Likewise convert per-slot Double[] arrays into WorkerResources objects.
    Map<String, Map<WorkerSlot, WorkerResources>> workerResources = new HashMap<>();
    for (Entry<String, Map<WorkerSlot, Double[]>> uglyWorkerResources : cluster.getWorkerResourcesMap().entrySet()) {
        Map<WorkerSlot, WorkerResources> slotToResources = new HashMap<>();
        for (Entry<WorkerSlot, Double[]> uglySlotToResources : uglyWorkerResources.getValue().entrySet()) {
            // Per-slot Double[]: {on-heap memory, off-heap memory, cpu}.
            Double[] r = uglySlotToResources.getValue();
            WorkerResources wr = new WorkerResources();
            wr.set_mem_on_heap(r[0]);
            wr.set_mem_off_heap(r[1]);
            wr.set_cpu(r[2]);
            slotToResources.put(uglySlotToResources.getKey(), wr);
        }
        workerResources.put(uglyWorkerResources.getKey(), slotToResources);
    }
    idToWorkerResources.getAndAccumulate(workerResources, (orig, update) -> merge(orig, update));
    return cluster.getAssignments();
}
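
For comparison, below is a minimal, self-contained sketch of the same Double[]-to-WorkerResources conversion outside of Nimbus. It assumes the standard Thrift-generated getters on WorkerResources (get_mem_on_heap, get_mem_off_heap, get_cpu); the helper name fromResourceArray and the sample values are illustrative only and do not exist in the Storm codebase.

import org.apache.storm.generated.WorkerResources;

public class WorkerResourcesSketch {
    // Hypothetical helper mirroring the conversion loop above:
    // index 0 = on-heap memory, index 1 = off-heap memory, index 2 = CPU.
    static WorkerResources fromResourceArray(Double[] r) {
        WorkerResources wr = new WorkerResources();
        wr.set_mem_on_heap(r[0]);
        wr.set_mem_off_heap(r[1]);
        wr.set_cpu(r[2]);
        return wr;
    }

    public static void main(String[] args) {
        // Illustrative values only.
        WorkerResources wr = fromResourceArray(new Double[] {768.0, 256.0, 50.0});
        // Thrift-generated getters (assumed) read the values back.
        System.out.println("on-heap mem:  " + wr.get_mem_on_heap());
        System.out.println("off-heap mem: " + wr.get_mem_off_heap());
        System.out.println("cpu:          " + wr.get_cpu());
    }
}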