Use of org.apache.storm.scheduler.TopologyDetails in project storm by apache.
The class RAS_Node, method getMemoryUsedByWorker.
/**
 * Get the amount of memory used by a worker.
 */
public double getMemoryUsedByWorker(WorkerSlot ws) {
    TopologyDetails topo = findTopologyUsingWorker(ws);
    if (topo == null) {
        return 0.0;
    }
    Collection<ExecutorDetails> execs = getExecutors(ws, _cluster);
    double totalMemoryUsed = 0.0;
    // Sum the memory requested by every executor assigned to this slot.
    for (ExecutorDetails exec : execs) {
        totalMemoryUsed += topo.getTotalMemReqTask(exec);
    }
    return totalMemoryUsed;
}
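The per-executor summation above is the pattern callers typically build on. As a minimal sketch (not part of RAS_Node), a hypothetical helper could total the memory in use on the whole node, assuming a usedSlots() accessor that returns the node's currently occupied WorkerSlots:

// Hypothetical helper, not in the original class: total memory used across this node.
// Assumes usedSlots() returns the WorkerSlots currently assigned on this RAS_Node.
public double getTotalMemoryUsedOnNode() {
    double total = 0.0;
    for (WorkerSlot ws : usedSlots()) {
        total += getMemoryUsedByWorker(ws);
    }
    return total;
}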
Use of org.apache.storm.scheduler.TopologyDetails in project storm by apache.
The class RAS_Node, method free.
/**
 * Frees a single slot in this node.
 * @param ws the slot to free
 */
public void free(WorkerSlot ws) {
    LOG.info("freeing WorkerSlot {} on node {}", ws, _hostname);
    if (!_slots.containsKey(ws.getId())) {
        throw new IllegalArgumentException("Tried to free a slot " + ws + " that was not part of this node " + _nodeId);
    }
    TopologyDetails topo = findTopologyUsingWorker(ws);
    if (topo == null) {
        throw new IllegalArgumentException("Tried to free a slot " + ws + " that was already free!");
    }
    double memUsed = getMemoryUsedByWorker(ws);
    double cpuUsed = getCpuUsedByWorker(ws);
    freeMemory(memUsed);
    freeCPU(cpuUsed);
    // Free the slot in the cluster.
    _cluster.freeSlot(ws);
    // Clean up internal assignments.
    _topIdToUsedSlots.get(topo.getId()).remove(ws.getId());
}
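free(ws) releases the slot's memory and CPU back to the node, frees the slot in the Cluster, and drops it from the node's internal bookkeeping. A caller that wants to evict an entire topology from the node could loop over its slots; this is a sketch under the assumption of a hypothetical getUsedSlots(topoId) that returns the topology's WorkerSlots on this node:

// Hypothetical convenience wrapper: free every slot a topology holds on this node.
// Assumes getUsedSlots(topoId) exists; requires java.util.ArrayList.
// Copy the slots first, because free(ws) mutates the underlying slot bookkeeping.
public void freeTopology(String topoId) {
    for (WorkerSlot ws : new ArrayList<>(getUsedSlots(topoId))) {
        free(ws);
    }
}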
Use of org.apache.storm.scheduler.TopologyDetails in project storm by apache.
The class RAS_Node, method getCpuUsedByWorker.
/**
 * Get the amount of CPU used by a worker.
 */
public double getCpuUsedByWorker(WorkerSlot ws) {
    TopologyDetails topo = findTopologyUsingWorker(ws);
    if (topo == null) {
        return 0.0;
    }
    Collection<ExecutorDetails> execs = getExecutors(ws, _cluster);
    double totalCpuUsed = 0.0;
    // Sum the CPU requested by every executor assigned to this slot.
    for (ExecutorDetails exec : execs) {
        totalCpuUsed += topo.getTotalCpuReqTask(exec);
    }
    return totalCpuUsed;
}
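Taken together with getMemoryUsedByWorker, this gives the two figures a resource-aware scheduler needs when deciding whether a node can host more work. An illustrative fit check (not part of RAS_Node), assuming hypothetical getAvailableMemoryResources() and getAvailableCpuResources() accessors on the node:

// Illustrative sketch, not part of the original class: would these executors fit
// in the node's spare capacity? The availability accessors are assumptions for this example.
public boolean wouldFitOnNode(TopologyDetails topo, Collection<ExecutorDetails> execs) {
    double mem = 0.0;
    double cpu = 0.0;
    for (ExecutorDetails exec : execs) {
        mem += topo.getTotalMemReqTask(exec);
        cpu += topo.getTotalCpuReqTask(exec);
    }
    return mem <= getAvailableMemoryResources() && cpu <= getAvailableCpuResources();
}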
Use of org.apache.storm.scheduler.TopologyDetails in project storm by apache.
The class Nimbus, method getResourcesForTopology.
private TopologyResources getResourcesForTopology(String topoId, StormBase base) throws NotAliveException, AuthorizationException, InvalidTopologyException, IOException {
    TopologyResources ret = idToResources.get().get(topoId);
    if (ret == null) {
        try {
            IStormClusterState state = stormClusterState;
            TopologyDetails details = readTopologyDetails(topoId, base);
            double sumOnHeap = 0.0;
            double sumOffHeap = 0.0;
            double sumCPU = 0.0;
            Assignment assignment = state.assignmentInfo(topoId, null);
            if (assignment != null) {
                if (assignment.is_set_worker_resources()) {
                    // Add up the resources actually assigned to each worker.
                    for (WorkerResources wr : assignment.get_worker_resources().values()) {
                        if (wr.is_set_cpu()) {
                            sumCPU += wr.get_cpu();
                        }
                        if (wr.is_set_mem_off_heap()) {
                            sumOffHeap += wr.get_mem_off_heap();
                        }
                        if (wr.is_set_mem_on_heap()) {
                            sumOnHeap += wr.get_mem_on_heap();
                        }
                    }
                }
            }
            ret = new TopologyResources(details.getTotalRequestedMemOnHeap(), details.getTotalRequestedMemOffHeap(), details.getTotalRequestedCpu(), sumOnHeap, sumOffHeap, sumCPU);
        } catch (KeyNotFoundException e) {
            // This can happen when a topology is first coming up.
            // It is thrown by the blobstore code.
            LOG.error("Failed to get topology details", e);
            ret = new TopologyResources(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
        }
    }
    return ret;
}
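The TopologyResources constructor takes the requested figures from TopologyDetails followed by the assigned sums computed above (on-heap memory, off-heap memory, CPU). A sketch of how a consumer might report how much of the memory request has actually been assigned; the getter names here are assumptions that mirror the constructor argument order:

// Illustrative consumer of TopologyResources; getter names are assumed, not confirmed by this excerpt.
static double assignedMemoryFraction(TopologyResources res) {
    double requested = res.getRequestedMemOnHeap() + res.getRequestedMemOffHeap();
    double assigned = res.getAssignedMemOnHeap() + res.getAssignedMemOffHeap();
    return requested > 0.0 ? assigned / requested : 0.0;
}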
Use of org.apache.storm.scheduler.TopologyDetails in project storm by apache.
The class Nimbus, method computeNewSchedulerAssignments.
private Map<String, SchedulerAssignment> computeNewSchedulerAssignments(Map<String, Assignment> existingAssignments, Topologies topologies, Map<String, StormBase> bases, String scratchTopologyId) throws KeyNotFoundException, AuthorizationException, InvalidTopologyException, IOException {
    Map<String, Set<List<Integer>>> topoToExec = computeTopologyToExecutors(bases);
    updateAllHeartbeats(existingAssignments, topoToExec);
    Map<String, Set<List<Integer>>> topoToAliveExecutors = computeTopologyToAliveExecutors(existingAssignments, topologies, topoToExec, scratchTopologyId);
    Map<String, Set<Long>> supervisorToDeadPorts = computeSupervisorToDeadPorts(existingAssignments, topoToExec, topoToAliveExecutors);
    Map<String, SchedulerAssignmentImpl> topoToSchedAssignment = computeTopologyToSchedulerAssignment(existingAssignments, topoToAliveExecutors);
    Set<String> missingAssignmentTopologies = new HashSet<>();
    for (TopologyDetails topo : topologies.getTopologies()) {
        String id = topo.getId();
        Set<List<Integer>> allExecs = topoToExec.get(id);
        Set<List<Integer>> aliveExecs = topoToAliveExecutors.get(id);
        int numDesiredWorkers = topo.getNumWorkers();
        int numAssignedWorkers = numUsedWorkers(topoToSchedAssignment.get(id));
        if (allExecs == null || allExecs.isEmpty() || !allExecs.equals(aliveExecs) || numDesiredWorkers > numAssignedWorkers) {
            // We have something to schedule...
            missingAssignmentTopologies.add(id);
        }
    }
    Map<String, SupervisorDetails> supervisors = readAllSupervisorDetails(supervisorToDeadPorts, topologies, missingAssignmentTopologies);
    Cluster cluster = new Cluster(inimbus, supervisors, topoToSchedAssignment, conf);
    cluster.setStatusMap(idToSchedStatus.get());
    scheduler.schedule(topologies, cluster);
    // Merge with existing statuses.
    idToSchedStatus.set(merge(idToSchedStatus.get(), cluster.getStatusMap()));
    nodeIdToResources.set(cluster.getSupervisorsResourcesMap());
    if (!Utils.getBoolean(conf.get(Config.SCHEDULER_DISPLAY_RESOURCE), false)) {
        cluster.updateAssignedMemoryForTopologyAndSupervisor(topologies);
    }
    // This is a hack for non-RAS scheduler topology and worker resources.
    Map<String, TopologyResources> resources = new HashMap<>();
    for (Entry<String, Double[]> uglyResources : cluster.getTopologyResourcesMap().entrySet()) {
        Double[] r = uglyResources.getValue();
        resources.put(uglyResources.getKey(), new TopologyResources(r[0], r[1], r[2], r[3], r[4], r[5]));
    }
    idToResources.getAndAccumulate(resources, (orig, update) -> merge(orig, update));
    Map<String, Map<WorkerSlot, WorkerResources>> workerResources = new HashMap<>();
    for (Entry<String, Map<WorkerSlot, Double[]>> uglyWorkerResources : cluster.getWorkerResourcesMap().entrySet()) {
        Map<WorkerSlot, WorkerResources> slotToResources = new HashMap<>();
        for (Entry<WorkerSlot, Double[]> uglySlotToResources : uglyWorkerResources.getValue().entrySet()) {
            Double[] r = uglySlotToResources.getValue();
            WorkerResources wr = new WorkerResources();
            wr.set_mem_on_heap(r[0]);
            wr.set_mem_off_heap(r[1]);
            wr.set_cpu(r[2]);
            slotToResources.put(uglySlotToResources.getKey(), wr);
        }
        workerResources.put(uglyWorkerResources.getKey(), slotToResources);
    }
    idToWorkerResources.getAndAccumulate(workerResources, (orig, update) -> merge(orig, update));
    return cluster.getAssignments();
}
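The merge accumulator passed to getAndAccumulate is not shown in this excerpt. A minimal sketch, assuming it simply overlays the freshly computed per-topology entries onto a copy of the existing map (the real Nimbus helper may differ):

// Minimal sketch of a map-overlay merge (assumption, not the actual Nimbus implementation).
// Entries from the update replace entries with the same key; everything else is preserved.
private static <K, V> Map<K, V> merge(Map<K, V> orig, Map<K, V> update) {
    Map<K, V> ret = new HashMap<>(orig);
    ret.putAll(update);
    return ret;
}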