Use of org.apache.storm.generated.SupervisorSummary in project storm by apache.
Class UIHelpers, method getClusterSummary.
/**
 * Converts a Thrift call result into a map suitable for the UI/API.
 * @param clusterSummary cluster summary obtained from Nimbus
 * @param user user making the request
 * @param conf Storm configuration
 * @return cluster summary for display on the UI or for monitoring via the API
 */
public static Map<String, Object> getClusterSummary(ClusterSummary clusterSummary, String user, Map<String, Object> conf) {
    Map<String, Object> result = new HashMap<>();
    if (MEMORIZED_VERSIONS.get() == null) {
        // Races are okay; this is just to avoid extra work on each page load.
        NavigableMap<String, IVersionInfo> versionsMap = Utils.getAlternativeVersionsMap(conf);
        List<Map<String, String>> versionList = new ArrayList<>();
        for (Map.Entry<String, IVersionInfo> entry : versionsMap.entrySet()) {
            Map<String, String> single = new HashMap<>(toJsonStruct(entry.getValue()));
            single.put("versionMatch", entry.getKey());
            versionList.add(single);
        }
        MEMORIZED_VERSIONS.set(versionList);
    }
    List<Map<String, String>> versions = MEMORIZED_VERSIONS.get();
    if (!versions.isEmpty()) {
        result.put("alternativeWorkerVersions", versions);
    }
    if (MEMORIZED_FULL_VERSION.get() == null) {
        MEMORIZED_FULL_VERSION.set(toJsonStruct(VersionInfo.OUR_FULL_VERSION));
    }
    result.put("user", user);
    result.put("stormVersion", VersionInfo.getVersion());
    result.put("stormVersionInfo", MEMORIZED_FULL_VERSION.get());
    List<SupervisorSummary> supervisorSummaries = clusterSummary.get_supervisors();
    result.put("supervisors", supervisorSummaries.size());
    result.put("topologies", clusterSummary.get_topologies_size());
    int usedSlots = supervisorSummaries.stream().mapToInt(SupervisorSummary::get_num_used_workers).sum();
    result.put("slotsUsed", usedSlots);
    int totalSlots = supervisorSummaries.stream().mapToInt(SupervisorSummary::get_num_workers).sum();
    result.put("slotsTotal", totalSlots);
    result.put("slotsFree", totalSlots - usedSlots);
    List<TopologySummary> topologySummaries = clusterSummary.get_topologies();
    int totalTasks = topologySummaries.stream().mapToInt(TopologySummary::get_num_tasks).sum();
    result.put("tasksTotal", totalTasks);
    int totalExecutors = topologySummaries.stream().mapToInt(TopologySummary::get_num_executors).sum();
    result.put("executorsTotal", totalExecutors);
    double supervisorTotalMemory = supervisorSummaries.stream()
        .mapToDouble(x -> x.get_total_resources().getOrDefault(Constants.COMMON_TOTAL_MEMORY_RESOURCE_NAME,
            x.get_total_resources().get(Config.SUPERVISOR_MEMORY_CAPACITY_MB)))
        .sum();
    result.put("totalMem", supervisorTotalMemory);
    double supervisorTotalCpu = supervisorSummaries.stream()
        .mapToDouble(x -> x.get_total_resources().getOrDefault(Constants.COMMON_CPU_RESOURCE_NAME,
            x.get_total_resources().get(Config.SUPERVISOR_CPU_CAPACITY)))
        .sum();
    result.put("totalCpu", supervisorTotalCpu);
    double supervisorUsedMemory = supervisorSummaries.stream().mapToDouble(SupervisorSummary::get_used_mem).sum();
    result.put("availMem", supervisorTotalMemory - supervisorUsedMemory);
    double supervisorUsedCpu = supervisorSummaries.stream().mapToDouble(SupervisorSummary::get_used_cpu).sum();
    result.put("availCpu", supervisorTotalCpu - supervisorUsedCpu);
    result.put("fragmentedMem", supervisorSummaries.stream().mapToDouble(SupervisorSummary::get_fragmented_mem).sum());
    result.put("fragmentedCpu", supervisorSummaries.stream().mapToDouble(SupervisorSummary::get_fragmented_cpu).sum());
    result.put("schedulerDisplayResource", conf.get(DaemonConfig.SCHEDULER_DISPLAY_RESOURCE));
    result.put("memAssignedPercentUtil", supervisorTotalMemory > 0
        ? StatsUtil.floatStr((supervisorUsedMemory * 100.0) / supervisorTotalMemory) : "0.0");
    result.put("cpuAssignedPercentUtil", supervisorTotalCpu > 0
        ? StatsUtil.floatStr((supervisorUsedCpu * 100.0) / supervisorTotalCpu) : "0.0");
    result.put("bugtracker-url", conf.get(DaemonConfig.UI_PROJECT_BUGTRACKER_URL));
    result.put("central-log-url", conf.get(DaemonConfig.UI_CENTRAL_LOGGING_URL));
    Map<String, Double> usedGenericResources = new HashMap<>();
    Map<String, Double> totalGenericResources = new HashMap<>();
    for (SupervisorSummary ss : supervisorSummaries) {
        usedGenericResources = NormalizedResourceRequest.addResourceMap(usedGenericResources, ss.get_used_generic_resources());
        totalGenericResources = NormalizedResourceRequest.addResourceMap(totalGenericResources, ss.get_total_resources());
    }
    Map<String, Double> availGenericResources = NormalizedResourceRequest.subtractResourceMap(totalGenericResources, usedGenericResources);
    result.put("availGenerics", prettifyGenericResources(availGenericResources));
    result.put("totalGenerics", prettifyGenericResources(totalGenericResources));
    return result;
}
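
For context, a minimal sketch of how this helper might be driven from client code, assuming a Storm 2.x package layout and a reachable Nimbus. NimbusClient, Utils.readStormConfig, and getClusterInfo are Storm's client-side API; the user name and the printed keys are placeholders.

import java.util.Map;
import org.apache.storm.daemon.ui.UIHelpers;
import org.apache.storm.generated.ClusterSummary;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class ClusterSummaryExample {
    public static void main(String[] args) throws Exception {
        // Load storm.yaml plus defaults; assumes a Nimbus host is configured and reachable.
        Map<String, Object> conf = Utils.readStormConfig();
        try (NimbusClient nimbus = NimbusClient.getConfiguredClient(conf)) {
            // getClusterInfo() is the Thrift call whose result getClusterSummary() converts.
            ClusterSummary summary = nimbus.getClient().getClusterInfo();
            Map<String, Object> uiMap = UIHelpers.getClusterSummary(summary, "some-user", conf);
            System.out.println(uiMap.get("slotsUsed") + "/" + uiMap.get("slotsTotal") + " slots used");
        }
    }
}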
Use of org.apache.storm.generated.SupervisorSummary in project storm by apache.
Class Nimbus, method getClusterInfoImpl.
private ClusterSummary getClusterInfoImpl() throws Exception {
    IStormClusterState state = stormClusterState;
    Map<String, SupervisorInfo> infos = state.allSupervisorInfo();
    List<SupervisorSummary> summaries = new ArrayList<>(infos.size());
    for (Entry<String, SupervisorInfo> entry : infos.entrySet()) {
        summaries.add(makeSupervisorSummary(entry.getKey(), entry.getValue()));
    }
    int uptime = this.uptime.upTime();
    List<NimbusSummary> nimbuses = state.nimbuses();
    // Update the isLeader field for each nimbus summary.
    NimbusInfo leader = leaderElector.getLeader();
    for (NimbusSummary nimbusSummary : nimbuses) {
        nimbusSummary.set_uptime_secs(Time.deltaSecs(nimbusSummary.get_uptime_secs()));
        // Sometimes leader election indicates the current nimbus is the leader, but the host was
        // recently restarted and is currently not a leader.
        boolean isLeader = leader.getHost().equals(nimbusSummary.get_host()) && leader.getPort() == nimbusSummary.get_port();
        if (isLeader && this.nimbusHostPortInfo.getHost().equals(leader.getHost()) && !this.isLeader()) {
            isLeader = false;
        }
        nimbusSummary.set_isLeader(isLeader);
    }
    List<TopologySummary> topologySummaries = getTopologySummariesImpl();
    ClusterSummary ret = new ClusterSummary(summaries, topologySummaries, nimbuses);
    return ret;
}
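
getClusterInfoImpl backs the public getClusterInfo Thrift call. Below is a hedged sketch of consuming the nimbus list it populates, assuming Storm's underscore-style Thrift getters (get_nimbuses, is_isLeader); the LeaderLookup helper itself is hypothetical.

import java.util.Optional;
import org.apache.storm.generated.ClusterSummary;
import org.apache.storm.generated.NimbusSummary;

final class LeaderLookup {
    // Pick out the nimbus summary flagged as leader by the loop above.
    static Optional<NimbusSummary> findLeader(ClusterSummary summary) {
        return summary.get_nimbuses().stream()
            .filter(NimbusSummary::is_isLeader)
            .findFirst();
    }
}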
Use of org.apache.storm.generated.SupervisorSummary in project storm by apache.
Class Nimbus, method extractClusterMetrics.
private static List<DataPoint> extractClusterMetrics(ClusterSummary summ) {
    List<DataPoint> ret = new ArrayList<>();
    ret.add(new DataPoint("supervisors", summ.get_supervisors_size()));
    ret.add(new DataPoint("topologies", summ.get_topologies_size()));
    int totalSlots = 0;
    int usedSlots = 0;
    for (SupervisorSummary sup : summ.get_supervisors()) {
        usedSlots += sup.get_num_used_workers();
        totalSlots += sup.get_num_workers();
    }
    ret.add(new DataPoint("slotsTotal", totalSlots));
    ret.add(new DataPoint("slotsUsed", usedSlots));
    ret.add(new DataPoint("slotsFree", totalSlots - usedSlots));
    int totalExecutors = 0;
    int totalTasks = 0;
    for (TopologySummary topo : summ.get_topologies()) {
        totalExecutors += topo.get_num_executors();
        totalTasks += topo.get_num_tasks();
    }
    ret.add(new DataPoint("executorsTotal", totalExecutors));
    ret.add(new DataPoint("tasksTotal", totalTasks));
    return ret;
}
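
For comparison, a hedged sketch that computes the same slot totals with streams instead of the explicit loops above. The SlotTotals class is a hypothetical utility, not part of Nimbus, and relies only on getters already shown in this snippet.

import org.apache.storm.generated.ClusterSummary;
import org.apache.storm.generated.SupervisorSummary;

final class SlotTotals {
    // Sum of all worker slots advertised by the supervisors.
    static int totalSlots(ClusterSummary summ) {
        return summ.get_supervisors().stream().mapToInt(SupervisorSummary::get_num_workers).sum();
    }

    // Sum of the slots currently occupied by workers.
    static int usedSlots(ClusterSummary summ) {
        return summ.get_supervisors().stream().mapToInt(SupervisorSummary::get_num_used_workers).sum();
    }
}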
Use of org.apache.storm.generated.SupervisorSummary in project storm by apache.
Class Nimbus, method getSupervisorPageInfo.
@Override
public SupervisorPageInfo getSupervisorPageInfo(String superId, String host, boolean includeSys) throws NotAliveException, AuthorizationException, TException {
    try {
        getSupervisorPageInfoCalls.mark();
        IStormClusterState state = stormClusterState;
        Map<String, SupervisorInfo> superInfos = state.allSupervisorInfo();
        Map<String, List<String>> hostToSuperId = new HashMap<>();
        for (Entry<String, SupervisorInfo> entry : superInfos.entrySet()) {
            String h = entry.getValue().get_hostname();
            List<String> superIds = hostToSuperId.get(h);
            if (superIds == null) {
                superIds = new ArrayList<>();
                hostToSuperId.put(h, superIds);
            }
            superIds.add(entry.getKey());
        }
        List<String> supervisorIds = null;
        if (superId == null) {
            supervisorIds = hostToSuperId.get(host);
        } else {
            supervisorIds = Arrays.asList(superId);
        }
        SupervisorPageInfo pageInfo = new SupervisorPageInfo();
        Map<String, Assignment> topoToAssignment = state.assignmentsInfo();
        for (String sid : supervisorIds) {
            SupervisorInfo info = superInfos.get(sid);
            LOG.info("SIDL {} SI: {} ALL: {}", sid, info, superInfos);
            SupervisorSummary supSum = makeSupervisorSummary(sid, info);
            pageInfo.add_to_supervisor_summaries(supSum);
            List<String> superTopologies = topologiesOnSupervisor(topoToAssignment, sid);
            Set<String> userTopologies = filterAuthorized("getTopology", superTopologies);
            for (String topoId : superTopologies) {
                CommonTopoInfo common = getCommonTopoInfo(topoId, "getSupervisorPageInfo");
                String topoName = common.topoName;
                Assignment assignment = common.assignment;
                Map<List<Integer>, Map<String, Object>> beats = common.beats;
                Map<Integer, String> taskToComp = common.taskToComponent;
                Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
                Map<String, String> nodeToHost;
                if (assignment != null) {
                    Map<List<Long>, NodeInfo> execToNodeInfo = assignment.get_executor_node_port();
                    for (Entry<List<Long>, NodeInfo> entry : execToNodeInfo.entrySet()) {
                        NodeInfo ni = entry.getValue();
                        List<Object> nodePort = Arrays.asList(ni.get_node(), ni.get_port_iterator().next());
                        exec2NodePort.put(entry.getKey(), nodePort);
                    }
                    nodeToHost = assignment.get_node_host();
                } else {
                    nodeToHost = Collections.emptyMap();
                }
                Map<WorkerSlot, WorkerResources> workerResources = getWorkerResourcesForTopology(topoId);
                boolean isAllowed = userTopologies.contains(topoId);
                String owner = (common.base == null) ? null : common.base.get_owner();
                for (WorkerSummary workerSummary : StatsUtil.aggWorkerStats(topoId, topoName, taskToComp, beats, exec2NodePort,
                        nodeToHost, workerResources, includeSys, isAllowed, sid, owner)) {
                    pageInfo.add_to_worker_summaries(workerSummary);
                }
            }
        }
        return pageInfo;
    } catch (Exception e) {
        LOG.warn("Get super page info exception. (super id='{}')", superId, e);
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
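
A minimal client-side sketch of the Thrift call this method serves, assuming a configured and reachable Nimbus. The host name is a placeholder, and get_supervisor_summaries follows Storm's generated-getter naming; as in the server code above, either a supervisor id or a host may be supplied.

import java.util.Map;
import org.apache.storm.generated.SupervisorPageInfo;
import org.apache.storm.generated.SupervisorSummary;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class SupervisorPageExample {
    public static void main(String[] args) throws Exception {
        Map<String, Object> conf = Utils.readStormConfig();
        try (NimbusClient nimbus = NimbusClient.getConfiguredClient(conf)) {
            // Look up by host ("node1.example.com" is a placeholder); pass a supervisor id
            // instead of null to look up by id. The last flag controls system components.
            SupervisorPageInfo page = nimbus.getClient().getSupervisorPageInfo(null, "node1.example.com", false);
            for (SupervisorSummary s : page.get_supervisor_summaries()) {
                System.out.println(s.get_host() + ": " + s.get_num_used_workers() + "/" + s.get_num_workers());
            }
        }
    }
}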
Use of org.apache.storm.generated.SupervisorSummary in project storm by apache.
Class Nimbus, method makeSupervisorSummary.
private SupervisorSummary makeSupervisorSummary(String supervisorId, SupervisorInfo info) {
    Set<String> blacklistedSupervisorIds = Collections.emptySet();
    if (scheduler instanceof BlacklistScheduler) {
        BlacklistScheduler bs = (BlacklistScheduler) scheduler;
        blacklistedSupervisorIds = bs.getBlacklistSupervisorIds();
    }
    LOG.debug("INFO: {} ID: {}", info, supervisorId);
    int numPorts = 0;
    if (info.is_set_meta()) {
        numPorts = info.get_meta_size();
    }
    int numUsedPorts = 0;
    if (info.is_set_used_ports()) {
        numUsedPorts = info.get_used_ports_size();
    }
    LOG.debug("NUM PORTS: {}", numPorts);
    SupervisorSummary ret = new SupervisorSummary(info.get_hostname(), (int) info.get_uptime_secs(),
        numPorts, numUsedPorts, supervisorId);
    ret.set_total_resources(info.get_resources_map());
    SupervisorResources resources = nodeIdToResources.get().get(supervisorId);
    if (resources != null && underlyingScheduler instanceof ResourceAwareScheduler) {
        ret.set_used_mem(resources.getUsedMem());
        ret.set_used_cpu(resources.getUsedCpu());
        ret.set_used_generic_resources(resources.getUsedGenericResources());
        if (isFragmented(resources)) {
            final double availableCpu = resources.getAvailableCpu();
            if (availableCpu < 0) {
                LOG.warn("Negative fragmented CPU on {}", supervisorId);
            }
            ret.set_fragmented_cpu(availableCpu);
            final double availableMem = resources.getAvailableMem();
            if (availableMem < 0) {
                LOG.warn("Negative fragmented Mem on {}", supervisorId);
            }
            ret.set_fragmented_mem(availableMem);
        }
    }
    if (info.is_set_version()) {
        ret.set_version(info.get_version());
    }
    if (blacklistedSupervisorIds.contains(supervisorId)) {
        ret.set_blacklisted(true);
    } else {
        ret.set_blacklisted(false);
    }
    return ret;
}
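
A hedged sketch of reading back the fields this method sets; SupervisorReport is a hypothetical helper, and the getter names (get_supervisor_id, is_set_used_mem, is_blacklisted) assume Storm's underscore-style Thrift accessors matching the setters above.

import org.apache.storm.generated.SupervisorSummary;

final class SupervisorReport {
    // Render a one-line status string from a SupervisorSummary built by makeSupervisorSummary.
    static String describe(SupervisorSummary s) {
        StringBuilder sb = new StringBuilder(s.get_supervisor_id())
            .append(" slots=").append(s.get_num_used_workers()).append('/').append(s.get_num_workers());
        if (s.is_set_used_mem()) {
            // used_mem is only populated when the resource-aware scheduler is active.
            sb.append(" usedMemMB=").append(s.get_used_mem());
        }
        if (s.is_blacklisted()) {
            sb.append(" [blacklisted]");
        }
        return sb.toString();
    }
}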