Use of org.apache.storm.generated.WorkerResources in project storm by apache.
The class Nimbus, method getTopologyPageInfo.
@Override
public TopologyPageInfo getTopologyPageInfo(String topoId, String window, boolean includeSys) throws NotAliveException, AuthorizationException, TException {
try {
getTopologyPageInfoCalls.mark();
CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyPageInfo");
String topoName = common.topoName;
IStormClusterState state = stormClusterState;
int launchTimeSecs = common.launchTimeSecs;
Assignment assignment = common.assignment;
Map<List<Integer>, Map<String, Object>> beats = common.beats;
Map<Integer, String> taskToComp = common.taskToComponent;
StormTopology topology = common.topology;
Map<String, Object> topoConf = common.topoConf;
StormBase base = common.base;
if (base == null) {
throw new NotAliveException(topoId);
}
Map<WorkerSlot, WorkerResources> workerToResources = getWorkerResourcesForTopology(topoId);
List<WorkerSummary> workerSummaries = null;
Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
if (assignment != null) {
Map<List<Long>, NodeInfo> execToNodeInfo = assignment.get_executor_node_port();
Map<String, String> nodeToHost = assignment.get_node_host();
for (Entry<List<Long>, NodeInfo> entry : execToNodeInfo.entrySet()) {
NodeInfo ni = entry.getValue();
List<Object> nodePort = Arrays.asList(ni.get_node(), ni.get_port_iterator().next());
exec2NodePort.put(entry.getKey(), nodePort);
}
workerSummaries = StatsUtil.aggWorkerStats(topoId, topoName, taskToComp, beats, exec2NodePort, nodeToHost, workerToResources, includeSys, true); // this is the topology page, so we know the user is authorized
}
TopologyPageInfo topoPageInfo = StatsUtil.aggTopoExecsStats(topoId, exec2NodePort, taskToComp, beats, topology, window, includeSys, state);
Map<String, Map<String, Double>> spoutResources = ResourceUtils.getSpoutsResources(topology, topoConf);
for (Entry<String, ComponentAggregateStats> entry : topoPageInfo.get_id_to_spout_agg_stats().entrySet()) {
CommonAggregateStats commonStats = entry.getValue().get_common_stats();
commonStats.set_resources_map(setResourcesDefaultIfNotSet(spoutResources, entry.getKey(), topoConf));
}
Map<String, Map<String, Double>> boltResources = ResourceUtils.getBoltsResources(topology, topoConf);
for (Entry<String, ComponentAggregateStats> entry : topoPageInfo.get_id_to_bolt_agg_stats().entrySet()) {
CommonAggregateStats commonStats = entry.getValue().get_common_stats();
commonStats.set_resources_map(setResourcesDefaultIfNotSet(boltResources, entry.getKey(), topoConf));
}
if (workerSummaries != null) {
topoPageInfo.set_workers(workerSummaries);
}
if (base.is_set_owner()) {
topoPageInfo.set_owner(base.get_owner());
}
String schedStatus = idToSchedStatus.get().get(topoId);
if (schedStatus != null) {
topoPageInfo.set_sched_status(schedStatus);
}
TopologyResources resources = getResourcesForTopology(topoId, base);
if (resources != null) {
topoPageInfo.set_requested_memonheap(resources.getRequestedMemOnHeap());
topoPageInfo.set_requested_memoffheap(resources.getRequestedMemOffHeap());
topoPageInfo.set_requested_cpu(resources.getRequestedCpu());
topoPageInfo.set_assigned_memonheap(resources.getAssignedMemOnHeap());
topoPageInfo.set_assigned_memoffheap(resources.getAssignedMemOffHeap());
topoPageInfo.set_assigned_cpu(resources.getAssignedCpu());
}
topoPageInfo.set_name(topoName);
topoPageInfo.set_status(extractStatusStr(base));
topoPageInfo.set_uptime_secs(Time.deltaSecs(launchTimeSecs));
topoPageInfo.set_topology_conf(JSONValue.toJSONString(topoConf));
topoPageInfo.set_replication_count(getBlobReplicationCount(ConfigUtils.masterStormCodeKey(topoId)));
if (base.is_set_component_debug()) {
DebugOptions debug = base.get_component_debug().get(topoId);
if (debug != null) {
topoPageInfo.set_debug_options(debug);
}
}
return topoPageInfo;
} catch (Exception e) {
LOG.warn("Get topo page info exception. (topology id='{}')", topoId, e);
if (e instanceof TException) {
throw (TException) e;
}
throw new RuntimeException(e);
}
}
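The method above fills in both the per-topology and the per-worker resource figures on TopologyPageInfo. Below is a minimal client-side sketch of reading those fields, assuming a Nimbus Thrift client obtained via org.apache.storm.utils.NimbusClient; the topology id and the ":all-time" window string are placeholders, not taken from the listing above.
import java.util.Map;
import org.apache.storm.generated.Nimbus;
import org.apache.storm.generated.TopologyPageInfo;
import org.apache.storm.generated.WorkerSummary;
import org.apache.storm.utils.NimbusClient;
import org.apache.storm.utils.Utils;

public class TopologyPageInfoExample {
    public static void main(String[] args) throws Exception {
        Map conf = Utils.readStormConfig();
        NimbusClient nimbus = NimbusClient.getConfiguredClient(conf);
        try {
            Nimbus.Client client = nimbus.getClient();
            // window ":all-time", system streams excluded
            TopologyPageInfo info = client.getTopologyPageInfo("my-topo-id", ":all-time", false);
            System.out.printf("%s: requested cpu=%.1f, assigned on-heap=%.1f MB%n",
                    info.get_name(), info.get_requested_cpu(), info.get_assigned_memonheap());
            if (info.is_set_workers()) {
                // per-worker figures populated from the WorkerResources map via aggWorkerStats
                for (WorkerSummary ws : info.get_workers()) {
                    System.out.printf("  %s:%d cpu=%.1f on-heap=%.1f MB off-heap=%.1f MB%n",
                            ws.get_host(), ws.get_port(), ws.get_assigned_cpu(),
                            ws.get_assigned_memonheap(), ws.get_assigned_memoffheap());
                }
            }
        } finally {
            nimbus.close();
        }
    }
}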
Use of org.apache.storm.generated.WorkerResources in project storm by apache.
The class ReadClusterState, method readMyExecutors.
protected Map<Integer, LocalAssignment> readMyExecutors(String stormId, String assignmentId, Assignment assignment) {
Map<Integer, LocalAssignment> portTasks = new HashMap<>();
Map<Long, WorkerResources> slotsResources = new HashMap<>();
Map<NodeInfo, WorkerResources> nodeInfoWorkerResourcesMap = assignment.get_worker_resources();
if (nodeInfoWorkerResourcesMap != null) {
for (Map.Entry<NodeInfo, WorkerResources> entry : nodeInfoWorkerResourcesMap.entrySet()) {
if (entry.getKey().get_node().equals(assignmentId)) {
Set<Long> ports = entry.getKey().get_port();
for (Long port : ports) {
slotsResources.put(port, entry.getValue());
}
}
}
}
Map<List<Long>, NodeInfo> executorNodePort = assignment.get_executor_node_port();
if (executorNodePort != null) {
for (Map.Entry<List<Long>, NodeInfo> entry : executorNodePort.entrySet()) {
if (entry.getValue().get_node().equals(assignmentId)) {
for (Long port : entry.getValue().get_port()) {
LocalAssignment localAssignment = portTasks.get(port.intValue());
if (localAssignment == null) {
List<ExecutorInfo> executors = new ArrayList<>();
localAssignment = new LocalAssignment(stormId, executors);
if (slotsResources.containsKey(port)) {
localAssignment.set_resources(slotsResources.get(port));
}
portTasks.put(port.intValue(), localAssignment);
}
List<ExecutorInfo> executorInfoList = localAssignment.get_executors();
executorInfoList.add(new ExecutorInfo(entry.getKey().get(0).intValue(), entry.getKey().get(entry.getKey().size() - 1).intValue()));
}
}
}
}
return portTasks;
}
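Each LocalAssignment built above carries the WorkerResources for its slot when the assignment includes them. Below is a minimal sketch of inspecting the result, assuming a portTasks map produced by readMyExecutors; the logging method itself is illustrative and not part of the supervisor code.
import java.util.Map;
import org.apache.storm.generated.LocalAssignment;
import org.apache.storm.generated.WorkerResources;

public class LocalAssignmentResourcesExample {
    // portTasks is assumed to come from readMyExecutors(stormId, assignmentId, assignment)
    static void logAssignedResources(Map<Integer, LocalAssignment> portTasks) {
        for (Map.Entry<Integer, LocalAssignment> entry : portTasks.entrySet()) {
            LocalAssignment la = entry.getValue();
            int numExecutors = la.get_executors().size();
            if (la.is_set_resources()) {
                WorkerResources wr = la.get_resources();
                System.out.printf("port %d: cpu=%.1f on-heap=%.1f MB off-heap=%.1f MB (%d executors)%n",
                        entry.getKey(), wr.get_cpu(), wr.get_mem_on_heap(), wr.get_mem_off_heap(), numExecutors);
            } else {
                System.out.printf("port %d: no WorkerResources recorded (%d executors)%n",
                        entry.getKey(), numExecutors);
            }
        }
    }
}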
Use of org.apache.storm.generated.WorkerResources in project storm by apache.
The class StatsUtil, method aggWorkerStats.
/**
 * Aggregate statistics per worker for a topology, optionally filtering on a specific supervisor.
 *
 * @param stormId topology id
 * @param stormName topology name
 * @param task2Component a Map of {task id -> component id}
 * @param beats a converted HashMap of executor heartbeats, {executor -> heartbeat}
 * @param exec2NodePort a Map of {executor -> [node, port]}
 * @param nodeHost a Map of {node id -> host name}
 * @param worker2Resources a Map of {worker slot -> assigned WorkerResources}
 * @param includeSys whether to include system streams
 * @param userAuthorized whether the user is authorized to view topology info
 * @param filterSupervisor if not null, only return WorkerSummaries for that supervisor
 *
 * @return a List of WorkerSummary thrift structures
 */
public static List<WorkerSummary> aggWorkerStats(String stormId, String stormName, Map<Integer, String> task2Component, Map<List<Integer>, Map<String, Object>> beats, Map<List<Long>, List<Object>> exec2NodePort, Map<String, String> nodeHost, Map<WorkerSlot, WorkerResources> worker2Resources, boolean includeSys, boolean userAuthorized, String filterSupervisor) {
// host,port => WorkerSummary
HashMap<WorkerSlot, WorkerSummary> workerSummaryMap = new HashMap<>();
if (exec2NodePort != null) {
// for each executor -> node+port pair
for (Map.Entry<List<Long>, List<Object>> execNodePort : exec2NodePort.entrySet()) {
List<Object> nodePort = execNodePort.getValue();
String node = (String) nodePort.get(0);
Long port = (Long) nodePort.get(1);
String host = nodeHost.get(node);
WorkerSlot slot = new WorkerSlot(node, port);
WorkerResources resources = worker2Resources.get(slot);
if (filterSupervisor == null || node.equals(filterSupervisor)) {
WorkerSummary ws = workerSummaryMap.get(slot);
if (ws == null) {
ws = new WorkerSummary();
ws.set_host(host);
ws.set_port(port.intValue());
ws.set_supervisor_id(node);
ws.set_topology_id(stormId);
ws.set_topology_name(stormName);
ws.set_num_executors(0);
if (resources != null) {
ws.set_assigned_memonheap(resources.get_mem_on_heap());
ws.set_assigned_memoffheap(resources.get_mem_off_heap());
ws.set_assigned_cpu(resources.get_cpu());
} else {
ws.set_assigned_memonheap(0);
ws.set_assigned_memoffheap(0);
ws.set_assigned_cpu(0);
}
ws.set_component_to_num_tasks(new HashMap<String, Long>());
workerSummaryMap.put(slot, ws);
}
Map<String, Long> componentToNumTasks = ws.get_component_to_num_tasks();
// gets min/max task pairs (executors): [1 1] [2 3] ...
List<Long> exec = execNodePort.getKey();
// get executor heartbeat
int hbeatSecs = 0;
if (beats != null) {
Map<String, Object> beat = beats.get(convertExecutor(exec));
if (beat != null) {
Map<String, Object> hbeat = (Map<String, Object>) beat.get("heartbeat");
hbeatSecs = hbeat == null ? 0 : (int) hbeat.get("uptime");
}
}
ws.set_uptime_secs(hbeatSecs);
ws.set_num_executors(ws.get_num_executors() + 1);
// get tasks if the user is authorized for this topology
if (userAuthorized) {
int firstTask = exec.get(0).intValue();
int lastTask = exec.get(1).intValue();
// get per task components
for (int task = firstTask; task <= lastTask; task++) {
String component = task2Component.get(task);
// skip system components (e.g. ackers, metrics) when they are hidden in the UI
if (!includeSys && Utils.isSystemId(component)) {
continue;
}
// good to go, increment # of tasks this component is being executed on
Long counter = componentToNumTasks.get(component);
if (counter == null) {
counter = 0L;
}
componentToNumTasks.put(component, counter + 1);
}
}
}
}
}
return new ArrayList<WorkerSummary>(workerSummaryMap.values());
}
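A minimal sketch of calling aggWorkerStats directly with hand-built inputs that match the signature above; the topology id, node id, host name, and resource numbers are made up for illustration.
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.generated.WorkerResources;
import org.apache.storm.generated.WorkerSummary;
import org.apache.storm.scheduler.WorkerSlot;
import org.apache.storm.stats.StatsUtil;

public class AggWorkerStatsExample {
    public static void main(String[] args) {
        // one executor [1 1] of component "spout" assigned to node-1:6700
        Map<Integer, String> task2Component = new HashMap<>();
        task2Component.put(1, "spout");

        Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
        exec2NodePort.put(Arrays.asList(1L, 1L), Arrays.<Object>asList("node-1", 6700L));

        Map<String, String> nodeHost = new HashMap<>();
        nodeHost.put("node-1", "host-1.example.com");

        WorkerResources wr = new WorkerResources();
        wr.set_mem_on_heap(512.0);
        wr.set_mem_off_heap(128.0);
        wr.set_cpu(50.0);
        Map<WorkerSlot, WorkerResources> worker2Resources = new HashMap<>();
        worker2Resources.put(new WorkerSlot("node-1", 6700), wr);

        // beats may be null; worker uptime then defaults to 0
        List<WorkerSummary> summaries = StatsUtil.aggWorkerStats(
                "topo-1", "my-topology", task2Component, null, exec2NodePort, nodeHost,
                worker2Resources, false /* includeSys */, true /* userAuthorized */,
                null /* filterSupervisor */);
        for (WorkerSummary ws : summaries) {
            System.out.printf("%s:%d executors=%d on-heap=%.1f MB%n",
                    ws.get_host(), ws.get_port(), ws.get_num_executors(), ws.get_assigned_memonheap());
        }
    }
}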
Use of org.apache.storm.generated.WorkerResources in project storm by apache.
The class Nimbus, method getResourcesForTopology.
private TopologyResources getResourcesForTopology(String topoId, StormBase base) throws NotAliveException, AuthorizationException, InvalidTopologyException, IOException {
TopologyResources ret = idToResources.get().get(topoId);
if (ret == null) {
try {
IStormClusterState state = stormClusterState;
TopologyDetails details = readTopologyDetails(topoId, base);
double sumOnHeap = 0.0;
double sumOffHeap = 0.0;
double sumCPU = 0.0;
Assignment assignment = state.assignmentInfo(topoId, null);
if (assignment != null) {
if (assignment.is_set_worker_resources()) {
for (WorkerResources wr : assignment.get_worker_resources().values()) {
if (wr.is_set_cpu()) {
sumCPU += wr.get_cpu();
}
if (wr.is_set_mem_off_heap()) {
sumOffHeap += wr.get_mem_off_heap();
}
if (wr.is_set_mem_on_heap()) {
sumOnHeap += wr.get_mem_on_heap();
}
}
}
}
ret = new TopologyResources(details.getTotalRequestedMemOnHeap(), details.getTotalRequestedMemOffHeap(), details.getTotalRequestedCpu(), sumOnHeap, sumOffHeap, sumCPU);
} catch (KeyNotFoundException e) {
//This can happen when a topology is first coming up
// It's thrown by the blobstore code
LOG.error("Failed to get topology details", e);
ret = new TopologyResources(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
}
}
return ret;
}
Use of org.apache.storm.generated.WorkerResources in project storm by apache.
The class BasicContainer, method launch.
@Override
public void launch() throws IOException {
_type.assertFull();
LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", _assignment, _supervisorId, _port, _workerId);
String logPrefix = "Worker Process " + _workerId;
ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
_exitedEarly = false;
final WorkerResources resources = _assignment.get_resources();
final int memOnheap = getMemOnHeap(resources);
final String stormRoot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
final String jlp = javaLibraryPath(stormRoot, _conf);
List<String> commandList = mkLaunchCommand(memOnheap, stormRoot, jlp);
Map<String, String> topEnvironment = new HashMap<String, String>();
@SuppressWarnings("unchecked") Map<String, String> environment = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
if (environment != null) {
topEnvironment.putAll(environment);
}
topEnvironment.put("LD_LIBRARY_PATH", jlp);
if (_resourceIsolationManager != null) {
int memoffheap = (int) Math.ceil(resources.get_mem_off_heap());
int cpu = (int) Math.ceil(resources.get_cpu());
int cGroupMem = (int) (Math.ceil((double) _conf.get(Config.STORM_CGROUP_MEMORY_LIMIT_TOLERANCE_MARGIN_MB)));
int memoryValue = memoffheap + memOnheap + cGroupMem;
int cpuValue = cpu;
Map<String, Number> map = new HashMap<>();
map.put("cpu", cpuValue);
map.put("memory", memoryValue);
_resourceIsolationManager.reserveResourcesForWorker(_workerId, map);
}
LOG.info("Launching worker with command: {}. ", Utils.shellCmd(commandList));
String workerDir = ConfigUtils.workerRoot(_conf, _workerId);
launchWorkerProcess(commandList, topEnvironment, logPrefix, processExitCallback, new File(workerDir));
}
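The reservation handed to the resource isolation manager above is simply the rounded-up off-heap memory plus the on-heap figure plus the configured cgroup tolerance margin. Below is a minimal sketch of that arithmetic with illustrative numbers; the 128 MB margin is an assumed stand-in for Config.STORM_CGROUP_MEMORY_LIMIT_TOLERANCE_MARGIN_MB, and the on-heap value simply rounds up get_mem_on_heap() as an approximation of the getMemOnHeap helper, which is not shown in the listing.
import org.apache.storm.generated.WorkerResources;

public class WorkerReservationExample {
    public static void main(String[] args) {
        WorkerResources resources = new WorkerResources();
        resources.set_mem_on_heap(768.0);   // MB
        resources.set_mem_off_heap(256.5);  // MB
        resources.set_cpu(150.0);           // CPU points

        // approximation of getMemOnHeap(resources); the real helper may also apply config defaults
        int memOnheap = (int) Math.ceil(resources.get_mem_on_heap());
        int memoffheap = (int) Math.ceil(resources.get_mem_off_heap());
        int cGroupMem = 128; // illustrative stand-in for the cgroup memory tolerance margin (MB)
        int memoryValue = memoffheap + memOnheap + cGroupMem; // 257 + 768 + 128 = 1153 MB
        int cpuValue = (int) Math.ceil(resources.get_cpu());  // 150

        System.out.println("reserve memory=" + memoryValue + " MB, cpu=" + cpuValue);
    }
}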