Usage example of org.apache.storm.generated.NodeInfo in the Apache Storm project: class Nimbus, method getTopologyInfoWithOptsImpl.
/**
 * Builds the {@link TopologyInfo} for a topology, honoring the caller's error-reporting choice.
 *
 * @param topoId  the topology id to look up
 * @param options controls how many errors per component are returned (NONE, ONE, or ALL)
 * @return the populated TopologyInfo, including executor summaries, errors, scheduling status,
 *         resource usage (when the underlying scheduler is resource-aware), and blob replication
 * @throws NotAliveException (wrapped) if the topology has no stored base state
 * @throws Exception if cluster state lookups fail
 */
private TopologyInfo getTopologyInfoWithOptsImpl(String topoId, GetInfoOptions options) throws NotAliveException, AuthorizationException, InvalidTopologyException, Exception {
    CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyInfo");
    if (common.base == null) {
        throw new WrappedNotAliveException(topoId);
    }
    IStormClusterState state = stormClusterState;
    // Default to reporting all errors when the caller did not specify a choice.
    NumErrorsChoice numErrChoice = Utils.OR(options.get_num_err_choice(), NumErrorsChoice.ALL);
    Map<String, List<ErrorInfo>> errors = new HashMap<>();
    for (String component : common.allComponents) {
        switch (numErrChoice) {
            case NONE:
                errors.put(component, Collections.emptyList());
                break;
            case ONE:
                // Only the most recent error (if any) for this component.
                List<ErrorInfo> errList = new ArrayList<>();
                ErrorInfo info = state.lastError(topoId, component);
                if (info != null) {
                    errList.add(info);
                }
                errors.put(component, errList);
                break;
            case ALL:
                errors.put(component, state.errors(topoId, component));
                break;
            default:
                // Unknown enum value: warn but fall back to the ALL behavior rather than failing.
                LOG.warn("Got invalid NumErrorsChoice '{}'", numErrChoice);
                errors.put(component, state.errors(topoId, component));
                break;
        }
    }
    List<ExecutorSummary> summaries = new ArrayList<>();
    if (common.assignment != null) {
        // Hoisted out of the loop: the node->host map is the same for every executor entry.
        Map<String, String> nodeToHost = common.assignment.get_node_host();
        for (Entry<List<Long>, NodeInfo> entry : common.assignment.get_executor_node_port().entrySet()) {
            NodeInfo ni = entry.getValue();
            ExecutorInfo execInfo = toExecInfo(entry.getKey());
            Map<String, Object> heartbeat = common.beats.get(ClientStatsUtil.convertExecutor(entry.getKey()));
            if (heartbeat == null) {
                // No heartbeat yet for this executor; report zero uptime and no stats.
                heartbeat = Collections.emptyMap();
            }
            ExecutorSummary summ = new ExecutorSummary(execInfo, common.taskToComponent.get(execInfo.get_task_start()), nodeToHost.get(ni.get_node()), ni.get_port_iterator().next().intValue(), (Integer) heartbeat.getOrDefault("uptime", 0));
            // The heartbeat's "stats" entry is an untyped map produced by the worker;
            // raw Map is intentional here to match StatsUtil.thriftifyExecutorStats.
            Map ex = (Map) heartbeat.get("stats");
            if (ex != null) {
                ExecutorStats stats = StatsUtil.thriftifyExecutorStats(ex);
                summ.set_stats(stats);
            }
            summaries.add(summ);
        }
    }
    TopologyInfo topoInfo = new TopologyInfo(topoId, common.topoName, Time.deltaSecs(common.launchTimeSecs), summaries, extractStatusStr(common.base), errors);
    if (common.topology.is_set_storm_version()) {
        topoInfo.set_storm_version(common.topology.get_storm_version());
    }
    if (common.base.is_set_owner()) {
        topoInfo.set_owner(common.base.get_owner());
    }
    String schedStatus = idToSchedStatus.get().get(topoId);
    if (schedStatus != null) {
        topoInfo.set_sched_status(schedStatus);
    }
    // Resource numbers are only meaningful under the resource-aware scheduler.
    TopologyResources resources = getResourcesForTopology(topoId, common.base);
    if (resources != null && underlyingScheduler instanceof ResourceAwareScheduler) {
        topoInfo.set_requested_memonheap(resources.getRequestedMemOnHeap());
        topoInfo.set_requested_memoffheap(resources.getRequestedMemOffHeap());
        topoInfo.set_requested_cpu(resources.getRequestedCpu());
        topoInfo.set_assigned_memonheap(resources.getAssignedMemOnHeap());
        topoInfo.set_assigned_memoffheap(resources.getAssignedMemOffHeap());
        topoInfo.set_assigned_cpu(resources.getAssignedCpu());
    }
    if (common.base.is_set_component_debug()) {
        topoInfo.set_component_debug(common.base.get_component_debug());
    }
    topoInfo.set_replication_count(getBlobReplicationCount(ConfigUtils.masterStormCodeKey(topoId)));
    return topoInfo;
}
Usage example of org.apache.storm.generated.NodeInfo in the Apache Storm project: class Nimbus, method computeTopologyToSchedulerAssignment.
/**
* Convert assignment information in zk to SchedulerAssignment, so it can be used by scheduler api.
*
* @param existingAssignments current assignments
* @param topologyToAliveExecutors executors that are alive
* @return topo ID to schedulerAssignment
*/
/**
 * Convert assignment information in zk to SchedulerAssignment, so it can be used by scheduler api.
 *
 * @param existingAssignments current assignments
 * @param topologyToAliveExecutors executors that are alive
 * @return topo ID to schedulerAssignment
 */
private Map<String, SchedulerAssignmentImpl> computeTopologyToSchedulerAssignment(Map<String, Assignment> existingAssignments, Map<String, Set<List<Integer>>> topologyToAliveExecutors) {
    Map<String, SchedulerAssignmentImpl> ret = new HashMap<>();
    for (Entry<String, Assignment> entry : existingAssignments.entrySet()) {
        String topoId = entry.getKey();
        Assignment assignment = entry.getValue();
        // Guard against a missing entry: treat "no alive executors recorded" as an
        // empty set instead of risking an NPE on the contains() check below.
        Set<List<Integer>> aliveExecutors = topologyToAliveExecutors.getOrDefault(topoId, Collections.emptySet());
        Map<List<Long>, NodeInfo> execToNodePort = assignment.get_executor_node_port();
        Map<NodeInfo, WorkerResources> workerToResources = assignment.get_worker_resources();
        // First pass: build a WorkerSlot for every assigned worker and record its resources.
        Map<NodeInfo, WorkerSlot> nodePortToSlot = new HashMap<>();
        Map<WorkerSlot, WorkerResources> slotToResources = new HashMap<>();
        for (Entry<NodeInfo, WorkerResources> nodeAndResources : workerToResources.entrySet()) {
            NodeInfo info = nodeAndResources.getKey();
            WorkerResources resources = nodeAndResources.getValue();
            // A NodeInfo carries a set of ports; the first (only) one identifies the slot.
            WorkerSlot slot = new WorkerSlot(info.get_node(), info.get_port_iterator().next());
            nodePortToSlot.put(info, slot);
            slotToResources.put(slot, resources);
        }
        // Second pass: map each still-alive executor to the slot it runs on.
        Map<ExecutorDetails, WorkerSlot> execToSlot = new HashMap<>();
        for (Entry<List<Long>, NodeInfo> execAndNodePort : execToNodePort.entrySet()) {
            List<Integer> exec = asIntExec(execAndNodePort.getKey());
            NodeInfo info = execAndNodePort.getValue();
            if (aliveExecutors.contains(exec)) {
                // exec is [startTask, endTask]
                execToSlot.put(new ExecutorDetails(exec.get(0), exec.get(1)), nodePortToSlot.get(info));
            }
        }
        ret.put(topoId, new SchedulerAssignmentImpl(topoId, execToSlot, slotToResources, null));
    }
    return ret;
}
Usage example of org.apache.storm.generated.NodeInfo in the Apache Storm project: class Nimbus, method getWorkerResourcesForTopology.
/**
 * Returns the per-worker-slot resources for the given topology, consulting the
 * in-memory cache first and falling back to the cluster-state assignment.
 * A freshly computed result is written back into the cache only when an
 * assignment with worker resources actually exists.
 */
private Map<WorkerSlot, WorkerResources> getWorkerResourcesForTopology(String topoId) {
    Map<WorkerSlot, WorkerResources> cached = idToWorkerResources.get().get(topoId);
    if (cached != null) {
        // Cache hit: nothing to recompute.
        return cached;
    }
    Map<WorkerSlot, WorkerResources> resources = new HashMap<>();
    IStormClusterState state = stormClusterState;
    Assignment assignment = state.assignmentInfo(topoId, null);
    if (assignment != null && assignment.is_set_worker_resources()) {
        for (Entry<NodeInfo, WorkerResources> workerEntry : assignment.get_worker_resources().entrySet()) {
            NodeInfo node = workerEntry.getKey();
            // The single port on the NodeInfo identifies the worker slot.
            WorkerSlot slot = new WorkerSlot(node.get_node(), node.get_port_iterator().next());
            resources.put(slot, workerEntry.getValue());
        }
        // Publish the computed map so subsequent calls take the fast path.
        idToWorkerResources.getAndUpdate(new Assoc<>(topoId, resources));
    }
    return resources;
}
Usage example of org.apache.storm.generated.NodeInfo in the Apache Storm project: class Nimbus, method getComponentPageInfo.
// Assembles the UI page info for a single component of a topology: aggregated
// executor stats, resource requests, errors, debug options, and (when present)
// the host/port of the event logger task that this component's events route to.
// Thrift-facing entry point: any non-TException is wrapped in a RuntimeException.
@Override
public ComponentPageInfo getComponentPageInfo(String topoId, String componentId, String window, boolean includeSys) throws NotAliveException, AuthorizationException, TException {
try {
getComponentPageInfoCalls.mark();
CommonTopoInfo info = getCommonTopoInfo(topoId, "getComponentPageInfo");
// A missing base means the topology is not alive.
if (info.base == null) {
throw new WrappedNotAliveException(topoId);
}
StormTopology topology = info.topology;
Map<String, Object> topoConf = info.topoConf;
// Overlay the topology's conf on top of the nimbus conf.
topoConf = Utils.merge(conf, topoConf);
Assignment assignment = info.assignment;
Map<List<Long>, List<Object>> exec2NodePort = new HashMap<>();
Map<String, String> nodeToHost;
Map<List<Long>, List<Object>> exec2HostPort = new HashMap<>();
// The topology may not be scheduled yet; in that case both maps stay empty.
if (assignment != null) {
Map<List<Long>, NodeInfo> execToNodeInfo = assignment.get_executor_node_port();
nodeToHost = assignment.get_node_host();
for (Entry<List<Long>, NodeInfo> entry : execToNodeInfo.entrySet()) {
NodeInfo ni = entry.getValue();
// Build both a node-id view and a hostname view of each executor's location.
List<Object> nodePort = Arrays.asList(ni.get_node(), ni.get_port_iterator().next());
List<Object> hostPort = Arrays.asList(nodeToHost.get(ni.get_node()), ni.get_port_iterator().next());
exec2NodePort.put(entry.getKey(), nodePort);
exec2HostPort.put(entry.getKey(), hostPort);
}
} else {
nodeToHost = Collections.emptyMap();
}
// Aggregate per-executor stats for this component over the requested time window.
ComponentPageInfo compPageInfo = StatsUtil.aggCompExecsStats(exec2HostPort, info.taskToComponent, info.beats, window, includeSys, topoId, topology, componentId);
if (compPageInfo.get_component_type() == ComponentType.SPOUT) {
NormalizedResourceRequest spoutResources = ResourceUtils.getSpoutResources(topology, topoConf, componentId);
// Fall back to conf-derived defaults when the component declares no resources.
if (spoutResources == null) {
spoutResources = new NormalizedResourceRequest(topoConf, componentId);
}
compPageInfo.set_resources_map(spoutResources.toNormalizedMap());
} else {
// bolt
NormalizedResourceRequest boltResources = ResourceUtils.getBoltResources(topology, topoConf, componentId);
if (boltResources == null) {
boltResources = new NormalizedResourceRequest(topoConf, componentId);
}
compPageInfo.set_resources_map(boltResources.toNormalizedMap());
}
compPageInfo.set_topology_name(info.topoName);
compPageInfo.set_errors(stormClusterState.errors(topoId, componentId));
compPageInfo.set_topology_status(extractStatusStr(info.base));
if (info.base.is_set_component_debug()) {
DebugOptions debug = info.base.get_component_debug().get(componentId);
if (debug != null) {
compPageInfo.set_debug_options(debug);
}
}
// Add the event logger details.
Map<String, List<Integer>> compToTasks = Utils.reverseMap(info.taskToComponent);
if (compToTasks.containsKey(StormCommon.EVENTLOGGER_COMPONENT_ID)) {
List<Integer> tasks = compToTasks.get(StormCommon.EVENTLOGGER_COMPONENT_ID);
// Sort so the task-index choice below is stable across calls.
tasks.sort(null);
// Find the task the events from this component route to.
int taskIndex = TupleUtils.chooseTaskIndex(Collections.singletonList(componentId), tasks.size());
int taskId = tasks.get(taskIndex);
String host = null;
Integer port = null;
// Each exec2HostPort key is [startTask, endTask]; locate the range containing taskId.
for (Entry<List<Long>, List<Object>> entry : exec2HostPort.entrySet()) {
int start = entry.getKey().get(0).intValue();
int end = entry.getKey().get(1).intValue();
if (taskId >= start && taskId <= end) {
host = (String) entry.getValue().get(0);
port = ((Number) entry.getValue().get(1)).intValue();
break;
}
}
if (host != null && port != null) {
compPageInfo.set_eventlog_host(host);
compPageInfo.set_eventlog_port(port);
}
}
return compPageInfo;
} catch (Exception e) {
// Log everything; rethrow TExceptions unchanged for the Thrift layer,
// wrap anything else as an unchecked exception.
LOG.warn("getComponentPageInfo exception. (topo id='{}')", topoId, e);
if (e instanceof TException) {
throw (TException) e;
}
throw new RuntimeException(e);
}
}
Usage example of org.apache.storm.generated.NodeInfo in the Apache Storm project: class Nimbus, method computeSupervisorToDeadPorts.
/**
 * Computes, per supervisor node, the set of ports whose assigned executors are dead.
 *
 * @param existingAssignments      topology id -> current assignment
 * @param topologyToExecutors      topology id -> all executors ([startTask, endTask] pairs)
 * @param topologyToAliveExecutors topology id -> executors confirmed alive by heartbeats
 * @return supervisor node id -> ports hosting at least one dead executor
 */
private Map<String, Set<Long>> computeSupervisorToDeadPorts(Map<String, Assignment> existingAssignments, Map<String, Set<List<Integer>>> topologyToExecutors, Map<String, Set<List<Integer>>> topologyToAliveExecutors) {
    Map<String, Set<Long>> ret = new HashMap<>();
    for (Entry<String, Assignment> entry : existingAssignments.entrySet()) {
        String topoId = entry.getKey();
        Assignment assignment = entry.getValue();
        Set<List<Integer>> allExecutors = topologyToExecutors.get(topoId);
        Set<List<Integer>> aliveExecutors = topologyToAliveExecutors.get(topoId);
        // Dead = all assigned executors minus those with live heartbeats.
        Set<List<Integer>> deadExecutors = new HashSet<>(allExecutors);
        deadExecutors.removeAll(aliveExecutors);
        Map<List<Long>, NodeInfo> execToNodePort = assignment.get_executor_node_port();
        for (Entry<List<Long>, NodeInfo> assigned : execToNodePort.entrySet()) {
            if (deadExecutors.contains(asIntExec(assigned.getKey()))) {
                NodeInfo info = assigned.getValue();
                // Idiomatic accumulation: create the port set for this supervisor on first use.
                ret.computeIfAbsent(info.get_node(), superId -> new HashSet<>()).addAll(info.get_port());
            }
        }
    }
    return ret;
}
Aggregations