use of com.alibaba.jstorm.task.TaskInfo in project jstorm by alibaba.
the class DoRebalanceTransitionCallback method setBoltInfo.
private int setBoltInfo(StormTopology oldTopology, StormTopology newTopology, int cnt, StormClusterState clusterState) throws Exception {
    Map<String, Bolt> oldBolts = oldTopology.get_bolts();
    Map<String, Bolt> bolts = newTopology.get_bolts();
    for (Entry<String, Bolt> entry : oldBolts.entrySet()) {
        String boltName = entry.getKey();
        Bolt oldBolt = entry.getValue();
        Bolt bolt = bolts.get(boltName);
        if (oldBolt.get_common().get_parallelism_hint() > bolt.get_common().get_parallelism_hint()) {
            // Parallelism decreased: remove the highest task ids of this bolt.
            int removedTaskNum = oldBolt.get_common().get_parallelism_hint() - bolt.get_common().get_parallelism_hint();
            TreeSet<Integer> taskIds = new TreeSet<Integer>(clusterState.task_ids_by_componentId(topologyid, boltName));
            Iterator<Integer> descendIterator = taskIds.descendingIterator();
            while (--removedTaskNum >= 0) {
                int taskId = descendIterator.next();
                removeTask(topologyid, taskId, clusterState);
                LOG.info("Remove bolt task, taskId=" + taskId + " for " + boltName);
            }
        } else if (oldBolt.get_common().get_parallelism_hint() == bolt.get_common().get_parallelism_hint()) {
            continue;
        } else {
            // Parallelism increased: register new TaskInfo entries under fresh task ids.
            int delta = bolt.get_common().get_parallelism_hint() - oldBolt.get_common().get_parallelism_hint();
            Map<Integer, TaskInfo> taskInfoMap = new HashMap<Integer, TaskInfo>();
            for (int i = 1; i <= delta; i++) {
                cnt++;
                TaskInfo taskInfo = new TaskInfo(entry.getKey(), "bolt");
                taskInfoMap.put(cnt, taskInfo);
                newTasks.add(cnt);
                LOG.info("Setup new bolt task, taskId=" + cnt + " for " + boltName);
            }
            clusterState.add_task(topologyid, taskInfoMap);
        }
    }
    return cnt;
}
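To make the parallelism-increase branch easier to follow, here is a minimal, self-contained sketch of the same TaskInfo bookkeeping. The helper name, the starting task id, and the component name are illustrative assumptions; only the TaskInfo(componentId, type) constructor usage is taken from the method above.

import java.util.HashMap;
import java.util.Map;

import com.alibaba.jstorm.task.TaskInfo;

public class BoltTaskExpansionSketch {
    // Illustrative helper: allocate `delta` new task ids for a bolt, continuing after `lastTaskId`,
    // and map each new id to a TaskInfo(componentId, "bolt"), mirroring the expansion branch above.
    static Map<Integer, TaskInfo> newBoltTasks(String boltName, int lastTaskId, int delta) {
        Map<Integer, TaskInfo> taskInfoMap = new HashMap<Integer, TaskInfo>();
        int cnt = lastTaskId;
        for (int i = 1; i <= delta; i++) {
            cnt++;
            taskInfoMap.put(cnt, new TaskInfo(boltName, "bolt"));
        }
        return taskInfoMap;
    }

    public static void main(String[] args) {
        // e.g. a hypothetical bolt "word-count" grows by 2 tasks; ids continue after the current maximum (here 7)
        Map<Integer, TaskInfo> added = newBoltTasks("word-count", 7, 2);
        System.out.println(added.keySet()); // prints the newly allocated ids, e.g. [8, 9]
    }
}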
use of com.alibaba.jstorm.task.TaskInfo in project jstorm by alibaba.
the class ServiceHandler method getTopologyInfo.
/**
 * Get TopologyInfo; it contains all data about the topology's running status.
 *
 * @return TopologyInfo
 */
@Override
public TopologyInfo getTopologyInfo(String topologyId) throws NotAliveException, TException {
    long start = System.nanoTime();
    StormClusterState stormClusterState = data.getStormClusterState();
    try {
        // get topology's StormBase
        StormBase base = stormClusterState.storm_base(topologyId, null);
        if (base == null) {
            throw new NotAliveException("No topology of " + topologyId);
        }
        Assignment assignment = stormClusterState.assignment_info(topologyId, null);
        if (assignment == null) {
            throw new NotAliveException("No topology of " + topologyId);
        }
        TopologyTaskHbInfo topologyTaskHbInfo = data.getTasksHeartbeat().get(topologyId);
        Map<Integer, TaskHeartbeat> taskHbMap = null;
        if (topologyTaskHbInfo != null)
            taskHbMap = topologyTaskHbInfo.get_taskHbs();
        Map<Integer, TaskInfo> taskInfoMap = Cluster.get_all_taskInfo(stormClusterState, topologyId);
        Map<Integer, String> taskToComponent = Cluster.get_all_task_component(stormClusterState, topologyId, taskInfoMap);
        Map<Integer, String> taskToType = Cluster.get_all_task_type(stormClusterState, topologyId, taskInfoMap);
        String errorString;
        if (Cluster.is_topology_exist_error(stormClusterState, topologyId)) {
            errorString = "Y";
        } else {
            errorString = "";
        }
        TopologySummary topologySummary = new TopologySummary();
        topologySummary.set_id(topologyId);
        topologySummary.set_name(base.getStormName());
        topologySummary.set_uptimeSecs(TimeUtils.time_delta(base.getLanchTimeSecs()));
        topologySummary.set_status(base.getStatusString());
        topologySummary.set_numTasks(NimbusUtils.getTopologyTaskNum(assignment));
        topologySummary.set_numWorkers(assignment.getWorkers().size());
        topologySummary.set_errorInfo(errorString);
        // build one ComponentSummary per component by inverting the task -> component map
        Map<String, ComponentSummary> componentSummaryMap = new HashMap<String, ComponentSummary>();
        HashMap<String, List<Integer>> componentToTasks = JStormUtils.reverse_map(taskToComponent);
        for (Entry<String, List<Integer>> entry : componentToTasks.entrySet()) {
            String name = entry.getKey();
            List<Integer> taskIds = entry.getValue();
            if (taskIds == null || taskIds.size() == 0) {
                LOG.warn("No task of component " + name);
                continue;
            }
            ComponentSummary componentSummary = new ComponentSummary();
            componentSummaryMap.put(name, componentSummary);
            componentSummary.set_name(name);
            componentSummary.set_type(taskToType.get(taskIds.get(0)));
            componentSummary.set_parallel(taskIds.size());
            componentSummary.set_taskIds(taskIds);
        }
        Map<Integer, TaskSummary> taskSummaryMap = new TreeMap<Integer, TaskSummary>();
        Map<Integer, List<TaskError>> taskErrors = Cluster.get_all_task_errors(stormClusterState, topologyId);
        for (Integer taskId : taskInfoMap.keySet()) {
            TaskSummary taskSummary = new TaskSummary();
            taskSummaryMap.put(taskId, taskSummary);
            taskSummary.set_taskId(taskId);
            if (taskHbMap == null) {
                taskSummary.set_status("Starting");
                taskSummary.set_uptime(0);
            } else {
                TaskHeartbeat hb = taskHbMap.get(taskId);
                if (hb == null) {
                    taskSummary.set_status("Starting");
                    taskSummary.set_uptime(0);
                } else {
                    boolean isInactive = NimbusUtils.isTaskDead(data, topologyId, taskId);
                    if (isInactive)
                        taskSummary.set_status("INACTIVE");
                    else
                        taskSummary.set_status("ACTIVE");
                    taskSummary.set_uptime(hb.get_uptime());
                }
            }
            if (StringUtils.isBlank(errorString)) {
                continue;
            }
            List<TaskError> taskErrorList = taskErrors.get(taskId);
            if (taskErrorList != null && taskErrorList.size() != 0) {
                for (TaskError taskError : taskErrorList) {
                    ErrorInfo errorInfo = new ErrorInfo(taskError.getError(), taskError.getTimSecs(), taskError.getLevel(), taskError.getCode());
                    taskSummary.add_to_errors(errorInfo);
                    String component = taskToComponent.get(taskId);
                    componentSummaryMap.get(component).add_to_errors(errorInfo);
                }
            }
        }
        // attach the host/port of the worker slot each task is assigned to
        for (ResourceWorkerSlot workerSlot : assignment.getWorkers()) {
            String hostname = workerSlot.getHostname();
            int port = workerSlot.getPort();
            for (Integer taskId : workerSlot.getTasks()) {
                TaskSummary taskSummary = taskSummaryMap.get(taskId);
                taskSummary.set_host(hostname);
                taskSummary.set_port(port);
            }
        }
        TopologyInfo topologyInfo = new TopologyInfo();
        topologyInfo.set_topology(topologySummary);
        topologyInfo.set_components(JStormUtils.mk_list(componentSummaryMap.values()));
        topologyInfo.set_tasks(JStormUtils.mk_list(taskSummaryMap.values()));
        // return topology metric & component metric only
        List<MetricInfo> tpMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.TOPOLOGY);
        List<MetricInfo> compMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.COMPONENT);
        List<MetricInfo> workerMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.WORKER);
        MetricInfo taskMetric = MetricUtils.mkMetricInfo();
        MetricInfo streamMetric = MetricUtils.mkMetricInfo();
        MetricInfo nettyMetric = MetricUtils.mkMetricInfo();
        MetricInfo tpMetric, compMetric, workerMetric;
        if (tpMetricList == null || tpMetricList.size() == 0) {
            tpMetric = MetricUtils.mkMetricInfo();
        } else {
            // get the topology metric of the most recent minute
            tpMetric = tpMetricList.get(tpMetricList.size() - 1);
        }
        if (compMetricList == null || compMetricList.size() == 0) {
            compMetric = MetricUtils.mkMetricInfo();
        } else {
            compMetric = compMetricList.get(0);
        }
        if (workerMetricList == null || workerMetricList.size() == 0) {
            workerMetric = MetricUtils.mkMetricInfo();
        } else {
            workerMetric = workerMetricList.get(0);
        }
        TopologyMetric topologyMetrics = new TopologyMetric(tpMetric, compMetric, workerMetric, taskMetric, streamMetric, nettyMetric);
        topologyInfo.set_metrics(topologyMetrics);
        return topologyInfo;
    } catch (TException e) {
        LOG.info("Failed to get topologyInfo " + topologyId, e);
        throw e;
    } catch (Exception e) {
        LOG.info("Failed to get topologyInfo " + topologyId, e);
        throw new TException("Failed to get topologyInfo " + topologyId);
    } finally {
        long end = System.nanoTime();
        SimpleJStormMetric.updateNimbusHistogram("getTopologyInfo", (end - start) / TimeUtils.NS_PER_US);
    }
}
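The component summaries above depend on inverting the task-to-component map with JStormUtils.reverse_map. Below is a stand-alone sketch of that grouping step using only JDK collections; it assumes reverse_map performs plain value-to-key grouping, which is how its result is consumed here, and the component names are made up.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ReverseMapSketch {
    // Invert Map<taskId, component> into Map<component, List<taskId>>,
    // i.e. group task ids by the component they belong to.
    static HashMap<String, List<Integer>> reverse(Map<Integer, String> taskToComponent) {
        HashMap<String, List<Integer>> componentToTasks = new HashMap<String, List<Integer>>();
        for (Map.Entry<Integer, String> entry : taskToComponent.entrySet()) {
            List<Integer> taskIds = componentToTasks.get(entry.getValue());
            if (taskIds == null) {
                taskIds = new ArrayList<Integer>();
                componentToTasks.put(entry.getValue(), taskIds);
            }
            taskIds.add(entry.getKey());
        }
        return componentToTasks;
    }

    public static void main(String[] args) {
        Map<Integer, String> taskToComponent = new HashMap<Integer, String>();
        taskToComponent.put(1, "sentence-spout");
        taskToComponent.put(2, "split-bolt");
        taskToComponent.put(3, "split-bolt");
        // e.g. {split-bolt=[2, 3], sentence-spout=[1]} (map iteration order is not guaranteed)
        System.out.println(reverse(taskToComponent));
    }
}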
use of com.alibaba.jstorm.task.TaskInfo in project jstorm by alibaba.
the class ServiceHandler method setupZkTaskInfo.
/**
 * Generate TaskInfo for every bolt and spout in ZK under /ZK/tasks/topologyId/xxx
 *
 * @param conf
 * @param topologyId
 * @param stormClusterState
 * @throws Exception
 */
public void setupZkTaskInfo(Map<Object, Object> conf, String topologyId, StormClusterState stormClusterState) throws Exception {
    Map<Integer, TaskInfo> taskToTaskInfo = mkTaskComponentAssignments(conf, topologyId);
    // mkdir /ZK/taskbeats/topologyId
    int masterId = NimbusUtils.getTopologyMasterId(taskToTaskInfo);
    TopologyTaskHbInfo topoTaskHbinfo = new TopologyTaskHbInfo(topologyId, masterId);
    data.getTasksHeartbeat().put(topologyId, topoTaskHbinfo);
    stormClusterState.topology_heartbeat(topologyId, topoTaskHbinfo);
    if (taskToTaskInfo == null || taskToTaskInfo.size() == 0) {
        throw new InvalidTopologyException("Failed to generate TaskIDs map");
    }
    // key is taskId, value is TaskInfo
    stormClusterState.set_task(topologyId, taskToTaskInfo);
}
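mkTaskComponentAssignments is what produces the taskId-to-TaskInfo map that setupZkTaskInfo writes to ZK. As a rough illustration of the shape of that map (not the real implementation, whose inputs come from the topology definition), a sketch that numbers tasks consecutively per component could look like this; the input maps and numbering scheme are assumptions.

import java.util.HashMap;
import java.util.Map;

import com.alibaba.jstorm.task.TaskInfo;

public class TaskAssignmentSketch {
    // Illustrative only: build a taskId -> TaskInfo map from per-component parallelism hints,
    // assigning consecutive task ids and tagging each entry with its component type ("spout"/"bolt").
    static Map<Integer, TaskInfo> assign(Map<String, Integer> componentParallelism, Map<String, String> componentType) {
        Map<Integer, TaskInfo> taskToTaskInfo = new HashMap<Integer, TaskInfo>();
        int taskId = 0;
        for (Map.Entry<String, Integer> entry : componentParallelism.entrySet()) {
            for (int i = 0; i < entry.getValue(); i++) {
                taskId++;
                taskToTaskInfo.put(taskId, new TaskInfo(entry.getKey(), componentType.get(entry.getKey())));
            }
        }
        return taskToTaskInfo;
    }
}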
use of com.alibaba.jstorm.task.TaskInfo in project jstorm by alibaba.
the class DoRebalanceTransitionCallback method setSpoutInfo.
private int setSpoutInfo(StormTopology oldTopology, StormTopology newTopology, int cnt, StormClusterState clusterState) throws Exception {
    Map<String, SpoutSpec> oldSpouts = oldTopology.get_spouts();
    Map<String, SpoutSpec> spouts = newTopology.get_spouts();
    for (Entry<String, SpoutSpec> entry : oldSpouts.entrySet()) {
        String spoutName = entry.getKey();
        SpoutSpec oldSpout = entry.getValue();
        SpoutSpec spout = spouts.get(spoutName);
        if (oldSpout.get_common().get_parallelism_hint() > spout.get_common().get_parallelism_hint()) {
            // Parallelism decreased: remove the highest task ids of this spout.
            int removedTaskNum = oldSpout.get_common().get_parallelism_hint() - spout.get_common().get_parallelism_hint();
            TreeSet<Integer> taskIds = new TreeSet<Integer>(clusterState.task_ids_by_componentId(topologyid, spoutName));
            Iterator<Integer> descendIterator = taskIds.descendingIterator();
            while (--removedTaskNum >= 0) {
                int taskId = descendIterator.next();
                removeTask(topologyid, taskId, clusterState);
                LOG.info("Remove spout task, taskId=" + taskId + " for " + spoutName);
            }
        } else if (oldSpout.get_common().get_parallelism_hint() == spout.get_common().get_parallelism_hint()) {
            continue;
        } else {
            // Parallelism increased: register new TaskInfo entries under fresh task ids.
            int delta = spout.get_common().get_parallelism_hint() - oldSpout.get_common().get_parallelism_hint();
            Map<Integer, TaskInfo> taskInfoMap = new HashMap<Integer, TaskInfo>();
            for (int i = 1; i <= delta; i++) {
                cnt++;
                TaskInfo taskInfo = new TaskInfo(entry.getKey(), "spout");
                taskInfoMap.put(cnt, taskInfo);
                newTasks.add(cnt);
                LOG.info("Setup new spout task, taskId=" + cnt + " for " + spoutName);
            }
            clusterState.add_task(topologyid, taskInfoMap);
        }
    }
    return cnt;
}
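The shrink branch relies on TreeSet.descendingIterator() together with the while (--removedTaskNum >= 0) loop to drop exactly removedTaskNum of the highest task ids. A runnable sketch with made-up task ids demonstrates that behavior without touching cluster state.

import java.util.Arrays;
import java.util.Iterator;
import java.util.TreeSet;

public class SpoutTaskShrinkSketch {
    public static void main(String[] args) {
        // Illustrative: a spout currently owns task ids 3, 7, 11, 15 and its parallelism drops by 2.
        TreeSet<Integer> taskIds = new TreeSet<Integer>(Arrays.asList(3, 7, 11, 15));
        int removedTaskNum = 2;
        // Same pattern as above: walk the ids from highest to lowest and drop the top removedTaskNum.
        Iterator<Integer> descendIterator = taskIds.descendingIterator();
        while (--removedTaskNum >= 0) {
            int taskId = descendIterator.next();
            System.out.println("would remove spout task, taskId=" + taskId); // prints 15, then 11
        }
    }
}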
use of com.alibaba.jstorm.task.TaskInfo in project jstorm by alibaba.
the class StormZkClusterState method task_ids_by_componentId.
@Override
public Set<Integer> task_ids_by_componentId(String topologyId, String componentId) throws Exception {
    String stormTaskPath = Cluster.storm_task_root(topologyId);
    Object data = getObject(stormTaskPath, false);
    if (data == null) {
        return null;
    }
    Map<Integer, TaskInfo> taskInfoMap = (Map<Integer, TaskInfo>) data;
    Set<Integer> rtn = new HashSet<Integer>();
    Set<Integer> taskIds = taskInfoMap.keySet();
    for (Integer taskId : taskIds) {
        TaskInfo taskInfo = taskInfoMap.get(taskId);
        if (taskInfo != null) {
            if (taskInfo.getComponentId().equalsIgnoreCase(componentId))
                rtn.add(taskId);
        }
    }
    return rtn;
}
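Below is a caller-side sketch of the same filtering logic, applied to an in-memory Map<Integer, TaskInfo> rather than the map read from ZK; the component names and task ids are made up for illustration, and only the TaskInfo constructor and getComponentId() shown above are assumed.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import com.alibaba.jstorm.task.TaskInfo;

public class TaskIdsByComponentSketch {
    // Same filtering idea as task_ids_by_componentId: collect the ids of all tasks
    // whose TaskInfo belongs to the given component (case-insensitive match).
    static Set<Integer> taskIdsByComponent(Map<Integer, TaskInfo> taskInfoMap, String componentId) {
        Set<Integer> rtn = new HashSet<Integer>();
        for (Map.Entry<Integer, TaskInfo> entry : taskInfoMap.entrySet()) {
            TaskInfo taskInfo = entry.getValue();
            if (taskInfo != null && taskInfo.getComponentId().equalsIgnoreCase(componentId)) {
                rtn.add(entry.getKey());
            }
        }
        return rtn;
    }

    public static void main(String[] args) {
        Map<Integer, TaskInfo> taskInfoMap = new HashMap<Integer, TaskInfo>();
        taskInfoMap.put(1, new TaskInfo("sentence-spout", "spout"));
        taskInfoMap.put(2, new TaskInfo("split-bolt", "bolt"));
        taskInfoMap.put(3, new TaskInfo("split-bolt", "bolt"));
        System.out.println(taskIdsByComponent(taskInfoMap, "split-bolt")); // e.g. [2, 3]
    }
}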