Use of backtype.storm.generated.TopologyInfo in project jstorm by alibaba.
The class list, method main.
public static void main(String[] args) {
    NimbusClient client = null;
    try {
        Map conf = Utils.readStormConfig();
        client = NimbusClient.getConfiguredClient(conf);
        if (args.length > 0 && !StringUtils.isBlank(args[0])) {
            // a topology name was given: fetch that topology's info
            String topologyName = args[0];
            TopologyInfo info = client.getClient().getTopologyInfoByName(topologyName);
            System.out.println("Successfully got topology info\n" + Utils.toPrettyJsonString(info));
        } else {
            // no argument: dump the whole cluster summary
            ClusterSummary clusterSummary = client.getClient().getClusterInfo();
            System.out.println("Successfully got cluster info\n" + Utils.toPrettyJsonString(clusterSummary));
        }
    } catch (Exception e) {
        System.out.println(e.getMessage());
        e.printStackTrace();
        throw new RuntimeException(e);
    } finally {
        if (client != null) {
            client.close();
        }
    }
}
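A minimal reusable variant of the same lookup, using only the calls already shown above; the helper name fetchTopologyInfo is hypothetical:

public static TopologyInfo fetchTopologyInfo(String topologyName) throws Exception {
    // read the storm config and connect to nimbus, as in main() above
    Map conf = Utils.readStormConfig();
    NimbusClient client = NimbusClient.getConfiguredClient(conf);
    try {
        return client.getClient().getTopologyInfoByName(topologyName);
    } finally {
        // always release the thrift connection
        client.close();
    }
}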
Use of backtype.storm.generated.TopologyInfo in project jstorm by alibaba.
The class ServiceHandler, method getTopologyInfo.
/**
 * Get TopologyInfo; it contains all of a topology's runtime data.
 *
 * @return TopologyInfo
 */
@Override
public TopologyInfo getTopologyInfo(String topologyId) throws TException {
    long start = System.nanoTime();
    StormClusterState stormClusterState = data.getStormClusterState();
    try {
        // get topology's StormBase
        StormBase base = stormClusterState.storm_base(topologyId, null);
        if (base == null) {
            throw new NotAliveException("No topology of " + topologyId);
        }
        Assignment assignment = stormClusterState.assignment_info(topologyId, null);
        if (assignment == null) {
            throw new NotAliveException("No topology of " + topologyId);
        }
        TopologyTaskHbInfo topologyTaskHbInfo = data.getTasksHeartbeat().get(topologyId);
        Map<Integer, TaskHeartbeat> taskHbMap = null;
        if (topologyTaskHbInfo != null)
            taskHbMap = topologyTaskHbInfo.get_taskHbs();
        Map<Integer, TaskInfo> taskInfoMap = Cluster.get_all_taskInfo(stormClusterState, topologyId);
        Map<Integer, String> taskToComponent = Cluster.get_all_task_component(stormClusterState, topologyId, taskInfoMap);
        Map<Integer, String> taskToType = Cluster.get_all_task_type(stormClusterState, topologyId, taskInfoMap);
        // "Y" flags that at least one task of this topology has reported an error
        String errorString;
        if (Cluster.is_topology_exist_error(stormClusterState, topologyId)) {
            errorString = "Y";
        } else {
            errorString = "";
        }
        // build the topology-level summary
        TopologySummary topologySummary = new TopologySummary();
        topologySummary.set_id(topologyId);
        topologySummary.set_name(base.getStormName());
        topologySummary.set_uptimeSecs(TimeUtils.time_delta(base.getLanchTimeSecs()));
        topologySummary.set_status(base.getStatusString());
        topologySummary.set_numTasks(NimbusUtils.getTopologyTaskNum(assignment));
        topologySummary.set_numWorkers(assignment.getWorkers().size());
        topologySummary.set_errorInfo(errorString);
        // build one ComponentSummary per component
        Map<String, ComponentSummary> componentSummaryMap = new HashMap<>();
        HashMap<String, List<Integer>> componentToTasks = JStormUtils.reverse_map(taskToComponent);
        for (Entry<String, List<Integer>> entry : componentToTasks.entrySet()) {
            String name = entry.getKey();
            List<Integer> taskIds = entry.getValue();
            if (taskIds == null || taskIds.size() == 0) {
                LOG.warn("No task of component " + name);
                continue;
            }
            ComponentSummary componentSummary = new ComponentSummary();
            componentSummaryMap.put(name, componentSummary);
            componentSummary.set_name(name);
            componentSummary.set_type(taskToType.get(taskIds.get(0)));
            componentSummary.set_parallel(taskIds.size());
            componentSummary.set_taskIds(taskIds);
        }
        // build one TaskSummary per task: status, uptime and errors
        Map<Integer, TaskSummary> taskSummaryMap = new TreeMap<>();
        Map<Integer, List<TaskError>> taskErrors = Cluster.get_all_task_errors(stormClusterState, topologyId);
        for (Integer taskId : taskInfoMap.keySet()) {
            TaskSummary taskSummary = new TaskSummary();
            taskSummaryMap.put(taskId, taskSummary);
            taskSummary.set_taskId(taskId);
            if (taskHbMap == null) {
                taskSummary.set_status("Starting");
                taskSummary.set_uptime(0);
            } else {
                TaskHeartbeat hb = taskHbMap.get(taskId);
                if (hb == null) {
                    taskSummary.set_status("Starting");
                    taskSummary.set_uptime(0);
                } else {
                    boolean isInactive = NimbusUtils.isTaskDead(data, topologyId, taskId);
                    if (isInactive)
                        taskSummary.set_status("INACTIVE");
                    else
                        taskSummary.set_status("ACTIVE");
                    taskSummary.set_uptime(hb.get_uptime());
                }
            }
            // skip error collection when the topology has reported no errors at all
            if (StringUtils.isBlank(errorString)) {
                continue;
            }
            List<TaskError> taskErrorList = taskErrors.get(taskId);
            if (taskErrorList != null && taskErrorList.size() != 0) {
                for (TaskError taskError : taskErrorList) {
                    ErrorInfo errorInfo = new ErrorInfo(taskError.getError(), taskError.getTimSecs(), taskError.getLevel(), taskError.getCode());
                    taskSummary.add_to_errors(errorInfo);
                    String component = taskToComponent.get(taskId);
                    componentSummaryMap.get(component).add_to_errors(errorInfo);
                }
            }
        }
        // attach host/port from the worker assignment to each task
        for (ResourceWorkerSlot workerSlot : assignment.getWorkers()) {
            String hostname = workerSlot.getHostname();
            int port = workerSlot.getPort();
            for (Integer taskId : workerSlot.getTasks()) {
                TaskSummary taskSummary = taskSummaryMap.get(taskId);
                taskSummary.set_host(hostname);
                taskSummary.set_port(port);
            }
        }
        TopologyInfo topologyInfo = new TopologyInfo();
        topologyInfo.set_topology(topologySummary);
        topologyInfo.set_components(JStormUtils.mk_list(componentSummaryMap.values()));
        topologyInfo.set_tasks(JStormUtils.mk_list(taskSummaryMap.values()));
        // return topology metric & component metric only
        List<MetricInfo> tpMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.TOPOLOGY);
        List<MetricInfo> compMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.COMPONENT);
        List<MetricInfo> workerMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.WORKER);
        List<MetricInfo> compStreamMetricList = data.getMetricCache().getMetricData(topologyId, MetaType.COMPONENT_STREAM);
        MetricInfo taskMetric = MetricUtils.mkMetricInfo();
        MetricInfo streamMetric = MetricUtils.mkMetricInfo();
        MetricInfo nettyMetric = MetricUtils.mkMetricInfo();
        MetricInfo tpMetric, compMetric, compStreamMetric, workerMetric;
        if (tpMetricList == null || tpMetricList.size() == 0) {
            tpMetric = MetricUtils.mkMetricInfo();
        } else {
            // get the last-minute topology metric
            tpMetric = tpMetricList.get(tpMetricList.size() - 1);
        }
        if (compMetricList == null || compMetricList.size() == 0) {
            compMetric = MetricUtils.mkMetricInfo();
        } else {
            compMetric = compMetricList.get(0);
        }
        if (compStreamMetricList == null || compStreamMetricList.size() == 0) {
            compStreamMetric = MetricUtils.mkMetricInfo();
        } else {
            compStreamMetric = compStreamMetricList.get(0);
        }
        if (workerMetricList == null || workerMetricList.size() == 0) {
            workerMetric = MetricUtils.mkMetricInfo();
        } else {
            workerMetric = workerMetricList.get(0);
        }
        TopologyMetric topologyMetrics = new TopologyMetric(tpMetric, compMetric, workerMetric, taskMetric, streamMetric, nettyMetric);
        topologyMetrics.set_compStreamMetric(compStreamMetric);
        topologyInfo.set_metrics(topologyMetrics);
        return topologyInfo;
    } catch (TException e) {
        LOG.info("Failed to get topologyInfo " + topologyId, e);
        throw e;
    } catch (Exception e) {
        LOG.info("Failed to get topologyInfo " + topologyId, e);
        throw new TException("Failed to get topologyInfo " + topologyId);
    } finally {
        long end = System.nanoTime();
        SimpleJStormMetric.updateNimbusHistogram("getTopologyInfo", (end - start) / TimeUtils.NS_PER_US);
    }
}
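For context, a client-side consumer of this handler might look like the sketch below. The get_* accessors are assumed to mirror the set_* calls in the handler (the standard Thrift bean convention, consistent with the get_topology()/get_metrics() usage elsewhere on this page), and client is a NimbusClient as in the first snippet:

// hedged client-side sketch: reading back the TopologyInfo built above
TopologyInfo info = client.getClient().getTopologyInfo(topologyId);
TopologySummary topology = info.get_topology();
System.out.println(topology.get_name() + ": status=" + topology.get_status()
        + ", workers=" + topology.get_numWorkers());
for (TaskSummary task : info.get_tasks()) {
    System.out.println("  task " + task.get_taskId() + " on "
            + task.get_host() + ":" + task.get_port() + " [" + task.get_status() + "]");
}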
Use of backtype.storm.generated.TopologyInfo in project jstorm by alibaba.
The class ServiceHandler, method submitTopologyWithOpts.
/**
 * Submit a topology.
 *
 * @param topologyName String: topology name
 * @param uploadedJarLocation String: path of the already uploaded jar
 * @param jsonConf String: the whole topology configuration serialized as JSON
 * @param topology StormTopology: topology object
 */
@SuppressWarnings("unchecked")
@Override
public String submitTopologyWithOpts(String topologyName, String uploadedJarLocation, String jsonConf,
                                     StormTopology topology, SubmitOptions options) throws TException {
    LOG.info("Received topology: " + topologyName + ", uploadedJarLocation:" + uploadedJarLocation);
    long start = System.nanoTime();
    // check whether the topology name is valid
    if (!Common.charValidate(topologyName)) {
        throw new InvalidTopologyException(topologyName + " is not a valid topology name");
    }
    Map<Object, Object> serializedConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf);
    if (serializedConf == null) {
        LOG.error("Failed to parse the topology configuration JSON");
        throw new InvalidTopologyException("Failed to parse the topology configuration JSON");
    }
    Common.confValidate(serializedConf, data.getConf());
    boolean enableDeploy = ConfigExtension.getTopologyHotDeplogyEnable(serializedConf);
    boolean isUpgrade = ConfigExtension.isUpgradeTopology(serializedConf);
    try {
        checkTopologyActive(data, topologyName, enableDeploy || isUpgrade);
    } catch (AlreadyAliveException e) {
        LOG.info(topologyName + " already exists");
        throw e;
    } catch (NotAliveException e) {
        LOG.info(topologyName + " is not alive");
        throw e;
    } catch (Throwable e) {
        LOG.info("Failed to check whether topology {} is alive or not", topologyName, e);
        throw new TException(e);
    }
    try {
        if (isUpgrade || enableDeploy) {
            LOG.info("start to deploy the topology");
            String topologyId = getTopologyId(topologyName);
            if (topologyId == null) {
                throw new NotAliveException(topologyName);
            }
            if (isUpgrade) {
                // gray upgrade: replace part of the running topology in place
                TopologyInfo topologyInfo = getTopologyInfo(topologyId);
                if (topologyInfo == null) {
                    throw new TException("Failed to get topology info");
                }
                int workerNum = ConfigExtension.getUpgradeWorkerNum(serializedConf);
                String component = ConfigExtension.getUpgradeComponent(serializedConf);
                Set<String> workers = ConfigExtension.getUpgradeWorkers(serializedConf);
                if (!ConfigExtension.isTmSingleWorker(serializedConf, topologyInfo.get_topology().get_numWorkers())) {
                    throw new TException("Gray upgrade requires the topology master to run in a single worker, cannot perform the upgrade!");
                }
                return grayUpgrade(topologyId, uploadedJarLocation, topology, serializedConf, component, workers, workerNum);
            } else {
                // hot deploy: kill the old topology, then fall through to a fresh submit
                LOG.info("start to kill old topology {}", topologyId);
                Map oldConf = new HashMap();
                oldConf.putAll(conf);
                Map killedStormConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore());
                if (killedStormConf != null) {
                    oldConf.putAll(killedStormConf);
                }
                NimbusUtils.transitionName(data, topologyName, true, StatusType.kill, 0);
                KillTopologyEvent.pushEvent(topologyId);
                notifyTopologyActionListener(topologyName, "killTopology");
                // wait until all workers are killed
                final long timeoutSeconds = ConfigExtension.getTaskCleanupTimeoutSec(oldConf);
                ConcurrentHashMap<String, Semaphore> topologyIdtoSem = data.getTopologyIdtoSem();
                // containsKey, not contains: ConcurrentHashMap#contains checks values
                if (!topologyIdtoSem.containsKey(topologyId)) {
                    topologyIdtoSem.putIfAbsent(topologyId, new Semaphore(0));
                }
                Semaphore semaphore = topologyIdtoSem.get(topologyId);
                if (semaphore != null) {
                    semaphore.tryAcquire(timeoutSeconds, TimeUnit.SECONDS);
                    // remove by key; the original removed by the semaphore value, a no-op
                    topologyIdtoSem.remove(topologyId);
                }
                LOG.info("successfully killed old topology {}", topologyId);
            }
        }
    } catch (Exception e) {
        String errMsg = "Failed to submit topology " + topologyName;
        LOG.error(errMsg, e);
        throw new TException(errMsg);
    }
    String topologyId;
    synchronized (data) {
        // prevent the same topology from being submitted concurrently
        Set<String> pendingTopologies = data.getPendingSubmitTopologies().buildMap().keySet();
        Pattern topologyPattern = Pattern.compile("^" + topologyName + "-\\d+-\\d+$");
        for (String cachedTopologyId : pendingTopologies) {
            if (topologyPattern.matcher(cachedTopologyId).matches()) {
                throw new AlreadyAliveException(topologyName + " was already submitted");
            }
        }
        int counter = data.getSubmittedCount().incrementAndGet();
        topologyId = Common.topologyNameToId(topologyName, counter);
        data.getPendingSubmitTopologies().put(topologyId, null);
    }
    try {
        serializedConf.put(Config.TOPOLOGY_ID, topologyId);
        serializedConf.put(Config.TOPOLOGY_NAME, topologyName);
        Map<Object, Object> stormConf;
        stormConf = NimbusUtils.normalizeConf(conf, serializedConf, topology);
        LOG.info("Normalized configuration:" + stormConf);
        Map<Object, Object> totalStormConf = new HashMap<>(conf);
        totalStormConf.putAll(stormConf);
        StormTopology normalizedTopology = NimbusUtils.normalizeTopology(stormConf, topology, true);
        // this validates the structure of the topology
        Common.validate_basic(normalizedTopology, totalStormConf, topologyId);
        // no need to generate the real topology here, so skip Common.system_topology
        // Common.system_topology(totalStormConf, topology);
        StormClusterState stormClusterState = data.getStormClusterState();
        // create /local-dir/nimbus/topologyId/xxxx files
        setupStormCode(topologyId, uploadedJarLocation, stormConf, normalizedTopology, false);
        // wait for blob replication before activating the topology
        waitForDesiredCodeReplication(conf, topologyId);
        // generate TaskInfo for every bolt or spout in ZK: /ZK/tasks/topologyId/xxx
        setupZkTaskInfo(conf, topologyId, stormClusterState);
        // make the topology error directory
        String path = Cluster.taskerror_storm_root(topologyId);
        stormClusterState.mkdir(path);
        String grayUpgradeBasePath = Cluster.gray_upgrade_base_path(topologyId);
        stormClusterState.mkdir(grayUpgradeBasePath);
        stormClusterState.mkdir(Cluster.gray_upgrade_upgraded_workers_path(topologyId));
        stormClusterState.mkdir(Cluster.gray_upgrade_upgrading_workers_path(topologyId));
        // make assignments for the topology
        LOG.info("Submit topology {} with conf {}", topologyName, serializedConf);
        makeAssignment(topologyName, topologyId, options.get_initial_status());
        // push the start event after startup
        double metricsSampleRate = ConfigExtension.getMetricSampleRate(stormConf);
        StartTopologyEvent.pushEvent(topologyId, metricsSampleRate);
        notifyTopologyActionListener(topologyName, "submitTopology");
    } catch (InvalidTopologyException e) {
        LOG.error("Topology is invalid. {}", e.get_msg());
        throw e;
    } catch (Exception e) {
        // arguments reordered to match the format string
        String errorMsg = String.format("Failed to submit topology, topologyId:%s, uploadedJarLocation:%s, root cause:%s\n\n",
                topologyId, uploadedJarLocation, e.getMessage() == null ? "submit timeout" : e.getMessage());
        LOG.error(errorMsg, e);
        throw new TopologyAssignException(errorMsg);
    } finally {
        data.getPendingSubmitTopologies().remove(topologyId);
        // spend is in microseconds (nanoseconds / NS_PER_US)
        double spend = (System.nanoTime() - start) / TimeUtils.NS_PER_US;
        SimpleJStormMetric.updateNimbusHistogram("submitTopologyWithOpts", spend);
        LOG.info("submitTopologyWithOpts {} costs {}us", topologyName, spend);
    }
    return topologyId;
}
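For reference, the client side of this RPC might look like the sketch below. It assumes topology has been built and its jar already uploaded to nimbus (yielding uploadedJarLocation), and that JStormUtils.to_json exists as the counterpart of the JStormUtils.from_json call the handler uses; SubmitOptions and TopologyInitialStatus are the Thrift types from backtype.storm.generated:

// hedged client-side sketch; see assumptions above
Map<String, Object> topologyConf = new HashMap<>();
topologyConf.put(Config.TOPOLOGY_WORKERS, 4);
SubmitOptions options = new SubmitOptions(TopologyInitialStatus.ACTIVE);
String topologyId = client.getClient().submitTopologyWithOpts(
        "my-topology", uploadedJarLocation,
        JStormUtils.to_json(topologyConf), // assumed serializer, counterpart of from_json
        topology, options);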
Use of backtype.storm.generated.TopologyInfo in project jstorm by alibaba.
The class LogController, method deepSearch.
@RequestMapping(value = "/deepSearch", method = RequestMethod.GET)
public String deepSearch(@RequestParam(value = "cluster", required = true) String clusterName,
                         @RequestParam(value = "tid", required = true) String topologyId,
                         @RequestParam(value = "key", required = false) String keyword,
                         @RequestParam(value = "caseIgnore", required = false) String caseIgnore,
                         ModelMap model) {
    clusterName = StringEscapeUtils.escapeHtml(clusterName);
    topologyId = StringEscapeUtils.escapeHtml(topologyId);
    boolean _caseIgnore = !StringUtils.isBlank(caseIgnore);
    int port = UIUtils.getSupervisorPort(clusterName);
    model.addAttribute("keyword", keyword);
    List<Future<?>> futures = new ArrayList<>();
    ConcurrentLinkedQueue<Map> result = new ConcurrentLinkedQueue<>();
    if (filterKeyword(model, keyword)) {
        NimbusClient client = null;
        try {
            // encode spaces and URL-reserved characters
            keyword = URLEncoder.encode(keyword, "UTF-8");
            client = NimbusClientManager.getNimbusClient(clusterName);
            TopologyInfo info = client.getClient().getTopologyInfo(topologyId);
            String topologyName = info.get_topology().get_name();
            List<UIWorkerMetric> workerData = UIMetricUtils.getWorkerMetrics(info.get_metrics().get_workerMetric(), topologyId, 60);
            String dir = "." + File.separator + topologyName;
            // fan out one search request per worker log file
            for (UIWorkerMetric metric : workerData) {
                String logFile = topologyName + "-worker-" + metric.getPort() + ".log";
                String url = String.format("http://%s:%s/logview?cmd=searchLog&file=%s&key=%s&offset=%s&case_ignore=%s",
                        metric.getHost(), port, getFullFile(dir, logFile), keyword, 0, _caseIgnore);
                futures.add(_backround.submit(new SearchRequest(url, metric.getHost(), metric.getPort(), dir, logFile, result)));
            }
            JStormServerUtils.checkFutures(futures);
            model.addAttribute("result", result);
        } catch (NotAliveException nae) {
            model.addAttribute("tip", String.format("The topology: %s is dead.", topologyId));
        } catch (Exception e) {
            NimbusClientManager.removeClient(clusterName);
            LOG.error(e.getMessage(), e);
            UIUtils.addErrorAttribute(model, e);
        }
    }
    model.addAttribute("clusterName", clusterName);
    model.addAttribute("topologyId", topologyId);
    model.addAttribute("logServerPort", port);
    model.addAttribute("caseIgnore", _caseIgnore);
    UIUtils.addTitleAttribute(model, "DeepSearch");
    return "deepSearch";
}
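Each fanned-out request above targets a worker's logview endpoint; with illustrative host, port, topology name, and keyword values, the generated URL has this shape:

http://10.1.1.5:8080/logview?cmd=searchLog&file=./myTopology/myTopology-worker-6800.log&key=Exception&offset=0&case_ignore=true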
Use of backtype.storm.generated.TopologyInfo in project jstorm by alibaba.
The class ClusterInfoBolt, method getTopologyTPS.
protected long getTopologyTPS(TopologySummary topology, Client client) throws NotAliveException, TException {
    long topologyTps = 0L;
    String topologyId = topology.get_id();
    // skip the monitor topology itself
    if (topologyId.startsWith("ClusterMonitor")) {
        return topologyTps;
    }
    TopologyInfo topologyInfo = client.getTopologyInfo(topologyId);
    if (topologyInfo == null) {
        return topologyTps;
    }
    // sum the TPS of every executor in the topology
    List<ExecutorSummary> executorSummaryList = topologyInfo.get_executors();
    for (ExecutorSummary executor : executorSummaryList) {
        topologyTps += getComponentTPS(executor);
    }
    LOGGER.info("topology = " + topology.get_name() + ", tps = " + topologyTps);
    return topologyTps;
}
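To aggregate this per-topology figure across a cluster, the same Thrift client can iterate the ClusterSummary; the sketch below assumes the standard Thrift getter ClusterSummary#get_topologies() for the summaries returned by getClusterInfo() (the call used in the first snippet on this page):

// hedged sketch: cluster-wide TPS built on the method above
long clusterTps = 0L;
for (TopologySummary summary : client.getClusterInfo().get_topologies()) {
    clusterTps += getTopologyTPS(summary, client);
}
LOGGER.info("cluster tps = " + clusterTps);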