Usage of backtype.storm.generated.TopologyMetric in project jstorm (by Alibaba):
method mergeMetrics of class TopologyMetricContext.
/**
 * Merges the per-worker metrics currently held in the memory cache into a single
 * {@code TopologyMetric} snapshot, then clears the cache.
 *
 * <p>Guarded by the merging flag so that only one merge runs at a time; returns
 * {@code null} when there is nothing to merge or a merge is already in progress.
 *
 * @return the merged topology metric, or {@code null} if skipped
 */
public TopologyMetric mergeMetrics() {
    long begin = System.currentTimeMillis();
    if (getMemCache().size() == 0) {
        //LOG.info("topology:{}, metric size is 0, skip...", topologyId);
        return null;
    }
    if (isMerging()) {
        LOG.info("topology {} is already merging, skip...", topologyId);
        return null;
    }

    setMerging(true);
    try {
        // snapshot the cache, then swap in a fresh map so new metrics keep flowing in
        Map<String, MetricInfo> cachedWorkerMetrics = this.memCache;
        this.memCache = new ConcurrentHashMap<>();

        MetricInfo mergedTopology = MetricUtils.mkMetricInfo();
        MetricInfo mergedComponent = MetricUtils.mkMetricInfo();
        MetricInfo mergedTask = MetricUtils.mkMetricInfo();
        MetricInfo mergedStream = MetricUtils.mkMetricInfo();
        MetricInfo mergedWorker = MetricUtils.mkMetricInfo();
        MetricInfo mergedNetty = MetricUtils.mkMetricInfo();
        TopologyMetric merged = new TopologyMetric(
                mergedTopology, mergedComponent, mergedWorker, mergedTask, mergedStream, mergedNetty);

        // metric name => worker count
        Map<String, Integer> counters = new HashMap<>();
        // special for histograms & timers: merge raw points into a new snapshot
        Map<String, Map<Integer, Histogram>> histogramPoints = new HashMap<>();

        // walk the metrics of every worker belonging to this topology
        for (ConcurrentMap.Entry<String, MetricInfo> workerEntry : cachedWorkerMetrics.entrySet()) {
            Map<String, Map<Integer, MetricSnapshot>> workerMetrics = workerEntry.getValue().get_metrics();
            for (Map.Entry<String, Map<Integer, MetricSnapshot>> entry : workerMetrics.entrySet()) {
                String name = entry.getKey();
                Map<Integer, MetricSnapshot> snapshots = entry.getValue();
                MetaType metaType = MetricUtils.metaType(name);
                // counters: sum old and new values; note only incoming metrics are
                // added and existing data overwritten — same for the types below
                switch (MetricUtils.metricType(name)) {
                    case COUNTER:
                        mergeCounters(merged, metaType, name, snapshots);
                        break;
                    case GAUGE:
                        mergeGauges(merged, metaType, name, snapshots);
                        break;
                    case METER:
                        mergeMeters(getMetricInfoByType(merged, metaType), name, snapshots, counters);
                        break;
                    case HISTOGRAM:
                        mergeHistograms(getMetricInfoByType(merged, metaType), name, snapshots, counters, histogramPoints);
                        break;
                    default:
                        break;
                }
            }
        }
        adjustHistogramTimerMetrics(merged, counters, histogramPoints);
        // for counters, we only report delta data every time, need to sum with old data
        //adjustCounterMetrics(tpMetric, oldTpMetric);

        LOG.info("merge topology metrics:{}, cost:{}", topologyId, System.currentTimeMillis() - begin);
        // debug logs
        MetricUtils.printMetricInfo(merged.get_topologyMetric());
        return merged;
    } finally {
        setMerging(false);
    }
}
Usage of backtype.storm.generated.TopologyMetric in project jstorm (by Alibaba):
method uploadMetrics of class MetricsUploader.
/**
 * Uploads merged topology metrics to nimbus.
 *
 * <p>When the total metric count fits within {@code MAX_BATCH_SIZE}, everything is
 * sent in a single call; otherwise topology/component metrics go first and the
 * remaining meta types are uploaded sequentially in batches, due to the thrift
 * frame size limit (15MB).
 *
 * <p>Fixes over the previous version: the nimbus client is now cleaned up under
 * {@code lock} (its creation is also lock-guarded, so cleanup must not race it)
 * and the reference is nulled after cleanup so the next invocation re-initializes
 * a fresh client instead of probing a disposed one.
 *
 * @param tpMetric merged topology metric; {@code null} is a no-op
 * @throws Exception retained for interface compatibility; upload failures are
 *                   caught, logged, and reported to zk rather than rethrown
 */
private void uploadMetrics(TopologyMetric tpMetric) throws Exception {
    long start = System.currentTimeMillis();
    if (tpMetric == null) {
        return;
    }
    try {
        synchronized (lock) {
            if (client == null || !client.isValid()) {
                client = new NimbusClientWrapper();
                client.init(conf);
            }
        }

        MetricInfo topologyMetrics = tpMetric.get_topologyMetric();
        MetricInfo componentMetrics = tpMetric.get_componentMetric();
        MetricInfo taskMetrics = tpMetric.get_taskMetric();
        MetricInfo streamMetrics = tpMetric.get_streamMetric();
        MetricInfo workerMetrics = tpMetric.get_workerMetric();
        MetricInfo nettyMetrics = tpMetric.get_nettyMetric();

        int totalSize = topologyMetrics.get_metrics_size() + componentMetrics.get_metrics_size()
                + taskMetrics.get_metrics_size() + streamMetrics.get_metrics_size()
                + workerMetrics.get_metrics_size() + nettyMetrics.get_metrics_size();

        // small enough: one shot, to reduce pressure on nimbus
        if (totalSize < MAX_BATCH_SIZE) {
            client.getClient().uploadTopologyMetrics(topologyId,
                    new TopologyMetric(topologyMetrics, componentMetrics, workerMetrics, taskMetrics, streamMetrics, nettyMetrics));
        } else {
            // coarse-grained metrics first, then batch the bulky meta types separately
            client.getClient().uploadTopologyMetrics(topologyId,
                    new TopologyMetric(topologyMetrics, componentMetrics, dummy, dummy, dummy, dummy));
            batchUploadMetrics(workerMetrics, MetaType.WORKER);
            batchUploadMetrics(taskMetrics, MetaType.TASK);
            batchUploadMetrics(streamMetrics, MetaType.STREAM);
            batchUploadMetrics(nettyMetrics, MetaType.NETTY);
        }
    } catch (Exception e) {
        String errorInfo = "Failed to upload worker metrics";
        LOG.error(errorInfo, e);
        synchronized (lock) {
            if (client != null) {
                client.cleanup();
                // drop the disposed client so the next call re-initializes it
                client = null;
            }
        }
        zkCluster.report_task_error(context.getTopologyId(), context.getThisTaskId(), errorInfo, ErrorConstants.WARN, ErrorConstants.CODE_USER);
    }
    metricLogger.info("upload metrics, cost:{}", System.currentTimeMillis() - start);
}
Aggregations