Use of backtype.storm.generated.MetricInfo in project jstorm by alibaba.
From the class JStormMetrics, method computeAllMetrics.
/**
 * Converts snapshots to thrift objects. Note that timestamps are aligned to the minute during the
 * conversion, so the nimbus server gets snapshots with aligned timestamps (still in ms, as TDDL will use them).
 */
public static MetricInfo computeAllMetrics() {
    long start = System.currentTimeMillis();
    MetricInfo metricInfo = MetricUtils.mkMetricInfo();

    List<Map.Entry<String, AsmMetric>> entries = Lists.newLinkedList();
    if (enableStreamMetrics) {
        entries.addAll(streamMetrics.metrics.entrySet());
    }
    entries.addAll(taskMetrics.metrics.entrySet());
    entries.addAll(componentMetrics.metrics.entrySet());
    entries.addAll(workerMetrics.metrics.entrySet());
    entries.addAll(nettyMetrics.metrics.entrySet());
    entries.addAll(topologyMetrics.metrics.entrySet());

    for (Map.Entry<String, AsmMetric> entry : entries) {
        String name = entry.getKey();
        AsmMetric metric = entry.getValue();

        // skip disabled metrics, double check
        if (disabledMetricNames.contains(metric.getShortName())) {
            continue;
        }
        Map<Integer, AsmSnapshot> snapshots = metric.getSnapshots();
        if (snapshots.size() == 0) {
            continue;
        }

        int op = metric.getOp();
        if ((op & AsmMetric.MetricOp.LOG) == AsmMetric.MetricOp.LOG) {
            MetricUtils.printMetricSnapshot(metric, snapshots);
        }
        if ((op & AsmMetric.MetricOp.REPORT) == AsmMetric.MetricOp.REPORT) {
            MetaType metaType = MetricUtils.metaType(metric.getMetricName());
            try {
                if (metric instanceof AsmCounter) {
                    Map data = MetricUtils.toThriftCounterSnapshots(snapshots);
                    putIfNotEmpty(metricInfo.get_metrics(), name, data);
                } else if (metric instanceof AsmGauge) {
                    Map data = MetricUtils.toThriftGaugeSnapshots(snapshots);
                    putIfNotEmpty(metricInfo.get_metrics(), name, data);
                } else if (metric instanceof AsmMeter) {
                    Map data = MetricUtils.toThriftMeterSnapshots(snapshots);
                    putIfNotEmpty(metricInfo.get_metrics(), name, data);
                } else if (metric instanceof AsmHistogram) {
                    Map data = MetricUtils.toThriftHistoSnapshots(metaType, snapshots);
                    putIfNotEmpty(metricInfo.get_metrics(), name, data);
                }
            } catch (Exception ex) {
                LOG.error("Error", ex);
            }
        }
    }

    if (debug) {
        MetricUtils.printMetricInfo(metricInfo, debugMetricNames);
    }
    LOG.debug("compute all metrics, cost:{}", System.currentTimeMillis() - start);
    return metricInfo;
}
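The javadoc above says timestamps are aligned to the minute while staying in milliseconds; the alignment itself happens inside the toThrift*Snapshots helpers, which are not shown in this excerpt. A minimal sketch of that truncation, assuming plain integer arithmetic:

// Hypothetical illustration of minute alignment; the real logic lives in MetricUtils and may differ.
class TimestampAlignmentSketch {
    static final long MINUTE_MS = 60_000L;

    // Truncate a millisecond timestamp to the start of its minute, keeping the unit in ms.
    static long alignToMinute(long timestampMs) {
        return (timestampMs / MINUTE_MS) * MINUTE_MS;
    }
}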
Use of backtype.storm.generated.MetricInfo in project jstorm by alibaba.
From the class JStormMetrics, method approximateComputeAllMetrics.
public static MetricInfo approximateComputeAllMetrics() {
    long start = System.currentTimeMillis();
    MetricInfo metricInfo = MetricUtils.mkMetricInfo();
    Map<String, Map<Integer, MetricSnapshot>> mergeWorkerMetrics = metricInfo.get_metrics();

    mergeLevelMetricSnapshot(mergeWorkerMetrics, streamMetrics.metrics);
    mergeLevelMetricSnapshot(mergeWorkerMetrics, taskMetrics.metrics);
    mergeLevelMetricSnapshot(mergeWorkerMetrics, componentMetrics.metrics);
    mergeLevelMetricSnapshot(mergeWorkerMetrics, workerMetrics.metrics);
    mergeLevelMetricSnapshot(mergeWorkerMetrics, nettyMetrics.metrics);
    mergeLevelMetricSnapshot(mergeWorkerMetrics, topologyMetrics.metrics);

    if (debug) {
        MetricUtils.printMetricInfo(metricInfo, debugMetricNames);
    }

    Set<String> fiterStreamNames = new HashSet<>();
    if (!enableStreamMetrics) {
        for (Map.Entry<String, AsmMetric> entry : streamMetrics.metrics.entrySet()) {
            fiterStreamNames.add(entry.getKey());
        }
    }

    Map<String, Map<Integer, MetricSnapshot>> uploadWorkerMetrics = new HashMap<>();
    for (Map.Entry<String, Map<Integer, MetricSnapshot>> entry : mergeWorkerMetrics.entrySet()) {
        if (!fiterStreamNames.contains(entry.getKey())) {
            uploadWorkerMetrics.put(entry.getKey(), entry.getValue());
        }
    }
    metricInfo.set_metrics(uploadWorkerMetrics);

    LOG.debug("approximate compute all metrics, cost:{}", System.currentTimeMillis() - start);
    return metricInfo;
}
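mergeLevelMetricSnapshot itself is not part of this excerpt; judging from the calls above, it folds each level's name-keyed snapshots into the single worker-level map. The sketch below shows only that general map-folding shape, with placeholder names and types, not the actual jstorm helper:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Placeholder sketch of folding several name-keyed maps into one; not the jstorm implementation.
class LevelMergeSketch {
    static <K, V> Map<K, V> foldLevels(List<Map<K, V>> levels) {
        Map<K, V> merged = new HashMap<>();
        for (Map<K, V> level : levels) {
            merged.putAll(level); // later levels overwrite entries with the same metric name
        }
        return merged;
    }
}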
Use of backtype.storm.generated.MetricInfo in project jstorm by alibaba.
From the class JStormMetricsReporter, method uploadMetricData.
public void uploadMetricData() {
    if (test) {
        return;
    }
    try {
        long start = System.currentTimeMillis();
        MetricInfo workerMetricInfo = MetricUtils.metricAccurateCal ?
                JStormMetrics.computeAllMetrics() : JStormMetrics.approximateComputeAllMetrics();

        WorkerUploadMetrics upload = new WorkerUploadMetrics();
        upload.set_topologyId(topologyId);
        upload.set_supervisorId(host);
        upload.set_port(port);
        upload.set_allMetrics(workerMetricInfo);

        if (workerMetricInfo.get_metrics_size() > 0) {
            uploadMetricData(upload);
            LOG.debug("Successfully upload worker metrics, size:{}, cost:{}",
                    workerMetricInfo.get_metrics_size(), System.currentTimeMillis() - start);
        } else {
            LOG.debug("No metrics to upload.");
        }
    } catch (Exception e) {
        LOG.error("Failed to upload worker metrics", e);
    }
}
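uploadMetricData takes no arguments and is presumably driven by a timer inside the reporter. A minimal sketch of such periodic scheduling, assuming a reporter instance is available; the interval and the wiring are assumptions, not jstorm's actual configuration:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical wiring; how jstorm actually schedules the reporter is not part of this excerpt.
class ReporterScheduleSketch {
    static void start(JStormMetricsReporter reporter, long intervalSec) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // push worker metrics on a fixed interval; the interval value is an assumption
        scheduler.scheduleAtFixedRate(reporter::uploadMetricData, intervalSec, intervalSec, TimeUnit.SECONDS);
    }
}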
Use of backtype.storm.generated.MetricInfo in project jstorm by alibaba.
From the class MetricUtils, method mkTopologyMetric.
public static TopologyMetric mkTopologyMetric() {
    TopologyMetric emptyTopologyMetric = new TopologyMetric();
    emptyTopologyMetric.set_topologyMetric(new MetricInfo());
    emptyTopologyMetric.set_componentMetric(new MetricInfo());
    emptyTopologyMetric.set_workerMetric(new MetricInfo());
    emptyTopologyMetric.set_taskMetric(new MetricInfo());
    emptyTopologyMetric.set_streamMetric(new MetricInfo());
    emptyTopologyMetric.set_nettyMetric(new MetricInfo());
    return emptyTopologyMetric;
}
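The factory pre-populates all six sub-metric fields with empty MetricInfo objects, so callers can read any of them without a null check, for example:

// Assumed usage; get_topologyMetric() is the getter used in mergeMetrics below.
TopologyMetric tm = MetricUtils.mkTopologyMetric();
MetricInfo topologyLevel = tm.get_topologyMetric(); // never null, just empty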
Use of backtype.storm.generated.MetricInfo in project jstorm by alibaba.
From the class TopologyMetricContext, method mergeMetrics.
public TopologyMetric mergeMetrics() {
    long start = System.currentTimeMillis();
    if (getMemCache().size() == 0) {
        //LOG.info("topology:{}, metric size is 0, skip...", topologyId);
        return null;
    }
    if (isMerging()) {
        LOG.info("topology {} is already merging, skip...", topologyId);
        return null;
    }
    setMerging(true);
    try {
        Map<String, MetricInfo> workerMetricMap = this.memCache;
        // reset mem cache
        this.memCache = new ConcurrentHashMap<>();

        MetricInfo topologyMetrics = MetricUtils.mkMetricInfo();
        MetricInfo componentMetrics = MetricUtils.mkMetricInfo();
        MetricInfo taskMetrics = MetricUtils.mkMetricInfo();
        MetricInfo streamMetrics = MetricUtils.mkMetricInfo();
        MetricInfo workerMetrics = MetricUtils.mkMetricInfo();
        MetricInfo nettyMetrics = MetricUtils.mkMetricInfo();
        TopologyMetric tpMetric = new TopologyMetric(
                topologyMetrics, componentMetrics, workerMetrics, taskMetrics, streamMetrics, nettyMetrics);

        // metric name => worker count
        Map<String, Integer> metricNameCounters = new HashMap<>();
        // special for histograms & timers, we merge the points to get a new snapshot data.
        Map<String, Map<Integer, Histogram>> histograms = new HashMap<>();

        // iterate metrics of all workers within the same topology
        for (ConcurrentMap.Entry<String, MetricInfo> metricEntry : workerMetricMap.entrySet()) {
            MetricInfo metricInfo = metricEntry.getValue();
            // merge counters: add old and new values, note we only add incoming new metrics and overwrite
            // existing data, same for all below.
            Map<String, Map<Integer, MetricSnapshot>> metrics = metricInfo.get_metrics();
            for (Map.Entry<String, Map<Integer, MetricSnapshot>> metric : metrics.entrySet()) {
                String metricName = metric.getKey();
                Map<Integer, MetricSnapshot> data = metric.getValue();
                MetaType metaType = MetricUtils.metaType(metricName);
                MetricType metricType = MetricUtils.metricType(metricName);

                if (metricType == MetricType.COUNTER) {
                    mergeCounters(tpMetric, metaType, metricName, data);
                } else if (metricType == MetricType.GAUGE) {
                    mergeGauges(tpMetric, metaType, metricName, data);
                } else if (metricType == MetricType.METER) {
                    mergeMeters(getMetricInfoByType(tpMetric, metaType), metricName, data, metricNameCounters);
                } else if (metricType == MetricType.HISTOGRAM) {
                    mergeHistograms(getMetricInfoByType(tpMetric, metaType), metricName, data,
                            metricNameCounters, histograms);
                }
            }
        }
        adjustHistogramTimerMetrics(tpMetric, metricNameCounters, histograms);
        // for counters, we only report delta data every time, need to sum with old data
        //adjustCounterMetrics(tpMetric, oldTpMetric);

        LOG.info("merge topology metrics:{}, cost:{}", topologyId, System.currentTimeMillis() - start);
        // debug logs
        MetricUtils.printMetricInfo(tpMetric.get_topologyMetric());
        return tpMetric;
    } finally {
        setMerging(false);
    }
}
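mergeCounters and the other merge helpers are not included in this excerpt. For counters, the natural merge across workers is to sum the values reported for the same metric name and time window; a rough sketch under that assumption, using placeholder types rather than the thrift MetricSnapshot:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Assumed behavior only: sum per-window counter values from several workers into one map.
class CounterMergeSketch {
    static Map<Integer, Long> sumCounterWindows(List<Map<Integer, Long>> perWorkerWindows) {
        Map<Integer, Long> merged = new HashMap<>();
        for (Map<Integer, Long> windows : perWorkerWindows) {
            for (Map.Entry<Integer, Long> e : windows.entrySet()) {
                merged.merge(e.getKey(), e.getValue(), Long::sum);
            }
        }
        return merged;
    }
}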