Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hbase by apache.
The class HBaseMetrics2HadoopMetricsAdapter, method snapshotAllMetrics:
/**
 * Iterates over the metrics in the {@code MetricRegistry} and adds each one
 * to the {@code builder}.
 *
 * @param metricRegistry the registry whose metrics are snapshotted
 * @param builder A record builder
 */
public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) {
  Map<String, Metric> metrics = metricRegistry.getMetrics();
  for (Map.Entry<String, Metric> e : metrics.entrySet()) {
    // Always capitalize the name
    String name = StringUtils.capitalize(e.getKey());
    Metric metric = e.getValue();
    if (metric instanceof Gauge) {
      addGauge(name, (Gauge<?>) metric, builder);
    } else if (metric instanceof Counter) {
      addCounter(name, (Counter) metric, builder);
    } else if (metric instanceof Histogram) {
      addHistogram(name, (Histogram) metric, builder);
    } else if (metric instanceof Meter) {
      addMeter(name, (Meter) metric, builder);
    } else if (metric instanceof Timer) {
      addTimer(name, (Timer) metric, builder);
    } else {
      LOG.info("Ignoring unknown Metric class " + metric.getClass().getName());
    }
  }
}
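The per-type helpers referenced above (addGauge, addCounter, and so on) each translate one metric into calls on Hadoop's MetricsRecordBuilder. As a minimal sketch, not the actual HBase implementation, an addGauge helper could look like the following; the empty-string description, the handled value types, and the use of org.apache.hadoop.metrics2.lib.Interns are assumptions:

private void addGauge(String name, Gauge<?> gauge, MetricsRecordBuilder builder) {
  Object value = gauge.getValue();
  // MetricsRecordBuilder only accepts numeric gauges, so dispatch on the
  // runtime type of the gauge's value (a sketch; the real helper covers more).
  if (value instanceof Integer || value instanceof Long) {
    builder.addGauge(Interns.info(name, ""), ((Number) value).longValue());
  } else if (value instanceof Float || value instanceof Double) {
    builder.addGauge(Interns.info(name, ""), ((Number) value).doubleValue());
  }
  // Non-numeric gauge values would simply be skipped or logged.
}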
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class TestSchedulingUpdate, method verifyExpectedCalls:
private void verifyExpectedCalls(long expectedCalls, int memory, int vcores)
    throws InterruptedException {
  boolean verified = false;
  int count = 0;
  while (count < 100) {
    if (scheduler.fsOpDurations.hasUpdateThreadRunChanged()) {
      break;
    }
    count++;
    Thread.sleep(10);
  }
  assertTrue("Update Thread has not run based on its metrics",
      scheduler.fsOpDurations.hasUpdateThreadRunChanged());
  assertEquals("Root queue metrics memory does not have expected value",
      memory, scheduler.getRootQueueMetrics().getAvailableMB());
  assertEquals("Root queue metrics cpu does not have expected value",
      vcores, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  scheduler.fsOpDurations.getMetrics(collector, true);
  MetricsRecord record = collector.getRecords().get(0);
  for (AbstractMetric abstractMetric : record.metrics()) {
    if (abstractMetric.name().contains("UpdateThreadRunNumOps")) {
      assertEquals("Update Thread did not run expected number of times "
          + "based on metric record count", expectedCalls, abstractMetric.value());
      verified = true;
    }
  }
  assertTrue("Did not find metric for UpdateThreadRunNumOps", verified);
}
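The lookup loop at the end of this test is a reusable pattern: force a snapshot with getMetrics(collector, true), then scan the resulting record for a metric by name. A standalone helper along these lines could factor that out (findMetric is a hypothetical name, not a Hadoop API):

// Returns the value of the first metric whose name contains nameFragment,
// or null if no such metric is in the record.
static Number findMetric(MetricsRecord record, String nameFragment) {
  for (AbstractMetric metric : record.metrics()) {
    if (metric.name().contains(nameFragment)) {
      return metric.value();
    }
  }
  return null;
}

This also makes the "metric not found" case explicit through the null return, rather than relying on a verified flag.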
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class StatsDSink, method putMetrics:
@Override
public void putMetrics(MetricsRecord record) {
  String hn = hostName;
  String ctx = record.context();
  String sn = serviceName;
  for (MetricsTag tag : record.tags()) {
    if (tag.info().name().equals(MsInfo.Hostname.name()) && tag.value() != null) {
      hn = tag.value();
    } else if (tag.info().name().equals(MsInfo.Context.name()) && tag.value() != null) {
      ctx = tag.value();
    } else if (tag.info().name().equals(MsInfo.ProcessName.name()) && tag.value() != null) {
      sn = tag.value();
    }
  }
  StringBuilder buf = new StringBuilder();
  if (!skipHostname && hn != null) {
    int idx = hn.indexOf(".");
    if (idx == -1) {
      buf.append(hn).append(PERIOD);
    } else {
      buf.append(hn.substring(0, idx)).append(PERIOD);
    }
  }
  buf.append(sn).append(PERIOD);
  buf.append(ctx).append(PERIOD);
  buf.append(record.name().replaceAll("\\.", "-")).append(PERIOD);
  // Collect datapoints.
  for (AbstractMetric metric : record.metrics()) {
    String type = null;
    if (metric.type().equals(MetricType.COUNTER)) {
      type = "c";
    } else if (metric.type().equals(MetricType.GAUGE)) {
      type = "g";
    }
    StringBuilder line = new StringBuilder();
    line.append(buf.toString())
        .append(metric.name().replace(' ', '_'))
        .append(":")
        .append(metric.value())
        .append("|")
        .append(type);
    writeMetric(line.toString());
  }
}
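Each emitted line follows the plain StatsD wire format prefix.metricName:value|type, for example nn-host.namenode.dfs.NNActivity.FilesCreated:12|c (values illustrative). StatsD sinks conventionally ship such lines over UDP; a minimal, self-contained sketch of that kind of writer, not StatsDSink's actual inner client, might be:

import java.io.Closeable;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

// Hypothetical UDP writer for StatsD-formatted lines.
class UdpStatsDWriter implements Closeable {
  private final InetSocketAddress target;
  private final DatagramSocket socket;

  UdpStatsDWriter(String host, int port) throws IOException {
    this.target = new InetSocketAddress(host, port);
    this.socket = new DatagramSocket();
  }

  // Sends one "name:value|type" line as a single datagram.
  void write(String line) throws IOException {
    byte[] data = line.getBytes(StandardCharsets.UTF_8);
    socket.send(new DatagramPacket(data, data.length, target));
  }

  @Override
  public void close() {
    socket.close();
  }
}

UDP keeps metric publishing fire-and-forget: a slow or absent StatsD daemon cannot block the daemon being measured.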
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class GangliaSink30, method putMetrics:
@Override
public void putMetrics(MetricsRecord record) {
  // This method handles both dense publish of metrics and sparse
  // (only-on-change) publish of metrics.
  try {
    String recordName = record.name();
    String contextName = record.context();
    StringBuilder sb = new StringBuilder();
    sb.append(contextName);
    sb.append('.');
    sb.append(recordName);
    appendPrefix(record, sb);
    String groupName = sb.toString();
    sb.append('.');
    int sbBaseLen = sb.length();
    String type = null;
    GangliaSlope slopeFromMetric = null;
    GangliaSlope calculatedSlope = null;
    Record cachedMetrics = null;
    // reset the buffer to the beginning
    resetBuffer();
    if (!isSupportSparseMetrics()) {
      // for sending dense metrics, update the metrics cache
      // and get the updated data
      cachedMetrics = metricsCache.update(record);
      if (cachedMetrics != null && cachedMetrics.metricsEntrySet() != null) {
        for (Map.Entry<String, AbstractMetric> entry : cachedMetrics.metricsEntrySet()) {
          AbstractMetric metric = entry.getValue();
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    } else {
      // we support sparse updates
      Collection<AbstractMetric> metrics = (Collection<AbstractMetric>) record.metrics();
      if (metrics.size() > 0) {
        // we got metrics, so send the latest
        for (AbstractMetric metric : record.metrics()) {
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    }
  } catch (IOException io) {
    throw new MetricsException("Failed to putMetrics", io);
  }
}
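The dense branch above is where MetricsCache.Record earns its keep: metricsCache.update(record) merges the incoming record into the cache and hands back the accumulated Record, so metrics seen in earlier updates but absent from the current one are still re-published. A minimal usage sketch of that API (dumpAccumulated is an illustrative name; assumes org.apache.hadoop.metrics2.util.MetricsCache and java.util.Map are imported):

// Merge an incoming record into the cache and print the accumulated view.
void dumpAccumulated(MetricsCache cache, MetricsRecord record) {
  MetricsCache.Record cached = cache.update(record);
  if (cached != null) {
    for (Map.Entry<String, AbstractMetric> e : cached.metricsEntrySet()) {
      System.out.println(e.getKey() + " = " + e.getValue().value());
    }
  }
}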
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
The class GangliaSink30, method appendPrefix:
@InterfaceAudience.Private
public void appendPrefix(MetricsRecord record, StringBuilder sb) {
  String contextName = record.context();
  Collection<MetricsTag> tags = record.tags();
  if (useTagsMap.containsKey(contextName)) {
    Set<String> useTags = useTagsMap.get(contextName);
    for (MetricsTag t : tags) {
      if (useTags == null || useTags.contains(t.name())) {
        if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname && t.value() != null) {
          sb.append('.').append(t.name()).append('=').append(t.value());
        }
      }
    }
  }
}
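appendPrefix extends the group name with whitelisted tags as .name=value pairs, skipping the Context and Hostname tags, so two processes in the same context publish under distinct Ganglia group names. The same logic as a standalone sketch (tagSuffix is a hypothetical helper, not part of GangliaSink30):

// Builds the ".tagName=tagValue" suffix for the given tags; a null useTags
// set means "append every tag", mirroring the method above.
static String tagSuffix(Collection<MetricsTag> tags, Set<String> useTags) {
  StringBuilder sb = new StringBuilder();
  for (MetricsTag t : tags) {
    if ((useTags == null || useTags.contains(t.name()))
        && t.info() != MsInfo.Context && t.info() != MsInfo.Hostname
        && t.value() != null) {
      sb.append('.').append(t.name()).append('=').append(t.value());
    }
  }
  return sb.toString();
}

For example, a record tagged ProcessName=NameNode would yield ".ProcessName=NameNode", which putMetrics folds into the group name before the per-metric loop.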