Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
From the class GangliaSink30, the method putMetrics:
@Override
public void putMetrics(MetricsRecord record) {
  // The method handles both cases: whether Ganglia supports dense publish
  // of metrics or sparse (only on change) publish of metrics
  try {
    String recordName = record.name();
    String contextName = record.context();
    StringBuilder sb = new StringBuilder();
    sb.append(contextName);
    sb.append('.');
    sb.append(recordName);
    appendPrefix(record, sb);
    String groupName = sb.toString();
    sb.append('.');
    int sbBaseLen = sb.length();
    String type = null;
    GangliaSlope slopeFromMetric = null;
    GangliaSlope calculatedSlope = null;
    Record cachedMetrics = null;
    // reset the buffer to the beginning
    resetBuffer();
    if (!isSupportSparseMetrics()) {
      // for sending dense metrics, update the metrics cache
      // and get the updated data
      cachedMetrics = metricsCache.update(record);
      if (cachedMetrics != null && cachedMetrics.metricsEntrySet() != null) {
        for (Map.Entry<String, AbstractMetric> entry : cachedMetrics.metricsEntrySet()) {
          AbstractMetric metric = entry.getValue();
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    } else {
      // we support sparse updates
      Collection<AbstractMetric> metrics = (Collection<AbstractMetric>) record.metrics();
      if (metrics.size() > 0) {
        // we got metrics, so send the latest
        for (AbstractMetric metric : record.metrics()) {
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    }
  } catch (IOException io) {
    throw new MetricsException("Failed to putMetrics", io);
  }
}
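In the dense path above, metricsCache.update(record) merges the incoming record into a per-record cache and returns the merged Record, so metrics that were unchanged (and therefore omitted from a sparse snapshot) are still emitted. Below is a minimal sketch of the same pattern in a standalone sink; the DenseConsoleSink class and its System.out output are illustrative, not part of Hadoop, and the SubsetConfiguration import path depends on the Hadoop release (commons-configuration in older releases, commons-configuration2 in newer ones):

import java.util.Map;

import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.util.MetricsCache;

// Hypothetical sink that densifies sparse records via MetricsCache before emitting.
public class DenseConsoleSink implements MetricsSink {
  private final MetricsCache cache = new MetricsCache();

  @Override
  public void init(SubsetConfiguration conf) {
    // no configuration needed for this sketch
  }

  @Override
  public void putMetrics(MetricsRecord record) {
    // update() merges the record into the cache and returns the merged view,
    // so metrics unchanged since earlier snapshots are still present
    MetricsCache.Record cached = cache.update(record);
    if (cached == null) {
      return;
    }
    for (Map.Entry<String, AbstractMetric> entry : cached.metricsEntrySet()) {
      System.out.println(record.context() + "." + record.name() + "."
          + entry.getKey() + "=" + entry.getValue().value());
    }
  }

  @Override
  public void flush() {
    // nothing buffered in this sketch
  }
}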
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
From the class FileSink, the method putMetrics:
@Override
public void putMetrics(MetricsRecord record) {
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  String separator = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(separator);
    separator = ", ";
    writer.print(tag.name());
    writer.print("=");
    writer.print(tag.value());
  }
  for (AbstractMetric metric : record.metrics()) {
    writer.print(separator);
    separator = ", ";
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
  }
  writer.println();
}
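For reference, this method writes each record as one line: timestamp, context.name, then tags and metrics as comma-separated key=value pairs. The timestamp, tag, and metric values below are made up for illustration:

1454013731154 jvm.JvmMetrics: Context=jvm, ProcessName=NameNode, Hostname=host1.example.com, MemNonHeapUsedM=17.2, GcCount=3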
Use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
From the class MetricsAsserts, the method getMetrics:
/**
 * Call getMetrics on source and get a record builder mock to verify.
 * @param source the metrics source
 * @param all if true, return all metrics even if not changed
 * @return the record builder mock to verify
 */
public static MetricsRecordBuilder getMetrics(MetricsSource source, boolean all) {
  MetricsRecordBuilder rb = mockMetricsRecordBuilder();
  MetricsCollector mc = rb.parent();
  source.getMetrics(mc, all);
  return rb;
}
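Typical usage in a Hadoop unit test: call getMetrics on the source under test, then verify the emitted values through the mocked builder with the assertion helpers from org.apache.hadoop.test.MetricsAsserts. The source variable and metric names here are hypothetical:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;

// Inside a test method; "mySource" and the metric names are hypothetical.
MetricsRecordBuilder rb = getMetrics(mySource, true);
assertCounter("NumOps", 42L, rb);   // verifies a long counter was emitted
assertGauge("QueueLength", 7, rb);  // verifies an int gauge was emitted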