Use of org.apache.hadoop.metrics2.annotation.Metrics in project hbase by apache.
The class DynamicMetricsRegistry, method tag.
/**
 * Add a tag to the metrics.
 * @param info metadata of the tag
 * @param value value of the tag
 * @param override whether to override an existing tag of the same name
 * @return the registry (for chaining further tag additions etc.)
 */
public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean override) {
  MetricsTag tag = Interns.tag(info, value);
  if (!override) {
    MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag);
    if (existing != null) {
      throw new MetricsException("Tag " + info.name() + " already exists!");
    }
    return this;
  }
  tagsMap.put(info.name(), tag);
  return this;
}
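For orientation, a minimal usage sketch of this method; the registry name, tag names, and values below are illustrative, not taken from the HBase source:

DynamicMetricsRegistry registry = new DynamicMetricsRegistry("regionServer");
// override=true replaces any existing tag of the same name; with override=false,
// a second call for the same tag name would throw MetricsException.
registry.tag(Interns.info("context", "Metrics context"), "regionserver", true)
        .tag(Interns.info("hostName", "Server hostname"), "host1.example.com", true);

The boolean flag is the difference between fail-fast registration (putIfAbsent plus an exception on collision) and last-writer-wins semantics on the underlying tagsMap.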
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class TestSchedulingUpdate, method verifyExpectedCalls.
private void verifyExpectedCalls(long expectedCalls, int memory, int vcores)
    throws InterruptedException {
  boolean verified = false;
  int count = 0;
  while (count < 100) {
    if (scheduler.fsOpDurations.hasUpdateThreadRunChanged()) {
      break;
    }
    count++;
    Thread.sleep(10);
  }
  assertTrue("Update Thread has not run based on its metrics",
      scheduler.fsOpDurations.hasUpdateThreadRunChanged());
  assertEquals("Root queue metrics memory does not have expected value",
      memory, scheduler.getRootQueueMetrics().getAvailableMB());
  assertEquals("Root queue metrics cpu does not have expected value",
      vcores, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  scheduler.fsOpDurations.getMetrics(collector, true);
  MetricsRecord record = collector.getRecords().get(0);
  for (AbstractMetric abstractMetric : record.metrics()) {
    if (abstractMetric.name().contains("UpdateThreadRunNumOps")) {
      assertEquals("Update Thread did not run expected number of times based on metric record count",
          expectedCalls, abstractMetric.value());
      verified = true;
    }
  }
  assertTrue("Did not find metric for UpdateThreadRunNumOps", verified);
}
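A hypothetical call site for this helper, to show the intent (the numbers are illustrative):

// expect exactly 1 UpdateThreadRun op, with 8192 MB and 8 vcores
// available in the root queue after the update
verifyExpectedCalls(1, 8192, 8);

Note that the helper polls for at most about one second (100 iterations of 10 ms) before asserting that the update thread has run, which keeps the test from hanging if the metric never changes.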
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hive by apache.
The class LlapTaskSchedulerMetrics, method create.
public static LlapTaskSchedulerMetrics create(String displayName, String sessionId) {
  MetricsSystem ms = LlapMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create(MetricsUtils.METRICS_PROCESS_NAME, sessionId, ms);
  return ms.register(displayName, "Llap Task Scheduler Metrics",
      new LlapTaskSchedulerMetrics(displayName, jm, sessionId));
}
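A minimal, hypothetical call of this factory (the display name and session id are illustrative):

// names here are examples only, not from the Hive source
LlapTaskSchedulerMetrics metrics =
    LlapTaskSchedulerMetrics.create("LlapTaskSchedulerMetrics-host1", "session-1");

Going through LlapMetricsSystem.instance() means the JVM metrics and the scheduler metrics are registered with the same MetricsSystem for the LLAP daemon.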
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class GangliaSink30, method putMetrics.
@Override
public void putMetrics(MetricsRecord record) {
  // This method handles both dense publish of metrics and
  // sparse (only-on-change) publish of metrics.
  try {
    String recordName = record.name();
    String contextName = record.context();
    StringBuilder sb = new StringBuilder();
    sb.append(contextName);
    sb.append('.');
    sb.append(recordName);
    appendPrefix(record, sb);
    String groupName = sb.toString();
    sb.append('.');
    int sbBaseLen = sb.length();
    String type = null;
    GangliaSlope slopeFromMetric = null;
    GangliaSlope calculatedSlope = null;
    Record cachedMetrics = null;
    // reset the buffer to the beginning
    resetBuffer();
    if (!isSupportSparseMetrics()) {
      // for sending dense metrics, update the metrics cache
      // and get the updated data
      cachedMetrics = metricsCache.update(record);
      if (cachedMetrics != null && cachedMetrics.metricsEntrySet() != null) {
        for (Map.Entry<String, AbstractMetric> entry : cachedMetrics.metricsEntrySet()) {
          AbstractMetric metric = entry.getValue();
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send the metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    } else {
      // we support sparse updates
      Collection<AbstractMetric> metrics = (Collection<AbstractMetric>) record.metrics();
      if (metrics.size() > 0) {
        // we got metrics, so send the latest
        for (AbstractMetric metric : record.metrics()) {
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send the metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    }
  } catch (IOException io) {
    throw new MetricsException("Failed to putMetrics", io);
  }
}
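For context, a sketch of how this sink is typically wired up in a hadoop-metrics2 properties file; the daemon prefix and host/port below are illustrative, and supportsparse is the property this code consults via isSupportSparseMetrics():

# hadoop-metrics2.properties (sketch; prefix and address are illustrative)
namenode.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
namenode.sink.ganglia.servers=gmond-host:8649
# true selects the sparse (only-on-change) branch above; false (the default)
# selects the dense branch that goes through the metrics cache
namenode.sink.ganglia.supportsparse=true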
Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.
The class RollingFileSystemSinkTestBase, method initMetricsSystem.
/**
 * Set up the metrics system, start it, and return it.
 * @param path the base path for the sink
 * @param ignoreErrors whether the sink should ignore errors
 * @param allowAppend whether the sink is allowed to append to existing files
 * @param useSecureParams whether to set the principal and keytab properties
 * @return the org.apache.hadoop.metrics2.MetricsSystem
 */
protected MetricsSystem initMetricsSystem(String path, boolean ignoreErrors,
    boolean allowAppend, boolean useSecureParams) {
  // If the prefix is not lower case, the metrics system won't be able to
  // read any of the properties.
  String prefix = methodName.getMethodName().toLowerCase();
  ConfigBuilder builder = new ConfigBuilder()
      .add("*.period", 10000)
      .add(prefix + ".sink.mysink0.class", MockSink.class.getName())
      .add(prefix + ".sink.mysink0.basepath", path)
      .add(prefix + ".sink.mysink0.source", "testsrc")
      .add(prefix + ".sink.mysink0.context", "test1")
      .add(prefix + ".sink.mysink0.ignore-error", ignoreErrors)
      .add(prefix + ".sink.mysink0.allow-append", allowAppend)
      .add(prefix + ".sink.mysink0.roll-offset-interval-millis", 0)
      .add(prefix + ".sink.mysink0.roll-interval", "1h");
  if (useSecureParams) {
    builder.add(prefix + ".sink.mysink0.keytab-key", SINK_KEYTAB_FILE_KEY)
        .add(prefix + ".sink.mysink0.principal-key", SINK_PRINCIPAL_KEY);
  }
  builder.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-" + prefix));
  MetricsSystemImpl ms = new MetricsSystemImpl(prefix);
  ms.start();
  return ms;
}
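A hypothetical test-side usage of this helper (the path is illustrative, and MyMetricsSource is a made-up source class, not part of the test base):

MetricsSystem ms = initMetricsSystem("/tmp/rolling-sink-test", true, false, false);
// register an illustrative source under the name the config expects ("testsrc")
ms.register("testsrc", "test source", new MyMetricsSource());  // MyMetricsSource is hypothetical
ms.publishMetricsNow();  // force an immediate publish to the sink
ms.stop();

The lower-cased test-method name doubles as the metrics-system prefix, which is why each test method gets its own isolated hadoop-metrics2 configuration file.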