Search in sources :

Example 71 with Metrics

use of org.apache.hadoop.metrics2.annotation.Metrics in project hbase by apache.

The following shows the method `tag` of the class `DynamicMetricsRegistry`.

/**
 * Adds a tag to the metrics registry.
 * @param info metadata describing the tag
 * @param value the tag's value
 * @param override whether an existing tag with the same name may be replaced
 * @return this registry, to allow chained calls
 */
public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean override) {
    MetricsTag newTag = Interns.tag(info, value);
    if (override) {
        // Replacement is allowed: unconditionally install the tag.
        tagsMap.put(info.name(), newTag);
    } else {
        // Only install if absent; a non-null result means a tag was already there.
        MetricsTag previous = tagsMap.putIfAbsent(info.name(), newTag);
        if (previous != null) {
            throw new MetricsException("Tag " + info.name() + " already exists!");
        }
    }
    return this;
}
Also used : MetricsException(org.apache.hadoop.metrics2.MetricsException) MetricsTag(org.apache.hadoop.metrics2.MetricsTag)

Example 72 with Metrics

use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The following shows the method `verifyExpectedCalls` of the class `TestSchedulingUpdate`.

/**
 * Asserts that the scheduler's update thread has run, that the root queue
 * metrics carry the expected resource values, and that the
 * {@code UpdateThreadRunNumOps} metric equals {@code expectedCalls}.
 *
 * @param expectedCalls expected number of update-thread runs
 * @param memory expected available memory (MB) on the root queue
 * @param vcores expected available virtual cores on the root queue
 * @throws InterruptedException if interrupted while polling
 */
private void verifyExpectedCalls(long expectedCalls, int memory, int vcores) throws InterruptedException {
    // Poll (up to 100 x 10 ms) until the update thread reports a run.
    for (int attempt = 0; attempt < 100; attempt++) {
        if (scheduler.fsOpDurations.hasUpdateThreadRunChanged()) {
            break;
        }
        Thread.sleep(10);
    }
    assertTrue("Update Thread has not run based on its metrics", scheduler.fsOpDurations.hasUpdateThreadRunChanged());
    assertEquals("Root queue metrics memory does not have expected value", memory, scheduler.getRootQueueMetrics().getAvailableMB());
    assertEquals("Root queue metrics cpu does not have expected value", vcores, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
    // Snapshot the duration metrics and locate the NumOps counter.
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    scheduler.fsOpDurations.getMetrics(collector, true);
    MetricsRecord record = collector.getRecords().get(0);
    boolean foundNumOps = false;
    for (AbstractMetric metric : record.metrics()) {
        if (metric.name().contains("UpdateThreadRunNumOps")) {
            assertEquals("Update Thread did not run expected number of times " + "based on metric record count", expectedCalls, metric.value());
            foundNumOps = true;
        }
    }
    assertTrue("Did not find metric for UpdateThreadRunNumOps", foundNumOps);
}
Also used : MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) MetricsCollectorImpl(org.apache.hadoop.metrics2.impl.MetricsCollectorImpl)

Example 73 with Metrics

use of org.apache.hadoop.metrics2.annotation.Metrics in project hive by apache.

The following shows the method `create` of the class `LlapTaskSchedulerMetrics`.

/**
 * Creates a task-scheduler metrics source (with JVM metrics attached) and
 * registers it with the LLAP metrics system.
 *
 * @param displayName name under which the source is registered
 * @param sessionId session identifier recorded with the metrics
 * @return the registered {@code LlapTaskSchedulerMetrics} instance
 */
public static LlapTaskSchedulerMetrics create(String displayName, String sessionId) {
    MetricsSystem metricsSystem = LlapMetricsSystem.instance();
    JvmMetrics jvmMetrics = JvmMetrics.create(MetricsUtils.METRICS_PROCESS_NAME, sessionId, metricsSystem);
    LlapTaskSchedulerMetrics source = new LlapTaskSchedulerMetrics(displayName, jvmMetrics, sessionId);
    return metricsSystem.register(displayName, "Llap Task Scheduler Metrics", source);
}
Also used : JvmMetrics(org.apache.hadoop.metrics2.source.JvmMetrics) MetricsSystem(org.apache.hadoop.metrics2.MetricsSystem) LlapMetricsSystem(org.apache.hadoop.hive.llap.metrics.LlapMetricsSystem)

Example 74 with Metrics

use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The following shows the method `putMetrics` of the class `GangliaSink30`.

/**
 * Publishes the metrics in {@code record} to Ganglia. Depending on sink
 * configuration this either performs a dense publish (the full cached metric
 * set, refreshed from this record) or a sparse publish (only the metrics
 * present in this record, i.e. on-change).
 *
 * @param record the metrics record to publish
 * @throws MetricsException if emitting to Ganglia fails
 */
@Override
public void putMetrics(MetricsRecord record) {
    // Supports dense publish of metrics or sparse (only-on-change) publish of metrics.
    try {
        String recordName = record.name();
        String contextName = record.context();
        // Build the "context.record[.prefix]." name prefix once; each metric
        // name is appended to it and the buffer is trimmed back afterwards.
        StringBuilder sb = new StringBuilder();
        sb.append(contextName);
        sb.append('.');
        sb.append(recordName);
        appendPrefix(record, sb);
        String groupName = sb.toString();
        sb.append('.');
        int sbBaseLen = sb.length();
        // reset the buffer to the beginning
        resetBuffer();
        if (!isSupportSparseMetrics()) {
            // Dense mode: merge this record into the cache and publish the
            // complete, updated metric set.
            Record cachedMetrics = metricsCache.update(record);
            if (cachedMetrics != null && cachedMetrics.metricsEntrySet() != null) {
                for (Map.Entry<String, AbstractMetric> entry : cachedMetrics.metricsEntrySet()) {
                    emitGangliaMetric(entry.getValue(), groupName, sb, sbBaseLen);
                }
            }
        } else {
            // Sparse mode: publish only the metrics carried by this record.
            // (Iterating an empty collection is a no-op, so no size check is needed.)
            for (AbstractMetric metric : record.metrics()) {
                emitGangliaMetric(metric, groupName, sb, sbBaseLen);
            }
        }
    } catch (IOException io) {
        throw new MetricsException("Failed to putMetrics", io);
    }
}

/**
 * Emits a single metric to Ganglia, resolving its Ganglia type, slope and
 * per-metric configuration. {@code sb} holds the "context.record." prefix on
 * entry; its length is restored to {@code sbBaseLen} before returning so the
 * buffer can be reused for the next metric.
 *
 * @param metric the metric to emit
 * @param groupName Ganglia group name for this record
 * @param sb reusable name buffer, pre-filled with the common prefix
 * @param sbBaseLen length of the common prefix in {@code sb}
 * @throws IOException if sending the metric fails
 */
private void emitGangliaMetric(AbstractMetric metric, String groupName, StringBuilder sb, int sbBaseLen) throws IOException {
    sb.append(metric.name());
    String name = sb.toString();
    // visit the metric to identify the Ganglia type and slope
    metric.visit(gangliaMetricVisitor);
    String type = gangliaMetricVisitor.getType();
    GangliaSlope slopeFromMetric = gangliaMetricVisitor.getSlope();
    GangliaConf gConf = getGangliaConfForMetric(name);
    GangliaSlope calculatedSlope = calculateSlope(gConf, slopeFromMetric);
    // send metric to Ganglia
    emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
    // reset the length of the buffer for next iteration
    sb.setLength(sbBaseLen);
}
Also used : AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) IOException(java.io.IOException) Collection(java.util.Collection) Record(org.apache.hadoop.metrics2.util.MetricsCache.Record) MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) MetricsException(org.apache.hadoop.metrics2.MetricsException) HashMap(java.util.HashMap) Map(java.util.Map)

Example 75 with Metrics

use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The following shows the method `initMetricsSystem` of the class `RollingFileSystemSinkTestBase`.

/**
 * Sets up the metrics system, starts it, and returns it.
 * @param path the base path for the sink
 * @param ignoreErrors whether the sink should ignore errors
 * @param allowAppend whether the sink is allowed to append to existing files
 * @param useSecureParams whether to set the principal and keytab properties
 * @return the started {@code org.apache.hadoop.metrics2.MetricsSystem}
 */
protected MetricsSystem initMetricsSystem(String path, boolean ignoreErrors, boolean allowAppend, boolean useSecureParams) {
    // If the prefix is not lower case, the metrics system won't be able to
    // read any of the properties.
    String prefix = methodName.getMethodName().toLowerCase();
    // All sink properties share this key prefix.
    String sinkKey = prefix + ".sink.mysink0.";
    ConfigBuilder builder = new ConfigBuilder()
        .add("*.period", 10000)
        .add(sinkKey + "class", MockSink.class.getName())
        .add(sinkKey + "basepath", path)
        .add(sinkKey + "source", "testsrc")
        .add(sinkKey + "context", "test1")
        .add(sinkKey + "ignore-error", ignoreErrors)
        .add(sinkKey + "allow-append", allowAppend)
        .add(sinkKey + "roll-offset-interval-millis", 0)
        .add(sinkKey + "roll-interval", "1h");
    if (useSecureParams) {
        builder
            .add(sinkKey + "keytab-key", SINK_KEYTAB_FILE_KEY)
            .add(sinkKey + "principal-key", SINK_PRINCIPAL_KEY);
    }
    builder.save(TestMetricsConfig.getTestFilename("hadoop-metrics2-" + prefix));
    MetricsSystemImpl ms = new MetricsSystemImpl(prefix);
    ms.start();
    return ms;
}
Also used : ConfigBuilder(org.apache.hadoop.metrics2.impl.ConfigBuilder) MetricsSystemImpl(org.apache.hadoop.metrics2.impl.MetricsSystemImpl)

Aggregations

Test (org.junit.Test)67 MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)30 MetricsRecord (org.apache.hadoop.metrics2.MetricsRecord)20 AbstractMetric (org.apache.hadoop.metrics2.AbstractMetric)19 MetricsSystem (org.apache.hadoop.metrics2.MetricsSystem)19 MetricsTag (org.apache.hadoop.metrics2.MetricsTag)18 MetricsSource (org.apache.hadoop.metrics2.MetricsSource)16 ArrayList (java.util.ArrayList)11 IOException (java.io.IOException)10 HashSet (java.util.HashSet)8 Path (org.apache.hadoop.fs.Path)8 MetricsException (org.apache.hadoop.metrics2.MetricsException)8 MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl)7 DefaultMetricsSystem (org.apache.hadoop.metrics2.lib.DefaultMetricsSystem)7 Configuration (org.apache.hadoop.conf.Configuration)5 Map (java.util.Map)4 FileSystem (org.apache.hadoop.fs.FileSystem)4 MetricsSink (org.apache.hadoop.metrics2.MetricsSink)4 MetricsSystemImpl (org.apache.hadoop.metrics2.impl.MetricsSystemImpl)4 GraphiteSink (org.apache.hadoop.metrics2.sink.GraphiteSink)4