
Example 36 with MetricsRecord

Use of org.apache.hadoop.metrics2.MetricsRecord in project hadoop by apache.

From the class AzureBlobStorageTestAccount, method getLatestMetricValue:

public Number getLatestMetricValue(String metricName, Number defaultValue) throws IndexOutOfBoundsException {
    boolean found = false;
    Number ret = null;
    // Scan every record; a later match overwrites an earlier one, so the
    // most recently reported value wins.
    for (MetricsRecord currentRecord : allMetrics) {
        // First check whether this record came from my file system.
        if (wasGeneratedByMe(currentRecord)) {
            for (AbstractMetric currentMetric : currentRecord.metrics()) {
                if (currentMetric.name().equalsIgnoreCase(metricName)) {
                    found = true;
                    ret = currentMetric.value();
                    break;
                }
            }
        }
    }
    if (!found) {
        if (defaultValue != null) {
            return defaultValue;
        }
        throw new IndexOutOfBoundsException(metricName);
    }
    return ret;
}
Also used: MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric)
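
For context, a short usage sketch. The factory method and metric name below are illustrative assumptions for this page, not taken from the example above:

// Hypothetical usage in a test; create() and the metric name are assumptions.
AzureBlobStorageTestAccount testAccount = AzureBlobStorageTestAccount.create();
// ... run file system operations that emit metrics ...
// Returns the value from the newest matching record, or the default (0) if
// the metric was never reported.
Number bytesWritten = testAccount.getLatestMetricValue("wasb_bytes_written_last_second", 0);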

Example 37 with MetricsRecord

Use of org.apache.hadoop.metrics2.MetricsRecord in project phoenix by apache.

From the class PhoenixMetricsSink, method putMetrics:

/**
 * Add a new metric record to be written.
 *
 * @param record the metrics record to be written
 */
@Override
public void putMetrics(MetricsRecord record) {
    // only handle tracing records; filtering here guards against a misconfigured sink
    if (!record.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) {
        return;
    }
    // don't initialize until we actually have something to write
    lazyInitialize();
    String stmt = "UPSERT INTO " + table + " (";
    // collect the column names and values that should be written
    List<String> keys = new ArrayList<String>();
    List<Object> values = new ArrayList<Object>();
    // we need to keep variable values in a separate list since they may contain
    // spaces, which would trip up the SQL parser; instead, we bind them after
    // the statement is prepared
    List<String> variableValues = new ArrayList<String>(record.tags().size());
    keys.add(TRACE.columnName);
    values.add(Long.parseLong(record.name().substring(TracingUtils.METRIC_SOURCE_KEY.length())));
    keys.add(DESCRIPTION.columnName);
    values.add(VARIABLE_VALUE);
    variableValues.add(record.description());
    // add each of the metrics
    for (AbstractMetric metric : record.metrics()) {
        // name of the metric is also the column name to which we write
        keys.add(MetricInfo.getColumnName(metric.name()));
        values.add(metric.value());
    }
    // pull the tags out so their values can be bound later (otherwise each
    // would have to be a single literal value)
    int annotationCount = 0;
    int tagCount = 0;
    for (MetricsTag tag : record.tags()) {
        if (tag.name().equals(ANNOTATION.traceName)) {
            addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, tag, ANNOTATION, annotationCount);
            annotationCount++;
        } else if (tag.name().equals(TAG.traceName)) {
            addDynamicEntry(keys, values, variableValues, TAG_FAMILY, tag, TAG, tagCount);
            tagCount++;
        } else if (tag.name().equals(HOSTNAME.traceName)) {
            keys.add(HOSTNAME.columnName);
            values.add(VARIABLE_VALUE);
            variableValues.add(tag.value());
        } else if (tag.name().equals("Context")) {
            // ignored
        } else {
            LOG.error("Got an unexpected tag: " + tag);
        }
    }
    // add the tag and annotation counts, now that we know them
    keys.add(TAG_COUNT);
    values.add(tagCount);
    keys.add(ANNOTATION_COUNT);
    values.add(annotationCount);
    // compile the statement together
    stmt += COMMAS.join(keys);
    stmt += ") VALUES (" + COMMAS.join(values) + ")";
    if (LOG.isTraceEnabled()) {
        LOG.trace("Logging metrics to phoenix table via: " + stmt);
        LOG.trace("With tags: " + variableValues);
    }
    try {
        PreparedStatement ps = conn.prepareStatement(stmt);
        // bind everything that might not parse cleanly as a statement literal
        int index = 1;
        for (String tag : variableValues) {
            ps.setString(index++, tag);
        }
        // Not going through the standard route of using statement.execute() as that code path
        // is blocked if the metadata hasn't been upgraded to the new minor release.
        MutationPlan plan = ps.unwrap(PhoenixPreparedStatement.class).compileMutation(stmt);
        MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState();
        MutationState newState = plan.execute();
        state.join(newState);
    } catch (SQLException e) {
        LOG.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt, e);
    }
}
Also used: PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SQLException(java.sql.SQLException) ArrayList(java.util.ArrayList) AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) PreparedStatement(java.sql.PreparedStatement) PhoenixPreparedStatement(org.apache.phoenix.jdbc.PhoenixPreparedStatement) MetricsTag(org.apache.hadoop.metrics2.MetricsTag) MutationPlan(org.apache.phoenix.compile.MutationPlan) MutationState(org.apache.phoenix.execute.MutationState)
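
To make the statement-assembly step concrete, here is a minimal sketch of the same build-then-bind pattern over plain JDBC. The table name, column names, and method name are illustrative assumptions; only the pattern mirrors the Phoenix code above:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.ArrayList;
import java.util.List;

public class UpsertSketch {
    // Sketch only: inline literals that are safe (numbers), emit "?" for
    // free-form strings, and bind those strings after preparing the statement.
    static void writeRow(Connection conn, long traceId, String description) throws Exception {
        List<String> keys = new ArrayList<>();
        List<Object> values = new ArrayList<>();
        List<String> variableValues = new ArrayList<>();
        keys.add("trace_id");
        // numeric literal: safe to inline directly into the statement text
        values.add(traceId);
        keys.add("description");
        // free-form text may contain spaces, so emit a placeholder and bind later
        values.add("?");
        variableValues.add(description);
        StringBuilder stmt = new StringBuilder("UPSERT INTO trace_stats (");
        stmt.append(String.join(",", keys)).append(") VALUES (");
        for (int i = 0; i < values.size(); i++) {
            if (i > 0) {
                stmt.append(',');
            }
            stmt.append(values.get(i));
        }
        stmt.append(')');
        try (PreparedStatement ps = conn.prepareStatement(stmt.toString())) {
            int index = 1;
            for (String v : variableValues) {
                ps.setString(index++, v);
            }
            ps.execute();
        }
    }
}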

Example 38 with MetricsRecord

Use of org.apache.hadoop.metrics2.MetricsRecord in project hbase by apache.

From the class TestMutableRangeHistogram, method testLastBucketWithSizeHistogram:

/**
 * Verify the distribution calculated for the last bucket; see HBASE-24615 for details.
 */
@Test
public void testLastBucketWithSizeHistogram() {
    // create the histogram and initialize its minValue and maxValue
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    MutableSizeHistogram histogram = new MutableSizeHistogram(SIZE_HISTOGRAM_NAME, "");
    long[] ranges = histogram.getRanges();
    int len = ranges.length;
    histogram.add(0L);
    histogram.add(ranges[len - 1]);
    histogram.snapshot(collector.addRecord(RECORD_NAME), true);
    collector.clear();
    // fill up values and snapshot
    histogram.add(ranges[len - 2] * 2);
    histogram.add(ranges[len - 1] * 2);
    histogram.snapshot(collector.addRecord(RECORD_NAME), true);
    List<? extends MetricsRecord> records = collector.getRecords();
    assertEquals(1, records.size());
    MetricsRecord record = records.iterator().next();
    assertEquals(RECORD_NAME, record.name());
    // get size range metrics
    String histogramMetricPrefix = SIZE_HISTOGRAM_NAME + "_" + histogram.getRangeType();
    List<AbstractMetric> metrics = new ArrayList<>();
    for (AbstractMetric metric : record.metrics()) {
        if (metric.name().startsWith(histogramMetricPrefix)) {
            metrics.add(metric);
        }
    }
    assertEquals(2, metrics.size());
    // check range [10000000,100000000]
    String metricName = histogramMetricPrefix + "_" + ranges[len - 2] + "-" + ranges[len - 1];
    assertEquals(metricName, metrics.get(0).name());
    assertEquals(1, metrics.get(0).value().longValue());
    // check range [100000000, inf]
    metricName = histogramMetricPrefix + "_" + ranges[len - 1] + "-inf";
    assertEquals(metricName, metrics.get(1).name());
    assertEquals(1, metrics.get(1).value().longValue());
}
Also used: MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) ArrayList(java.util.ArrayList) AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) MetricsCollectorImpl(org.apache.hadoop.metrics2.impl.MetricsCollectorImpl) Test(org.junit.Test)
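
For reference, a short sketch of how the bucket names asserted above are formed. The power-of-ten ranges and the "SizeRangeCount" suffix mirror what MutableSizeHistogram is understood to use, but treat the exact literals (and the "TestSize" name) as assumptions:

// Illustrative only: assumed default ranges and range type for the size histogram.
long[] ranges = {10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000};
String prefix = "TestSize" + "_" + "SizeRangeCount"; // SIZE_HISTOGRAM_NAME + "_" + getRangeType()
int len = ranges.length;
// interior bucket: "<prefix>_<low>-<high>"
String nextToLast = prefix + "_" + ranges[len - 2] + "-" + ranges[len - 1];
// overflow bucket, the subject of HBASE-24615: "<prefix>_<max>-inf"
String last = prefix + "_" + ranges[len - 1] + "-inf";
System.out.println(nextToLast); // TestSize_SizeRangeCount_10000000-100000000
System.out.println(last); // TestSize_SizeRangeCount_100000000-inf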

Aggregations

AbstractMetric (org.apache.hadoop.metrics2.AbstractMetric): 27 uses
MetricsRecord (org.apache.hadoop.metrics2.MetricsRecord): 25 uses
MetricsTag (org.apache.hadoop.metrics2.MetricsTag): 20 uses
Test (org.junit.Test): 18 uses
ArrayList (java.util.ArrayList): 10 uses
HashSet (java.util.HashSet): 8 uses
IOException (java.io.IOException): 7 uses
MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl): 4 uses
GraphiteSink (org.apache.hadoop.metrics2.sink.GraphiteSink): 4 uses
MetricsException (org.apache.hadoop.metrics2.MetricsException): 3 uses
MetricsSink (org.apache.hadoop.metrics2.MetricsSink): 3 uses
Matchers.anyString (org.mockito.Matchers.anyString): 3 uses
DatagramPacket (java.net.DatagramPacket): 2 uses
DatagramSocket (java.net.DatagramSocket): 2 uses
HashMap (java.util.HashMap): 2 uses
StatsDSink (org.apache.hadoop.metrics2.sink.StatsDSink): 2 uses
StatsD (org.apache.hadoop.metrics2.sink.StatsDSink.StatsD): 2 uses
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 2 uses
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 2 uses
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 2 uses