Search in sources:

Example 1 with MetricsException

Use of org.apache.hadoop.metrics2.MetricsException in the Apache Hadoop project.

From class MetricsSystemImpl, method start().

@Override
public synchronized void start() {
    checkNotNull(prefix, "prefix");
    // A redundant start() is ignored, but we log a MetricsException as the
    // warning's cause so the offending caller shows up in the stack trace.
    if (monitoring) {
        LOG.warn(prefix + " metrics system already started!", new MetricsException("Illegal start"));
        return;
    }
    // Give every registered callback a chance to run before configuration.
    for (Callback callback : callbacks) {
        callback.preStart();
    }
    for (Callback callback : namedCallbacks.values()) {
        callback.preStart();
    }
    configure(prefix);
    startTimer();
    monitoring = true;
    LOG.info(prefix + " metrics system started");
    // Notify the same callbacks now that the system is live.
    for (Callback callback : callbacks) {
        callback.postStart();
    }
    for (Callback callback : namedCallbacks.values()) {
        callback.postStart();
    }
}
Also used : MetricsException(org.apache.hadoop.metrics2.MetricsException)

Example 2 with MetricsException

Use of org.apache.hadoop.metrics2.MetricsException in the Apache Hadoop project.

From class MetricsSystemImpl, method stop().

@Override
public synchronized void stop() {
    // Stopping a system that never started is a caller bug outside mini
    // cluster mode; attach a MetricsException so the warning carries a trace.
    if (!monitoring && !DefaultMetricsSystem.inMiniClusterMode()) {
        LOG.warn(prefix + " metrics system not yet started!", new MetricsException("Illegal stop"));
        return;
    }
    if (!monitoring) {
        // in mini cluster mode
        LOG.info(prefix + " metrics system stopped (again)");
        return;
    }
    // Let callbacks observe the impending shutdown.
    for (Callback callback : callbacks) {
        callback.preStop();
    }
    for (Callback callback : namedCallbacks.values()) {
        callback.preStop();
    }
    LOG.info("Stopping " + prefix + " metrics system...");
    // Tear down in the reverse order of start(): timer, sources, sinks, config.
    stopTimer();
    stopSources();
    stopSinks();
    clearConfigs();
    monitoring = false;
    LOG.info(prefix + " metrics system stopped.");
    // Final notification once everything is down.
    for (Callback callback : callbacks) {
        callback.postStop();
    }
    for (Callback callback : namedCallbacks.values()) {
        callback.postStop();
    }
}
Also used : MetricsException(org.apache.hadoop.metrics2.MetricsException)

Example 3 with MetricsException

Use of org.apache.hadoop.metrics2.MetricsException in the Apache Hadoop project.

From class RollingFileSystemSink, method initFs().

/**
   * Initialize the connection to HDFS and create the base directory. Also
   * launch the flush thread.
   *
   * @return true if the base directory was created (or already usable),
   *         false if creation failed and {@code ignoreError} is set
   */
private boolean initFs() {
    fileSystem = getFileSystem();
    boolean created = false;
    try {
        fileSystem.mkdirs(basePath);
        created = true;
    } catch (Exception ex) {
        // Best-effort mode swallows the failure and signals it through the
        // return value; otherwise rethrow with copious debug info so the
        // operator can see exactly which settings were in play.
        if (!ignoreError) {
            throw new MetricsException("Failed to create " + basePath + "[" + SOURCE_KEY + "=" + source + ", " + ALLOW_APPEND_KEY + "=" + allowAppend + ", " + stringifySecurityProperty(KEYTAB_PROPERTY_KEY) + ", " + stringifySecurityProperty(USERNAME_PROPERTY_KEY) + "] -- " + ex.toString(), ex);
        }
    }
    if (created) {
        // If we're permitted to append, check if we actually can
        if (allowAppend) {
            allowAppend = checkAppend(fileSystem);
        }
        // Daemon timer so the flusher never blocks JVM shutdown.
        flushTimer = new Timer("RollingFileSystemSink Flusher", true);
        setInitialFlushTime(new Date());
    }
    return created;
}
Also used : Timer(java.util.Timer) MetricsException(org.apache.hadoop.metrics2.MetricsException) URISyntaxException(java.net.URISyntaxException) MetricsException(org.apache.hadoop.metrics2.MetricsException) IOException(java.io.IOException) Date(java.util.Date)

Example 4 with MetricsException

Use of org.apache.hadoop.metrics2.MetricsException in the Apache Hadoop project.

From class RollingFileSystemSink, method init().

@Override
public void init(SubsetConfiguration metrics2Properties) {
    properties = metrics2Properties;
    // Pull the sink settings out of the metrics2 configuration, falling back
    // to the documented defaults where a key is absent.
    basePath = new Path(properties.getString(BASEPATH_KEY, BASEPATH_DEFAULT));
    source = properties.getString(SOURCE_KEY, SOURCE_DEFAULT);
    ignoreError = properties.getBoolean(IGNORE_ERROR_KEY, DEFAULT_IGNORE_ERROR);
    allowAppend = properties.getBoolean(ALLOW_APPEND_KEY, DEFAULT_ALLOW_APPEND);
    rollOffsetIntervalMillis = getNonNegative(ROLL_OFFSET_INTERVAL_MILLIS_KEY, DEFAULT_ROLL_OFFSET_INTERVAL_MILLIS);
    rollIntervalMillis = getRollInterval();
    conf = loadConf();
    UserGroupInformation.setConfiguration(conf);
    // Don't do secure setup if it's not needed.
    if (UserGroupInformation.isSecurityEnabled()) {
        loginFromSinkConfiguration();
    }
}

/**
 * Log in with the keytab and principal named in the sink configuration.
 *
 * @throws MetricsException if the properties are missing or the login fails
 */
private void loginFromSinkConfiguration() {
    // Validate config so that we don't get an NPE
    checkIfPropertyExists(KEYTAB_PROPERTY_KEY);
    checkIfPropertyExists(USERNAME_PROPERTY_KEY);
    try {
        // Login as whoever we're supposed to be and let the hostname be pulled
        // from localhost. If security isn't enabled, this does nothing.
        SecurityUtil.login(conf, properties.getString(KEYTAB_PROPERTY_KEY), properties.getString(USERNAME_PROPERTY_KEY));
    } catch (IOException ex) {
        throw new MetricsException("Error logging in securely: [" + ex.toString() + "]", ex);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MetricsException(org.apache.hadoop.metrics2.MetricsException) IOException(java.io.IOException)

Example 5 with MetricsException

Use of org.apache.hadoop.metrics2.MetricsException in the Apache Hadoop project.

From class GraphiteSink, method putMetrics().

@Override
public void putMetrics(MetricsRecord record) {
    // Configure the hierarchical place to display the graph.
    StringBuilder metricsPathPrefix = new StringBuilder();
    metricsPathPrefix.append(metricsPrefix).append(".").append(record.context()).append(".").append(record.name());
    for (MetricsTag tag : record.tags()) {
        // Tags with a null value are omitted from the path entirely.
        if (tag.value() != null) {
            metricsPathPrefix.append(".").append(tag.name()).append("=").append(tag.value());
        }
    }
    // The record timestamp is in milliseconds while Graphite expects an epoc time in seconds.
    long timestamp = record.timestamp() / 1000L;
    // Collect datapoints, one "path value timestamp" line per metric.
    StringBuilder lines = new StringBuilder();
    for (AbstractMetric metric : record.metrics()) {
        lines.append(metricsPathPrefix).append(".").append(metric.name().replace(' ', '.'));
        lines.append(" ").append(metric.value()).append(" ").append(timestamp).append("\n");
    }
    try {
        graphite.write(lines.toString());
    } catch (Exception e) {
        // A failed write is only warned about; the connection is closed so a
        // later call can reconnect. Failure to close is the fatal case.
        LOG.warn("Error sending metrics to Graphite", e);
        try {
            graphite.close();
        } catch (Exception e1) {
            throw new MetricsException("Error closing connection to Graphite", e1);
        }
    }
}
Also used : AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) MetricsException(org.apache.hadoop.metrics2.MetricsException) MetricsTag(org.apache.hadoop.metrics2.MetricsTag) IOException(java.io.IOException) MetricsException(org.apache.hadoop.metrics2.MetricsException)

Aggregations

MetricsException (org.apache.hadoop.metrics2.MetricsException)16 IOException (java.io.IOException)5 AbstractMetric (org.apache.hadoop.metrics2.AbstractMetric)3 MetricsTag (org.apache.hadoop.metrics2.MetricsTag)3 Test (org.junit.Test)3 ExecutionException (java.util.concurrent.ExecutionException)2 Matcher (java.util.regex.Matcher)2 Pattern (java.util.regex.Pattern)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 Matcher (com.google.re2j.Matcher)1 Annotation (java.lang.annotation.Annotation)1 URISyntaxException (java.net.URISyntaxException)1 Instant (java.time.Instant)1 LocalDateTime (java.time.LocalDateTime)1 Collection (java.util.Collection)1 Date (java.util.Date)1 HashMap (java.util.HashMap)1 Map (java.util.Map)1 Properties (java.util.Properties)1 Timer (java.util.Timer)1