Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by apache.
Class MetricsSystemImpl, method start().
@Override
public synchronized void start() {
  checkNotNull(prefix, "prefix");
  if (monitoring) {
    LOG.warn(prefix + " metrics system already started!",
        new MetricsException("Illegal start"));
    return;
  }
  for (Callback cb : callbacks) cb.preStart();
  for (Callback cb : namedCallbacks.values()) cb.preStart();
  configure(prefix);
  startTimer();
  monitoring = true;
  LOG.info(prefix + " metrics system started");
  for (Callback cb : callbacks) cb.postStart();
  for (Callback cb : namedCallbacks.values()) cb.postStart();
}
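Note that the MetricsException above is never thrown; it is attached to the warning purely so the log carries a stack trace identifying the redundant caller. A minimal, hedged sketch of a caller reaching that path (the prefix "test" and the second start() call are illustrative, not from the source):

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class StartTwice {
  public static void main(String[] args) {
    // initialize() registers and starts the system under the given prefix.
    MetricsSystem ms = DefaultMetricsSystem.initialize("test");
    // monitoring is already true, so this logs "test metrics system already
    // started!" with an "Illegal start" stack trace and returns quietly.
    ms.start();
  }
}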
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by apache.
Class MetricsSystemImpl, method stop().
@Override
public synchronized void stop() {
  if (!monitoring && !DefaultMetricsSystem.inMiniClusterMode()) {
    LOG.warn(prefix + " metrics system not yet started!",
        new MetricsException("Illegal stop"));
    return;
  }
  if (!monitoring) {
    // in mini cluster mode
    LOG.info(prefix + " metrics system stopped (again)");
    return;
  }
  for (Callback cb : callbacks) cb.preStop();
  for (Callback cb : namedCallbacks.values()) cb.preStop();
  LOG.info("Stopping " + prefix + " metrics system...");
  stopTimer();
  stopSources();
  stopSinks();
  clearConfigs();
  monitoring = false;
  LOG.info(prefix + " metrics system stopped.");
  for (Callback cb : callbacks) cb.postStop();
  for (Callback cb : namedCallbacks.values()) cb.postStop();
}
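The stop path mirrors the start path: stopping a system that is not monitoring (outside mini-cluster mode) logs the "Illegal stop" MetricsException rather than throwing it. A hedged sketch, assuming direct construction of MetricsSystemImpl as Hadoop's own tests do; the prefix is illustrative:

import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;

public class StopTwice {
  public static void main(String[] args) {
    MetricsSystemImpl ms = new MetricsSystemImpl();
    ms.init("test");  // init() also starts the system
    ms.stop();        // normal shutdown; monitoring becomes false
    ms.stop();        // warns with new MetricsException("Illegal stop") and returns
  }
}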
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by apache.
Class RollingFileSystemSink, method initFs().
/**
 * Initialize the connection to HDFS and create the base directory. Also
 * launch the flush thread.
 */
private boolean initFs() {
  boolean success = false;
  fileSystem = getFileSystem();
  // Try to create the base directory eagerly so that a misconfiguration
  // fails fast here, with copious debug info if it fails.
  try {
    fileSystem.mkdirs(basePath);
    success = true;
  } catch (Exception ex) {
    if (!ignoreError) {
      throw new MetricsException("Failed to create " + basePath + "["
          + SOURCE_KEY + "=" + source + ", "
          + ALLOW_APPEND_KEY + "=" + allowAppend + ", "
          + stringifySecurityProperty(KEYTAB_PROPERTY_KEY) + ", "
          + stringifySecurityProperty(USERNAME_PROPERTY_KEY) + "] -- "
          + ex.toString(), ex);
    }
  }
  if (success) {
    // If we're permitted to append, check if we actually can
    if (allowAppend) {
      allowAppend = checkAppend(fileSystem);
    }
    flushTimer = new Timer("RollingFileSystemSink Flusher", true);
    setInitialFlushTime(new Date());
  }
  return success;
}
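The part that concerns MetricsException is the ignoreError gate: a failed mkdirs is either swallowed (initFs() simply returns false) or rethrown wrapped with the sink's configuration for debugging. A stripped-down, hedged sketch of the same pattern against the local filesystem; the path, class name, and simplified message are illustrative, not the sink's own:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.MetricsException;

public class InitFsSketch {
  public static void main(String[] args) throws Exception {
    Path basePath = new Path("/tmp/metrics-sink");  // hypothetical base directory
    boolean ignoreError = false;                    // mirrors the ignore-error property
    FileSystem fs = FileSystem.get(new Configuration());
    boolean success = false;
    try {
      fs.mkdirs(basePath);
      success = true;
    } catch (Exception ex) {
      if (!ignoreError) {
        // Same wrap-and-rethrow as the sink: keep the cause, add context.
        throw new MetricsException("Failed to create " + basePath, ex);
      }
    }
    System.out.println("initFs sketch " + (success ? "succeeded" : "failed quietly"));
  }
}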
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by apache.
Class RollingFileSystemSink, method init().
@Override
public void init(SubsetConfiguration metrics2Properties) {
  properties = metrics2Properties;
  basePath = new Path(properties.getString(BASEPATH_KEY, BASEPATH_DEFAULT));
  source = properties.getString(SOURCE_KEY, SOURCE_DEFAULT);
  ignoreError = properties.getBoolean(IGNORE_ERROR_KEY, DEFAULT_IGNORE_ERROR);
  allowAppend = properties.getBoolean(ALLOW_APPEND_KEY, DEFAULT_ALLOW_APPEND);
  rollOffsetIntervalMillis =
      getNonNegative(ROLL_OFFSET_INTERVAL_MILLIS_KEY,
          DEFAULT_ROLL_OFFSET_INTERVAL_MILLIS);
  rollIntervalMillis = getRollInterval();
  conf = loadConf();
  UserGroupInformation.setConfiguration(conf);
  // Don't do secure setup if it's not needed.
  if (UserGroupInformation.isSecurityEnabled()) {
    // Validate config so that we don't get an NPE
    checkIfPropertyExists(KEYTAB_PROPERTY_KEY);
    checkIfPropertyExists(USERNAME_PROPERTY_KEY);
    try {
      // Login as whoever we're supposed to be and let the hostname be pulled
      // from localhost. If security isn't enabled, this does nothing.
      SecurityUtil.login(conf, properties.getString(KEYTAB_PROPERTY_KEY),
          properties.getString(USERNAME_PROPERTY_KEY));
    } catch (IOException ex) {
      throw new MetricsException("Error logging in securely: ["
          + ex.toString() + "]", ex);
    }
  }
}
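Everything init() reads comes from the sink's SubsetConfiguration, i.e. from hadoop-metrics2.properties. A hedged sample configuration for a NameNode; the instance name "rfs", the path, and the referenced Kerberos keys are illustrative, and the property names are assumed to match the key constants read above:

namenode.sink.rfs.class=org.apache.hadoop.metrics2.sink.RollingFileSystemSink
namenode.sink.rfs.basepath=/tmp/namenode-metrics
namenode.sink.rfs.source=dfs
namenode.sink.rfs.ignore-error=false
namenode.sink.rfs.allow-append=true
namenode.sink.rfs.roll-offset-interval-millis=30000
# With Kerberos enabled, checkIfPropertyExists() requires both of these;
# each names another Configuration key holding the keytab path or principal.
# namenode.sink.rfs.keytab-key=dfs.namenode.keytab.file
# namenode.sink.rfs.principal-key=dfs.namenode.kerberos.principal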
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by apache.
Class GraphiteSink, method putMetrics().
@Override
public void putMetrics(MetricsRecord record) {
  StringBuilder lines = new StringBuilder();
  StringBuilder metricsPathPrefix = new StringBuilder();
  // Configure the hierarchical place to display the graph.
  metricsPathPrefix.append(metricsPrefix).append(".")
      .append(record.context()).append(".").append(record.name());
  for (MetricsTag tag : record.tags()) {
    if (tag.value() != null) {
      metricsPathPrefix.append(".");
      metricsPathPrefix.append(tag.name());
      metricsPathPrefix.append("=");
      metricsPathPrefix.append(tag.value());
    }
  }
  // The record timestamp is in milliseconds while Graphite expects an
  // epoch time in seconds.
  long timestamp = record.timestamp() / 1000L;
  // Collect datapoints.
  for (AbstractMetric metric : record.metrics()) {
    lines.append(metricsPathPrefix.toString() + "."
        + metric.name().replace(' ', '.')).append(" ")
        .append(metric.value()).append(" ").append(timestamp)
        .append("\n");
  }
  try {
    graphite.write(lines.toString());
  } catch (Exception e) {
    LOG.warn("Error sending metrics to Graphite", e);
    try {
      graphite.close();
    } catch (Exception e1) {
      throw new MetricsException("Error closing connection to Graphite", e1);
    }
  }
}
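A worked example of the plaintext-protocol output this builds (all values invented): with metricsPrefix "hadoop", a record in context "dfs" named "namenode" carrying the tag Hostname=host1 and the metric FilesTotal=42 at timestamp 1500000000000 ms serializes as the single line

hadoop.dfs.namenode.Hostname=host1.FilesTotal 42 1500000000

with the millisecond timestamp divided down to the epoch seconds Graphite expects.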