Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by Apache.
The class GangliaSink30, method putMetrics.
@Override
public void putMetrics(MetricsRecord record) {
  // This method handles both dense publish of metrics and sparse
  // (only-on-change) publish of metrics.
  try {
    String recordName = record.name();
    String contextName = record.context();
    StringBuilder sb = new StringBuilder();
    sb.append(contextName);
    sb.append('.');
    sb.append(recordName);
    appendPrefix(record, sb);
    String groupName = sb.toString();
    sb.append('.');
    int sbBaseLen = sb.length();
    String type = null;
    GangliaSlope slopeFromMetric = null;
    GangliaSlope calculatedSlope = null;
    Record cachedMetrics = null;
    // reset the buffer to the beginning
    resetBuffer();
    if (!isSupportSparseMetrics()) {
      // for sending dense metrics, update the metrics cache
      // and get the updated data
      cachedMetrics = metricsCache.update(record);
      if (cachedMetrics != null && cachedMetrics.metricsEntrySet() != null) {
        for (Map.Entry<String, AbstractMetric> entry : cachedMetrics.metricsEntrySet()) {
          AbstractMetric metric = entry.getValue();
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    } else {
      // we support sparse updates
      Collection<AbstractMetric> metrics = (Collection<AbstractMetric>) record.metrics();
      if (metrics.size() > 0) {
        // we got metrics, so send the latest
        for (AbstractMetric metric : record.metrics()) {
          sb.append(metric.name());
          String name = sb.toString();
          // visit the metric to identify the Ganglia type and slope
          metric.visit(gangliaMetricVisitor);
          type = gangliaMetricVisitor.getType();
          slopeFromMetric = gangliaMetricVisitor.getSlope();
          GangliaConf gConf = getGangliaConfForMetric(name);
          calculatedSlope = calculateSlope(gConf, slopeFromMetric);
          // send metric to Ganglia
          emitMetric(groupName, name, type, metric.value().toString(), gConf, calculatedSlope);
          // reset the length of the buffer for the next iteration
          sb.setLength(sbBaseLen);
        }
      }
    }
  } catch (IOException io) {
    throw new MetricsException("Failed to putMetrics", io);
  }
}
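The emitted metric name is built as context.recordName[.tagPrefix].metricName: the group prefix is computed once per record (appendPrefix adds any tag-derived suffix), and sb is rewound to sbBaseLen after each metric so the prefix never has to be rebuilt. Below is a minimal, self-contained sketch of that naming pattern, using hypothetical context, record, and metric names:

import java.util.Arrays;
import java.util.List;

// Illustration only: shows how the group prefix is computed once and the
// StringBuilder is rewound per metric, as in putMetrics() above.
public class GangliaNamingSketch {
  public static void main(String[] args) {
    String contextName = "yarn";                    // record.context()
    String recordName = "QueueMetrics";             // record.name()
    List<String> metricNames = Arrays.asList("AppsRunning", "AppsPending");

    StringBuilder sb = new StringBuilder();
    sb.append(contextName).append('.').append(recordName);
    String groupName = sb.toString();               // "yarn.QueueMetrics"
    sb.append('.');
    int sbBaseLen = sb.length();                    // rewind point

    for (String metricName : metricNames) {
      sb.append(metricName);
      String fullName = sb.toString();              // e.g. "yarn.QueueMetrics.AppsRunning"
      System.out.println(groupName + " -> " + fullName);
      sb.setLength(sbBaseLen);                      // reuse the prefix for the next metric
    }
  }
}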
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by Apache.
The class MetricsSourceBuilder, method add.
/**
 * Change the declared field {@code field} in the {@code source} object to
 * a {@link MutableMetric}.
 */
private void add(Object source, Field field) {
  for (Annotation annotation : field.getAnnotations()) {
    if (!(annotation instanceof Metric)) {
      continue;
    }
    try {
      // skip fields that are already set
      field.setAccessible(true);
      if (field.get(source) != null) {
        continue;
      }
    } catch (Exception e) {
      LOG.warn("Error accessing field " + field + " annotated with " + annotation, e);
      continue;
    }
    MutableMetric mutable = factory.newForField(field, (Metric) annotation, registry);
    if (mutable != null) {
      try {
        // Set the source field to the new MutableMetric
        field.set(source, mutable);
        hasAtMetric = true;
      } catch (Exception e) {
        throw new MetricsException("Error setting field " + field + " annotated with " + annotation, e);
      }
    }
  }
}
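add() only reacts to fields that carry the @Metric annotation and are still null; for those it asks MutableMetricsFactory to build a suitable MutableMetric and injects it into the source object. A hedged sketch of what an annotated source class might look like (the class name and descriptions are made up; the fields are deliberately left null so the builder can populate them):

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MutableRate;

// Hypothetical metrics source; the fields are left null so that
// MetricsSourceBuilder.add() fills them in via MutableMetricsFactory.
@Metrics(name = "ExampleSource", context = "example")
public class ExampleSource {
  @Metric("Total requests handled")
  MutableCounterLong requests;

  @Metric("Current queue depth")
  MutableGaugeInt queueDepth;

  @Metric("Request processing time")
  MutableRate processingTime;
}

When MetricsSourceBuilder processes such a class, the annotated fields are populated with mutable metrics and hasAtMetric is set to true, marking the class as a metrics contributor.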
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by Apache.
The class MutableMetricsFactory, method newForField.
MutableMetric newForField(Field field, Metric annotation, MetricsRegistry registry) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("field " + field + " with annotation " + annotation);
  }
  MetricsInfo info = getInfo(annotation, field);
  MutableMetric metric = newForField(field, annotation);
  if (metric != null) {
    registry.add(info.name(), metric);
    return metric;
  }
  final Class<?> cls = field.getType();
  if (cls == MutableCounterInt.class) {
    return registry.newCounter(info, 0);
  }
  if (cls == MutableCounterLong.class) {
    return registry.newCounter(info, 0L);
  }
  if (cls == MutableGaugeInt.class) {
    return registry.newGauge(info, 0);
  }
  if (cls == MutableGaugeLong.class) {
    return registry.newGauge(info, 0L);
  }
  if (cls == MutableRate.class) {
    return registry.newRate(info.name(), info.description(), annotation.always());
  }
  if (cls == MutableRates.class) {
    return new MutableRates(registry);
  }
  if (cls == MutableRatesWithAggregation.class) {
    return registry.newRatesWithAggregation(info.name());
  }
  if (cls == MutableStat.class) {
    return registry.newStat(info.name(), info.description(), annotation.sampleName(), annotation.valueName(), annotation.always());
  }
  throw new MetricsException("Unsupported metric field " + field.getName() + " of type " + field.getType().getName());
}
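newForField() first tries the annotation-driven overload newForField(field, annotation); only if that returns null does it fall back to dispatching on the declared field type, and an unrecognized type raises a MetricsException. The sketch below, using hypothetical metric names, shows the kind of mutable metrics the registry fall-back produces and how they are updated:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MutableRate;

// Illustration of the registry calls behind the type dispatch above,
// with made-up metric names and descriptions.
public class FactoryDispatchSketch {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("example");

    MutableCounterLong ops = registry.newCounter("Ops", "operations", 0L);
    MutableGaugeInt depth = registry.newGauge("Depth", "queue depth", 0);
    MutableRate latency = registry.newRate("Latency", "op latency", false);

    ops.incr();        // counters only increase
    depth.set(5);      // gauges can be set to arbitrary values
    latency.add(42);   // rates record a sample count and aggregate time
  }
}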
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by Apache.
The class KafkaSink, method putMetrics.
@Override
public void putMetrics(MetricsRecord record) {
  if (producer == null) {
    throw new MetricsException("Producer in KafkaSink is null!");
  }
  // Create the JSON object.
  StringBuilder jsonLines = new StringBuilder();
  long timestamp = record.timestamp();
  Instant instant = Instant.ofEpochMilli(timestamp);
  LocalDateTime ldt = LocalDateTime.ofInstant(instant, zoneId);
  String date = ldt.format(dateFormat);
  String time = ldt.format(timeFormat);
  // Collect datapoints and populate the JSON object.
  jsonLines.append("{\"hostname\": \"" + hostname);
  jsonLines.append("\", \"timestamp\": " + timestamp);
  jsonLines.append(", \"date\": \"" + date);
  jsonLines.append("\",\"time\": \"" + time);
  jsonLines.append("\",\"name\": \"" + record.name() + "\" ");
  for (MetricsTag tag : record.tags()) {
    jsonLines.append(", \"" + tag.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
    jsonLines.append(" \"" + tag.value().toString() + "\"");
  }
  for (AbstractMetric metric : record.metrics()) {
    jsonLines.append(", \"" + metric.name().toString().replaceAll("[\\p{Cc}]", "") + "\": ");
    jsonLines.append(" \"" + metric.value().toString() + "\"");
  }
  jsonLines.append("}");
  LOG.debug("kafka message: " + jsonLines.toString());
  // Create the record to be sent from the JSON.
  ProducerRecord<Integer, byte[]> data = new ProducerRecord<Integer, byte[]>(topic, jsonLines.toString().getBytes(Charset.forName("UTF-8")));
  // Send the data to the Kafka broker. Here is an example of this data:
  // {"hostname": "...", "timestamp": 1436913651516,
  // "date": "2015-6-14", "time": "22:40:51", "context": "yarn", "name":
  // "QueueMetrics", "running_0": "1", "running_60": "0", "running_300": "0",
  // "running_1440": "0", "AppsSubmitted": "1", "AppsRunning": "1",
  // "AppsPending": "0", "AppsCompleted": "0", "AppsKilled": "0",
  // "AppsFailed": "0", "AllocatedMB": "134656", "AllocatedVCores": "132",
  // "AllocatedContainers": "132", "AggregateContainersAllocated": "132",
  // "AggregateContainersReleased": "0", "AvailableMB": "0",
  // "AvailableVCores": "0", "PendingMB": "275456", "PendingVCores": "269",
  // "PendingContainers": "269", "ReservedMB": "0", "ReservedVCores": "0",
  // "ReservedContainers": "0", "ActiveUsers": "1", "ActiveApplications": "1"}
  Future<RecordMetadata> future = producer.send(data);
  jsonLines.setLength(0);
  try {
    future.get();
  } catch (InterruptedException e) {
    throw new MetricsException("Error sending data", e);
  } catch (ExecutionException e) {
    throw new MetricsException("Error sending data", e);
  }
}
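producer.send() returns a Future, and the sink blocks on future.get() so that any send failure surfaces immediately as a MetricsException. A hedged sketch of a non-blocking alternative (not what KafkaSink does) would pass Kafka's standard Callback to send() and report failures from the callback instead:

  // Sketch only: reuses the 'data' record and LOG from putMetrics() above;
  // org.apache.kafka.clients.producer.Callback is the producer API's
  // asynchronous completion hook, accepted as a second argument to send().
  producer.send(data, new org.apache.kafka.clients.producer.Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
      if (exception != null) {
        LOG.error("Error sending metrics record to Kafka", exception);
      }
    }
  });

The trade-off is that the metrics system no longer sees send errors synchronously, so a failed broker would only show up in the logs rather than as a MetricsException.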
Use of org.apache.hadoop.metrics2.MetricsException in project hadoop by Apache.
The class KafkaSink, method init.
@Override
public void init(SubsetConfiguration conf) {
  // Get the Kafka broker configuration.
  Properties props = new Properties();
  brokerList = conf.getString(BROKER_LIST);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Broker list " + brokerList);
  }
  props.put("bootstrap.servers", brokerList);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Kafka brokers: " + brokerList);
  }
  // Get the Kafka topic configuration.
  topic = conf.getString(TOPIC);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Kafka topic " + topic);
  }
  if (Strings.isNullOrEmpty(topic)) {
    throw new MetricsException("Kafka topic can not be null");
  }
  // Set the rest of the Kafka configuration.
  props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
  props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
  props.put("request.required.acks", "0");
  // Set the hostname once and use it in every message.
  hostname = "null";
  try {
    hostname = InetAddress.getLocalHost().getHostName();
  } catch (Exception e) {
    LOG.warn("Error getting hostname, going to continue");
  }
  try {
    // Create the producer object.
    producer = new KafkaProducer<Integer, byte[]>(props);
  } catch (Exception e) {
    throw new MetricsException("Error creating Producer, " + brokerList, e);
  }
}
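init() pulls the broker list and topic from the sink's SubsetConfiguration, which is populated from hadoop-metrics2.properties. A hedged sketch of that wiring is shown below; the broker_list and topic key names are assumptions inferred from the BROKER_LIST and TOPIC constants read in init(), and the sink name "kafka", the prefix "namenode", and the broker and topic values are arbitrary examples:

  # hadoop-metrics2.properties (sketch)
  *.sink.kafka.class=org.apache.hadoop.metrics2.sink.KafkaSink
  namenode.sink.kafka.broker_list=broker1:9092,broker2:9092
  namenode.sink.kafka.topic=hadoop-metrics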