use of org.apache.hadoop.metrics2.MetricsTag in project hadoop by apache.
the class GangliaSink30 method appendPrefix.
@InterfaceAudience.Private
public void appendPrefix(MetricsRecord record, StringBuilder sb) {
  String contextName = record.context();
  Collection<MetricsTag> tags = record.tags();
  if (useTagsMap.containsKey(contextName)) {
    Set<String> useTags = useTagsMap.get(contextName);
    for (MetricsTag t : tags) {
      if (useTags == null || useTags.contains(t.name())) {
        if (t.info() != MsInfo.Context && t.info() != MsInfo.Hostname
            && t.value() != null) {
          sb.append('.').append(t.name()).append('=').append(t.value());
        }
      }
    }
  }
}
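The effect is easiest to see on a concrete record. A minimal usage sketch, assuming a hypothetical sink instance and a record in the "yarn" context carrying a Queue=root.default tag that useTagsMap permits (none of these names come from the Hadoop source):

StringBuilder sb = new StringBuilder("QueueMetrics");
sink.appendPrefix(record, sb);
// sb.toString() now reads "QueueMetrics.Queue=root.default";
// the Context and Hostname tags are always skipped.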
use of org.apache.hadoop.metrics2.MetricsTag in project hadoop by apache.
the class TestMetricsCache method makeRecord.
private MetricsRecord makeRecord(String name, Collection<MetricsTag> tags,
    Collection<AbstractMetric> metrics) {
  MetricsRecord mr = mock(MetricsRecord.class);
  when(mr.name()).thenReturn(name);
  when(mr.tags()).thenReturn(tags);
  when(mr.metrics()).thenReturn(metrics);
  return mr;
}
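Because the stubbed getters simply echo their arguments, a test can fabricate arbitrary records. A hypothetical call, with invented names and values:

MetricsRecord mr = makeRecord("mycache",
    Arrays.asList(new MetricsTag(MsInfo.Context, "test")),
    Collections.<AbstractMetric>emptyList());
assertEquals("mycache", mr.name()); // the mock returns exactly what was passed in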
use of org.apache.hadoop.metrics2.MetricsTag in project hadoop by apache.
the class FileSink method putMetrics.
@Override
public void putMetrics(MetricsRecord record) {
  writer.print(record.timestamp());
  writer.print(" ");
  writer.print(record.context());
  writer.print(".");
  writer.print(record.name());
  // The first tag or metric is preceded by ": "; every later one by ", ".
  String separator = ": ";
  for (MetricsTag tag : record.tags()) {
    writer.print(separator);
    separator = ", ";
    writer.print(tag.name());
    writer.print("=");
    writer.print(tag.value());
  }
  for (AbstractMetric metric : record.metrics()) {
    writer.print(separator);
    separator = ", ";
    writer.print(metric.name());
    writer.print("=");
    writer.print(metric.value());
  }
  writer.println();
}
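For a record with two tags and one metric, the emitted line would look roughly like this (timestamp, names, and values invented for illustration):

1436913651516 yarn.QueueMetrics: Context=yarn, Hostname=host1.example.com, AppsRunning=1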
use of org.apache.hadoop.metrics2.MetricsTag in project hadoop by apache.
the class KafkaSink method putMetrics.
@Override
public void putMetrics(MetricsRecord record) {
  if (producer == null) {
    throw new MetricsException("Producer in KafkaSink is null!");
  }
  // Create the JSON object.
  StringBuilder jsonLines = new StringBuilder();
  long timestamp = record.timestamp();
  Instant instant = Instant.ofEpochMilli(timestamp);
  LocalDateTime ldt = LocalDateTime.ofInstant(instant, zoneId);
  String date = ldt.format(dateFormat);
  String time = ldt.format(timeFormat);
  // Collect datapoints and populate the JSON object. Control characters are
  // stripped out of tag and metric names so they cannot corrupt the JSON.
  jsonLines.append("{\"hostname\": \"").append(hostname);
  jsonLines.append("\", \"timestamp\": ").append(timestamp);
  jsonLines.append(", \"date\": \"").append(date);
  jsonLines.append("\",\"time\": \"").append(time);
  jsonLines.append("\",\"name\": \"").append(record.name()).append("\" ");
  for (MetricsTag tag : record.tags()) {
    jsonLines.append(", \"").append(tag.name().replaceAll("[\\p{Cc}]", ""))
        .append("\": ");
    jsonLines.append(" \"").append(tag.value()).append("\"");
  }
  for (AbstractMetric metric : record.metrics()) {
    jsonLines.append(", \"").append(metric.name().replaceAll("[\\p{Cc}]", ""))
        .append("\": ");
    jsonLines.append(" \"").append(metric.value()).append("\"");
  }
  jsonLines.append("}");
  LOG.debug("kafka message: " + jsonLines.toString());
  // Create the record to be sent from the JSON. Here is an example of this data:
  // {"hostname": "...", "timestamp": 1436913651516,
  // "date": "2015-6-14", "time": "22:40:51", "context": "yarn", "name":
  // "QueueMetrics", "running_0": "1", "running_60": "0", "running_300": "0",
  // "running_1440": "0", "AppsSubmitted": "1", "AppsRunning": "1",
  // "AppsPending": "0", "AppsCompleted": "0", "AppsKilled": "0",
  // "AppsFailed": "0", "AllocatedMB": "134656", "AllocatedVCores": "132",
  // "AllocatedContainers": "132", "AggregateContainersAllocated": "132",
  // "AggregateContainersReleased": "0", "AvailableMB": "0",
  // "AvailableVCores": "0", "PendingMB": "275456", "PendingVCores": "269",
  // "PendingContainers": "269", "ReservedMB": "0", "ReservedVCores": "0",
  // "ReservedContainers": "0", "ActiveUsers": "1", "ActiveApplications": "1"}
  ProducerRecord<Integer, byte[]> data = new ProducerRecord<Integer, byte[]>(
      topic, jsonLines.toString().getBytes(Charset.forName("UTF-8")));
  // Send the data to the Kafka broker and block until the send completes, so
  // that failures surface as a MetricsException.
  Future<RecordMetadata> future = producer.send(data);
  jsonLines.setLength(0);
  try {
    future.get();
  } catch (InterruptedException | ExecutionException e) {
    throw new MetricsException("Error sending data", e);
  }
}
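For context, a producer like the one this sink depends on is typically built from properties along these lines; the broker address is invented and the exact property set is an assumption, not the sink's actual initialization code:

Properties props = new Properties();
props.put("bootstrap.servers", "broker1.example.com:9092"); // hypothetical broker
props.put("key.serializer",
    "org.apache.kafka.common.serialization.IntegerSerializer");
props.put("value.serializer",
    "org.apache.kafka.common.serialization.ByteArraySerializer");
Producer<Integer, byte[]> producer = new KafkaProducer<Integer, byte[]>(props);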
use of org.apache.hadoop.metrics2.MetricsTag in project phoenix by apache.
the class BaseTracingTestIT method createRecord.
public static MetricsRecord createRecord(long traceid, long parentid, long spanid,
    String desc, long startTime, long endTime, String hostname, String... tags) {
  // Span id, parent id, and the start/end timestamps travel as counter metrics.
  List<AbstractMetric> metrics = new ArrayList<AbstractMetric>();
  AbstractMetric span = new ExposedMetricCounterLong(
      asInfo(MetricInfo.SPAN.traceName), spanid);
  metrics.add(span);
  AbstractMetric parent = new ExposedMetricCounterLong(
      asInfo(MetricInfo.PARENT.traceName), parentid);
  metrics.add(parent);
  AbstractMetric start = new ExposedMetricCounterLong(
      asInfo(MetricInfo.START.traceName), startTime);
  metrics.add(start);
  AbstractMetric end = new ExposedMetricCounterLong(
      asInfo(MetricInfo.END.traceName), endTime);
  metrics.add(end);
  // Each annotation becomes a tag keyed by its position in the vararg list.
  List<MetricsTag> tagsList = new ArrayList<MetricsTag>();
  int tagCount = 0;
  for (String annotation : tags) {
    MetricsTag tag = new PhoenixTagImpl(MetricInfo.ANNOTATION.traceName,
        Integer.toString(tagCount++), annotation);
    tagsList.add(tag);
  }
  // Note: a fixed hostname value is used here; the hostname parameter is unused.
  String hostnameValue = "host-name.value";
  MetricsTag hostnameTag = new PhoenixTagImpl(MetricInfo.HOSTNAME.traceName, "", hostnameValue);
  tagsList.add(hostnameTag);
  MetricsRecord record = new ExposedMetricsRecordImpl(
      new ExposedMetricsInfoImpl(TracingUtils.getTraceMetricName(traceid), desc),
      System.currentTimeMillis(), tagsList, metrics);
  return record;
}
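A hypothetical invocation, with invented trace/span ids, timestamps, and annotations:

long now = System.currentTimeMillis();
MetricsRecord record = createRecord(987654L, 10L, 20L, "test span",
    now, now + 100, "host1.example.com", "annotation-0", "annotation-1");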