use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
the class AzureBlobStorageTestAccount method getLatestMetricValue.
public Number getLatestMetricValue(String metricName, Number defaultValue)
    throws IndexOutOfBoundsException {
  boolean found = false;
  Number ret = null;
  // Scan every record; a later record overwrites an earlier one, so the
  // value returned is the latest one emitted for this metric.
  for (MetricsRecord currentRecord : allMetrics) {
    // First check whether this record came from my file system.
    if (wasGeneratedByMe(currentRecord)) {
      for (AbstractMetric currentMetric : currentRecord.metrics()) {
        if (currentMetric.name().equalsIgnoreCase(metricName)) {
          found = true;
          ret = currentMetric.value();
          break;
        }
      }
    }
  }
  if (!found) {
    if (defaultValue != null) {
      return defaultValue;
    }
    throw new IndexOutOfBoundsException(metricName);
  }
  return ret;
}
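A minimal usage sketch (the account variable and metric name are hypothetical, not taken from the source): fetch the most recent value of a counter, falling back to a supplied default when the metric was never emitted.
// Hedged sketch: 'testAccount' and the metric name are assumed for illustration.
Number bytesWritten = testAccount.getLatestMetricValue("wasbBytesWritten", 0L);
System.out.println("latest bytes written: " + bytesWritten.longValue());
// Passing a null default makes a missing metric fail fast instead:
// getLatestMetricValue("wasbBytesWritten", null) throws
// IndexOutOfBoundsException when no such metric was ever recorded.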
use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
the class KafkaSink method putMetrics.
@Override
public void putMetrics(MetricsRecord record) {
  if (producer == null) {
    throw new MetricsException("Producer in KafkaSink is null!");
  }
  // Create the JSON object.
  StringBuilder jsonLines = new StringBuilder();
  long timestamp = record.timestamp();
  Instant instant = Instant.ofEpochMilli(timestamp);
  LocalDateTime ldt = LocalDateTime.ofInstant(instant, zoneId);
  String date = ldt.format(dateFormat);
  String time = ldt.format(timeFormat);
  // Collect data points and populate the JSON object.
  jsonLines.append("{\"hostname\": \"").append(hostname);
  jsonLines.append("\", \"timestamp\": ").append(timestamp);
  jsonLines.append(", \"date\": \"").append(date);
  jsonLines.append("\",\"time\": \"").append(time);
  jsonLines.append("\",\"name\": \"").append(record.name()).append("\" ");
  for (MetricsTag tag : record.tags()) {
    // Strip control characters from names before embedding them in JSON.
    jsonLines.append(", \"").append(tag.name().replaceAll("[\\p{Cc}]", "")).append("\": ");
    jsonLines.append(" \"").append(tag.value()).append("\"");
  }
  for (AbstractMetric metric : record.metrics()) {
    jsonLines.append(", \"").append(metric.name().replaceAll("[\\p{Cc}]", "")).append("\": ");
    jsonLines.append(" \"").append(metric.value().toString()).append("\"");
  }
  jsonLines.append("}");
  LOG.debug("kafka message: " + jsonLines.toString());
  // Create the record to be sent from the JSON.
  ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(
      topic, jsonLines.toString().getBytes(Charset.forName("UTF-8")));
  // Send the data to the Kafka broker. Here is an example of this data:
  // {"hostname": "...", "timestamp": 1436913651516,
  // "date": "2015-7-14","time": "22:40:51","context": "yarn","name":
  // "QueueMetrics", "running_0": "1", "running_60": "0", "running_300": "0",
  // "running_1440": "0", "AppsSubmitted": "1", "AppsRunning": "1",
  // "AppsPending": "0", "AppsCompleted": "0", "AppsKilled": "0",
  // "AppsFailed": "0", "AllocatedMB": "134656", "AllocatedVCores": "132",
  // "AllocatedContainers": "132", "AggregateContainersAllocated": "132",
  // "AggregateContainersReleased": "0", "AvailableMB": "0",
  // "AvailableVCores": "0", "PendingMB": "275456", "PendingVCores": "269",
  // "PendingContainers": "269", "ReservedMB": "0", "ReservedVCores": "0",
  // "ReservedContainers": "0", "ActiveUsers": "1", "ActiveApplications": "1"}
  Future<RecordMetadata> future = producer.send(data);
  try {
    // Block until the send is acknowledged so failures surface here.
    future.get();
  } catch (InterruptedException e) {
    // Restore the interrupt status before rethrowing.
    Thread.currentThread().interrupt();
    throw new MetricsException("Error sending data", e);
  } catch (ExecutionException e) {
    throw new MetricsException("Error sending data", e);
  }
}
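For orientation, a hedged test sketch (not part of the source) that drives putMetrics with a mocked record, in the same Mockito style as the container-metrics tests below; 'sink' is assumed to be an already-initialized KafkaSink whose producer is non-null.
// Hedged sketch: 'sink' is an assumed, fully initialized KafkaSink instance.
MetricsRecord record = mock(MetricsRecord.class);
when(record.timestamp()).thenReturn(System.currentTimeMillis());
when(record.name()).thenReturn("testRecord");
when(record.tags()).thenReturn(Collections.<MetricsTag>emptyList());
when(record.metrics()).thenReturn(Collections.<AbstractMetric>emptyList());
// Serializes the record to one JSON line and blocks until Kafka acknowledges it.
sink.putMetrics(record);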
use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
the class TestContainerMetrics method testContainerMetricsLimit.
@Test
public void testContainerMetricsLimit() throws InterruptedException {
  final String ERR = "Error in number of records";
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  ContainerId containerId = mock(ContainerId.class);
  // 100 ms flush period, 1 ms unregister delay.
  ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, 100, 1);
  int anyPmemLimit = 1024;
  int anyVmemLimit = 2048;
  int anyVcores = 10;
  long anyLaunchDuration = 20L;
  long anyLocalizationDuration = 1000L;
  String anyProcessId = "1234";
  metrics.recordResourceLimit(anyVmemLimit, anyPmemLimit, anyVcores);
  metrics.recordProcessId(anyProcessId);
  metrics.recordStateChangeDurations(anyLaunchDuration, anyLocalizationDuration);
  // Sleep past the flush period so the record is finalized before collection.
  Thread.sleep(110);
  metrics.getMetrics(collector, true);
  assertEquals(ERR, 1, collector.getRecords().size());
  MetricsRecord record = collector.getRecords().get(0);
  MetricsRecords.assertTag(record, ContainerMetrics.PROCESSID_INFO.name(), anyProcessId);
  MetricsRecords.assertMetric(record, ContainerMetrics.PMEM_LIMIT_METRIC_NAME, anyPmemLimit);
  MetricsRecords.assertMetric(record, ContainerMetrics.VMEM_LIMIT_METRIC_NAME, anyVmemLimit);
  MetricsRecords.assertMetric(record, ContainerMetrics.VCORE_LIMIT_METRIC_NAME, anyVcores);
  MetricsRecords.assertMetric(record, ContainerMetrics.LAUNCH_DURATION_METRIC_NAME, anyLaunchDuration);
  MetricsRecords.assertMetric(record, ContainerMetrics.LOCALIZATION_DURATION_METRIC_NAME, anyLocalizationDuration);
  collector.clear();
}
use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project hadoop by apache.
the class TestContainerMetrics method testContainerMetricsHistogram.
/**
 * Run a test to submit values for actual memory usage and see if the
 * histogram comes out correctly.
 * @throws Exception
 */
@Test
public void testContainerMetricsHistogram() throws Exception {
  // Submit two values, 1024 and 2048. The 75th, 90th, 95th and 99th
  // percentiles will be 2048; the 50th percentile will be 1536
  // ((1024 + 2048) / 2). If we kept recording 1024 and 2048 in a loop,
  // the 50th percentile would tend closer to 2048.
  Map<String, Long> expectedValues = new HashMap<>();
  expectedValues.put("PMemUsageMBHistogram50thPercentileMBs", 1536L);
  expectedValues.put("PMemUsageMBHistogram75thPercentileMBs", 2048L);
  expectedValues.put("PMemUsageMBHistogram90thPercentileMBs", 2048L);
  expectedValues.put("PMemUsageMBHistogram95thPercentileMBs", 2048L);
  expectedValues.put("PMemUsageMBHistogram99thPercentileMBs", 2048L);
  expectedValues.put("PCpuUsagePercentHistogram50thPercentilePercents", 0L);
  expectedValues.put("PCpuUsagePercentHistogram75thPercentilePercents", 0L);
  expectedValues.put("PCpuUsagePercentHistogram90thPercentilePercents", 0L);
  expectedValues.put("PCpuUsagePercentHistogram95thPercentilePercents", 0L);
  expectedValues.put("PCpuUsagePercentHistogram99thPercentilePercents", 0L);
  Set<String> testResults = new HashSet<>();
  int delay = 10;
  int rolloverDelay = 1000;
  MetricsCollectorImpl collector = new MetricsCollectorImpl();
  ContainerId containerId = mock(ContainerId.class);
  ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, delay, 0);
  metrics.recordMemoryUsage(1024);
  metrics.recordMemoryUsage(2048);
  // Sleep past the quantile rollover interval so the percentile
  // snapshots are published before we collect.
  Thread.sleep(rolloverDelay + 10);
  metrics.getMetrics(collector, true);
  for (MetricsRecord record : collector.getRecords()) {
    for (AbstractMetric metric : record.metrics()) {
      String metricName = metric.name();
      if (expectedValues.containsKey(metricName)) {
        Long expectedValue = expectedValues.get(metricName);
        Assert.assertEquals("Metric " + metricName + " doesn't have expected value",
            expectedValue, metric.value());
        testResults.add(metricName);
      }
    }
  }
  // Every expected metric must have been observed.
  Assert.assertEquals(expectedValues.keySet(), testResults);
}
use of org.apache.hadoop.metrics2.util.MetricsCache.Record in project phoenix by apache.
the class BaseTracingTestIT method createRecord.
public static MetricsRecord createRecord(long traceid, long parentid, long spanid,
    String desc, long startTime, long endTime, String hostname, String... tags) {
  List<AbstractMetric> metrics = new ArrayList<>();
  AbstractMetric span = new ExposedMetricCounterLong(asInfo(MetricInfo.SPAN.traceName), spanid);
  metrics.add(span);
  AbstractMetric parent = new ExposedMetricCounterLong(asInfo(MetricInfo.PARENT.traceName), parentid);
  metrics.add(parent);
  AbstractMetric start = new ExposedMetricCounterLong(asInfo(MetricInfo.START.traceName), startTime);
  metrics.add(start);
  AbstractMetric end = new ExposedMetricCounterLong(asInfo(MetricInfo.END.traceName), endTime);
  metrics.add(end);
  List<MetricsTag> tagsList = new ArrayList<>();
  int tagCount = 0;
  for (String annotation : tags) {
    MetricsTag tag = new PhoenixTagImpl(MetricInfo.ANNOTATION.traceName,
        Integer.toString(tagCount++), annotation);
    tagsList.add(tag);
  }
  // Tag the record with the host that generated it.
  MetricsTag hostnameTag = new PhoenixTagImpl(MetricInfo.HOSTNAME.traceName, "", hostname);
  tagsList.add(hostnameTag);
  MetricsRecord record = new ExposedMetricsRecordImpl(
      new ExposedMetricsInfoImpl(TracingUtils.getTraceMetricName(traceid), desc),
      System.currentTimeMillis(), tagsList, metrics);
  return record;
}
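A short usage sketch (argument values invented for illustration): build a record for span 10 of trace 1 under parent span 2, carrying one annotation tag.
long now = System.currentTimeMillis();
// Hypothetical values; only the shape of the call matters here.
MetricsRecord record = BaseTracingTestIT.createRecord(
    1L, 2L, 10L, "test span", now - 100, now, "host-1", "annotation-0");
// The record now carries span/parent/start/end metrics plus one
// annotation tag and the hostname tag.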