Usage example of org.apache.hadoop.metrics2.MetricsRecord in the Apache Hadoop project:
class TestContainerMetrics, method testContainerMetricsLimit.
@Test
public void testContainerMetricsLimit() throws InterruptedException {
final String ERR = "Error in number of records";
MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl();
ContainerId mockContainerId = mock(ContainerId.class);
// Flush period of 100ms, unregister delay of 1ms.
ContainerMetrics containerMetrics = ContainerMetrics.forContainer(mockContainerId, 100, 1);
int pmemLimitMB = 1024;
int vmemLimitMB = 2048;
int vcoreLimit = 10;
long launchDurationMs = 20L;
long localizationDurationMs = 1000L;
String processId = "1234";
// Record a handful of representative values on the container metrics.
containerMetrics.recordResourceLimit(vmemLimitMB, pmemLimitMB, vcoreLimit);
containerMetrics.recordProcessId(processId);
containerMetrics.recordStateChangeDurations(launchDurationMs, localizationDurationMs);
// Sleep slightly past the 100ms flush period so a snapshot is available.
Thread.sleep(110);
containerMetrics.getMetrics(metricsCollector, true);
// Exactly one record is expected after a single flush interval.
assertEquals(ERR, 1, metricsCollector.getRecords().size());
MetricsRecord record = metricsCollector.getRecords().get(0);
// Verify each recorded value round-tripped into the emitted record.
MetricsRecords.assertTag(record, ContainerMetrics.PROCESSID_INFO.name(), processId);
MetricsRecords.assertMetric(record, ContainerMetrics.PMEM_LIMIT_METRIC_NAME, pmemLimitMB);
MetricsRecords.assertMetric(record, ContainerMetrics.VMEM_LIMIT_METRIC_NAME, vmemLimitMB);
MetricsRecords.assertMetric(record, ContainerMetrics.VCORE_LIMIT_METRIC_NAME, vcoreLimit);
MetricsRecords.assertMetric(record, ContainerMetrics.LAUNCH_DURATION_METRIC_NAME, launchDurationMs);
MetricsRecords.assertMetric(record, ContainerMetrics.LOCALIZATION_DURATION_METRIC_NAME, localizationDurationMs);
metricsCollector.clear();
}
Usage example of org.apache.hadoop.metrics2.MetricsRecord in the Apache Hadoop project:
class TestContainerMetrics, method testContainerMetricsHistogram.
/**
 * Submit values for actual memory usage and verify that the emitted
 * percentile histogram metrics match the expected quantiles.
 * @throws Exception
 */
@Test
public void testContainerMetricsHistogram() throws Exception {
// Two memory samples are recorded: 1024 and 2048. With only these two
// values, the 75th, 90th, 95th and 99th percentiles all resolve to 2048,
// while the 50th percentile is the midpoint 1536 ((1024+2048)/2). Were we
// to keep recording 1024 and 2048 repeatedly, the 50th percentile would
// drift toward 2048. No CPU usage is recorded, so all CPU percentiles are 0.
Map<String, Long> expectedValues = new HashMap<>();
expectedValues.put("PMemUsageMBHistogram50thPercentileMBs", 1536L);
expectedValues.put("PMemUsageMBHistogram75thPercentileMBs", 2048L);
expectedValues.put("PMemUsageMBHistogram90thPercentileMBs", 2048L);
expectedValues.put("PMemUsageMBHistogram95thPercentileMBs", 2048L);
expectedValues.put("PMemUsageMBHistogram99thPercentileMBs", 2048L);
expectedValues.put("PCpuUsagePercentHistogram50thPercentilePercents", 0L);
expectedValues.put("PCpuUsagePercentHistogram75thPercentilePercents", 0L);
expectedValues.put("PCpuUsagePercentHistogram90thPercentilePercents", 0L);
expectedValues.put("PCpuUsagePercentHistogram95thPercentilePercents", 0L);
expectedValues.put("PCpuUsagePercentHistogram99thPercentilePercents", 0L);
// Tracks which of the expected metrics were actually observed.
Set<String> observedMetrics = new HashSet<>();
int flushPeriodMs = 10;
int rolloverWaitMs = 1000;
MetricsCollectorImpl collector = new MetricsCollectorImpl();
ContainerId containerId = mock(ContainerId.class);
ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, flushPeriodMs, 0);
metrics.recordMemoryUsage(1024);
metrics.recordMemoryUsage(2048);
// Wait long enough for the quantile windows to roll over.
Thread.sleep(rolloverWaitMs + 10);
metrics.getMetrics(collector, true);
for (MetricsRecord record : collector.getRecords()) {
for (AbstractMetric metric : record.metrics()) {
String metricName = metric.name();
Long expectedValue = expectedValues.get(metricName);
if (expectedValue != null) {
Assert.assertEquals("Metric " + metricName + " doesn't have expected value", expectedValue, metric.value());
observedMetrics.add(metricName);
}
}
}
// Every expected histogram metric must have been emitted exactly as listed.
Assert.assertEquals(expectedValues.keySet(), observedMetrics);
}
Usage example of org.apache.hadoop.metrics2.MetricsRecord in the Apache Hadoop project:
class TestGangliaMetrics, method testTagsForPrefix.
@Test
public void testTagsForPrefix() throws Exception {
// Configure three tagsForPrefix entries: "all" appends every tag,
// "some" appends only the two named tags, and "none" appends nothing.
ConfigBuilder cb = new ConfigBuilder().add(testNamePrefix + ".sink.ganglia.tagsForPrefix.all", "*").add(testNamePrefix + ".sink.ganglia.tagsForPrefix.some", "NumActiveSinks, " + "NumActiveSources").add(testNamePrefix + ".sink.ganglia.tagsForPrefix.none", "");
GangliaSink30 sink = new GangliaSink30();
sink.init(cb.subset(testNamePrefix + ".sink.ganglia"));
List<MetricsTag> tags = new ArrayList<MetricsTag>();
// Index 0 is the Context tag; it is rewritten per case by the helper below.
tags.add(new MetricsTag(MsInfo.Context, "all"));
tags.add(new MetricsTag(MsInfo.NumActiveSources, "foo"));
tags.add(new MetricsTag(MsInfo.NumActiveSinks, "bar"));
tags.add(new MetricsTag(MsInfo.NumAllSinks, "haa"));
tags.add(new MetricsTag(MsInfo.Hostname, "host"));
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
// The record holds a live view of the tags list, so mutating the list
// between calls changes what appendPrefix sees.
MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 1, tags, metrics);
// "*" appends all non-Hostname tags.
assertPrefixForContext(sink, tags, record, "all", ".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa");
// Only the explicitly listed tags are appended.
assertPrefixForContext(sink, tags, record, "some", ".NumActiveSources=foo.NumActiveSinks=bar");
// An empty list appends no tags.
assertPrefixForContext(sink, tags, record, "none", "");
// An unconfigured context also appends no tags.
assertPrefixForContext(sink, tags, record, "nada", "");
}

/**
 * Sets the Context tag (index 0) to the given context, asks the sink to
 * build the metric-name prefix for the record, and asserts it matches.
 */
private static void assertPrefixForContext(GangliaSink30 sink, List<MetricsTag> tags, MetricsRecord record, String context, String expectedPrefix) {
StringBuilder sb = new StringBuilder();
tags.set(0, new MetricsTag(MsInfo.Context, context));
sink.appendPrefix(record, sb);
assertEquals(expectedPrefix, sb.toString());
}
Usage example of org.apache.hadoop.metrics2.MetricsRecord in the Apache Hadoop project:
class TestGraphiteMetrics, method testPutMetrics2.
@Test
public void testPutMetrics2() {
// Build a record with a null Hostname tag and two metrics, push it through
// the sink, and verify the exact line protocol written to Graphite.
GraphiteSink sink = new GraphiteSink();
List<MetricsTag> tags = new ArrayList<MetricsTag>();
tags.add(new MetricsTag(MsInfo.Context, "all"));
// Null hostname exercises the "null." prefix in the emitted metric path.
tags.add(new MetricsTag(MsInfo.Hostname, null));
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
metrics.add(makeMetric("foo1", 1));
metrics.add(makeMetric("foo2", 2));
// Timestamp 10000ms is converted to 10s in the Graphite line protocol.
MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
final GraphiteSink.Graphite mockGraphite = makeGraphite();
Whitebox.setInternalState(sink, "graphite", mockGraphite);
sink.putMetrics(record);
try {
verify(mockGraphite).write(argument.capture());
} catch (IOException e) {
// Fail here with the cause instead of swallowing the exception;
// otherwise a missed write surfaces later as a confusing failure at
// argument.getValue().
throw new AssertionError("Expected a write to the Graphite mock", e);
}
// The metrics come from a HashSet, so iteration order is unspecified:
// accept the two lines in either order.
String result = argument.getValue();
assertEquals(true, result.equals("null.all.Context.Context=all.foo1 1 10\n" + "null.all.Context.Context=all.foo2 2 10\n") || result.equals("null.all.Context.Context=all.foo2 2 10\n" + "null.all.Context.Context=all.foo1 1 10\n"));
}
Usage example of org.apache.hadoop.metrics2.MetricsRecord in the Apache Hadoop project:
class TestGraphiteMetrics, method testPutMetrics3.
/**
 * Assert that record timestamps (milliseconds) are converted correctly to
 * Graphite timestamps (seconds), ticket HADOOP-11182.
 */
@Test
public void testPutMetrics3() {
// setup GraphiteSink
GraphiteSink sink = new GraphiteSink();
final GraphiteSink.Graphite mockGraphite = makeGraphite();
Whitebox.setInternalState(sink, "graphite", mockGraphite);
// given two metrics records with timestamps 1000 milliseconds apart.
List<MetricsTag> tags = Collections.emptyList();
Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
metrics.add(makeMetric("foo1", 1));
MetricsRecord record1 = new MetricsRecordImpl(MsInfo.Context, 1000000000000L, tags, metrics);
MetricsRecord record2 = new MetricsRecordImpl(MsInfo.Context, 1000000001000L, tags, metrics);
sink.putMetrics(record1);
sink.putMetrics(record2);
sink.flush();
try {
sink.close();
} catch (IOException e) {
// Fail with the cause instead of swallowing the exception; a close
// failure here would otherwise be silently ignored.
throw new AssertionError("Failed to close the Graphite sink", e);
}
// then the timestamps in the graphite stream should differ by one second.
try {
verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000000\n"));
verify(mockGraphite).write(eq("null.default.Context.foo1 1 1000000001\n"));
} catch (IOException e) {
// The catch only satisfies write()'s checked-exception signature; if it
// fires, fail the test rather than letting it pass with no assertions.
throw new AssertionError("Expected writes to the Graphite mock", e);
}
}
End of aggregated usage examples.