Search in sources :

Example 6 with MetricsCollectorImpl

Use of org.apache.hadoop.metrics2.impl.MetricsCollectorImpl in the Apache Hadoop project.

From the class TestSchedulingUpdate, method verifyExpectedCalls:

/**
 * Waits for the scheduler's update thread to run, then verifies the root
 * queue's available resources and the recorded number of update-thread runs.
 *
 * @param expectedCalls expected value of the UpdateThreadRunNumOps metric
 * @param memory expected available memory (MB) on the root queue
 * @param vcores expected available virtual cores on the root queue
 * @throws InterruptedException if interrupted while polling
 */
private void verifyExpectedCalls(long expectedCalls, int memory, int vcores) throws InterruptedException {
    // Poll (at most 100 * 10ms = ~1s) until the update thread's run metric changes.
    for (int attempt = 0; attempt < 100 && !scheduler.fsOpDurations.hasUpdateThreadRunChanged(); attempt++) {
        Thread.sleep(10);
    }
    assertTrue("Update Thread has not run based on its metrics", scheduler.fsOpDurations.hasUpdateThreadRunChanged());
    // The update thread refreshes the root queue's available resource figures.
    assertEquals("Root queue metrics memory does not have expected value", memory, scheduler.getRootQueueMetrics().getAvailableMB());
    assertEquals("Root queue metrics cpu does not have expected value", vcores, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
    // Take a fresh snapshot of the FS operation-duration metrics and locate
    // the run-count metric within the first record.
    MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl();
    scheduler.fsOpDurations.getMetrics(metricsCollector, true);
    MetricsRecord firstRecord = metricsCollector.getRecords().get(0);
    boolean foundRunCountMetric = false;
    for (AbstractMetric metric : firstRecord.metrics()) {
        if (metric.name().contains("UpdateThreadRunNumOps")) {
            assertEquals("Update Thread did not run expected number of times based on metric record count", expectedCalls, metric.value());
            foundRunCountMetric = true;
        }
    }
    assertTrue("Did not find metric for UpdateThreadRunNumOps", foundRunCountMetric);
}
Also used : MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) MetricsCollectorImpl(org.apache.hadoop.metrics2.impl.MetricsCollectorImpl)

Example 7 with MetricsCollectorImpl

Use of org.apache.hadoop.metrics2.impl.MetricsCollectorImpl in the Apache Hadoop project.

From the class TestFSQueueMetrics, method checkSchedulingPolicy:

/**
 * Asserts that the metrics source for the given queue reports the expected
 * scheduling policy via its "SchedulingPolicy" tag.
 *
 * @param queueName name of the queue whose metrics source is inspected
 * @param policy expected scheduling policy name
 */
private void checkSchedulingPolicy(String queueName, String policy) {
    MetricsSource source = TestQueueMetrics.queueSource(ms, queueName);
    // Collect a fresh snapshot (all metrics, not just changed ones) and
    // verify the policy tag on the first record.
    MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl();
    source.getMetrics(metricsCollector, true);
    MetricsRecords.assertTag(metricsCollector.getRecords().get(0), "SchedulingPolicy", policy);
}
Also used : MetricsSource(org.apache.hadoop.metrics2.MetricsSource) MetricsCollectorImpl(org.apache.hadoop.metrics2.impl.MetricsCollectorImpl)

Example 8 with MetricsCollectorImpl

Use of org.apache.hadoop.metrics2.impl.MetricsCollectorImpl in the Apache Hadoop project.

From the class TestContainerMetrics, method testContainerMetricsLimit:

@Test
public void testContainerMetricsLimit() throws InterruptedException {
    // Verifies that recorded resource limits, process id, and state-change
    // durations all appear in a single flushed metrics record.
    final String ERR = "Error in number of records";
    ContainerId containerId = mock(ContainerId.class);
    // Flush period of 100ms with a 1ms unregister delay.
    ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, 100, 1);
    // Arbitrary sample values to record and then assert on.
    int anyPmemLimit = 1024;
    int anyVmemLimit = 2048;
    int anyVcores = 10;
    String anyProcessId = "1234";
    long anyLaunchDuration = 20L;
    long anyLocalizationDuration = 1000L;
    metrics.recordResourceLimit(anyVmemLimit, anyPmemLimit, anyVcores);
    metrics.recordProcessId(anyProcessId);
    metrics.recordStateChangeDurations(anyLaunchDuration, anyLocalizationDuration);
    // Sleep just past the flush period so the snapshot below sees the data.
    Thread.sleep(110);
    MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl();
    metrics.getMetrics(metricsCollector, true);
    assertEquals(ERR, 1, metricsCollector.getRecords().size());
    MetricsRecord flushedRecord = metricsCollector.getRecords().get(0);
    MetricsRecords.assertTag(flushedRecord, ContainerMetrics.PROCESSID_INFO.name(), anyProcessId);
    MetricsRecords.assertMetric(flushedRecord, ContainerMetrics.PMEM_LIMIT_METRIC_NAME, anyPmemLimit);
    MetricsRecords.assertMetric(flushedRecord, ContainerMetrics.VMEM_LIMIT_METRIC_NAME, anyVmemLimit);
    MetricsRecords.assertMetric(flushedRecord, ContainerMetrics.VCORE_LIMIT_METRIC_NAME, anyVcores);
    MetricsRecords.assertMetric(flushedRecord, ContainerMetrics.LAUNCH_DURATION_METRIC_NAME, anyLaunchDuration);
    MetricsRecords.assertMetric(flushedRecord, ContainerMetrics.LOCALIZATION_DURATION_METRIC_NAME, anyLocalizationDuration);
    metricsCollector.clear();
}
Also used : ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) MetricsCollectorImpl(org.apache.hadoop.metrics2.impl.MetricsCollectorImpl) Test(org.junit.Test)

Example 9 with MetricsCollectorImpl

Use of org.apache.hadoop.metrics2.impl.MetricsCollectorImpl in the Apache Hadoop project.

From the class TestContainerMetrics, method testContainerMetricsHistogram:

/**
   * Run a test to submit values for actual memory usage and see if the
   * histogram comes out correctly.
   * @throws Exception
   */
@Test
public void testContainerMetricsHistogram() throws Exception {
    // With samples 1024 and 2048, the 75th/90th/95th/99th percentiles are
    // 2048, and the 50th percentile is their midpoint, 1536. CPU usage was
    // never recorded, so all CPU percentiles are 0.
    Map<String, Long> expectedPercentiles = new HashMap<>();
    expectedPercentiles.put("PMemUsageMBHistogram50thPercentileMBs", 1536L);
    expectedPercentiles.put("PMemUsageMBHistogram75thPercentileMBs", 2048L);
    expectedPercentiles.put("PMemUsageMBHistogram90thPercentileMBs", 2048L);
    expectedPercentiles.put("PMemUsageMBHistogram95thPercentileMBs", 2048L);
    expectedPercentiles.put("PMemUsageMBHistogram99thPercentileMBs", 2048L);
    expectedPercentiles.put("PCpuUsagePercentHistogram50thPercentilePercents", 0L);
    expectedPercentiles.put("PCpuUsagePercentHistogram75thPercentilePercents", 0L);
    expectedPercentiles.put("PCpuUsagePercentHistogram90thPercentilePercents", 0L);
    expectedPercentiles.put("PCpuUsagePercentHistogram95thPercentilePercents", 0L);
    expectedPercentiles.put("PCpuUsagePercentHistogram99thPercentilePercents", 0L);
    int flushPeriodMs = 10;
    int rolloverDelayMs = 1000;
    ContainerId containerId = mock(ContainerId.class);
    ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, flushPeriodMs, 0);
    metrics.recordMemoryUsage(1024);
    metrics.recordMemoryUsage(2048);
    // Wait past the rollover window so percentile gauges are populated.
    Thread.sleep(rolloverDelayMs + 10);
    MetricsCollectorImpl metricsCollector = new MetricsCollectorImpl();
    metrics.getMetrics(metricsCollector, true);
    // Track which expected metrics we actually observed in the snapshot.
    Set<String> observedMetricNames = new HashSet<>();
    for (MetricsRecord record : metricsCollector.getRecords()) {
        for (AbstractMetric metric : record.metrics()) {
            String name = metric.name();
            Long expected = expectedPercentiles.get(name);
            if (expected != null) {
                Assert.assertEquals("Metric " + name + " doesn't have expected value", expected, metric.value());
                observedMetricNames.add(name);
            }
        }
    }
    // Every expected percentile metric must have been emitted exactly once.
    Assert.assertEquals(expectedPercentiles.keySet(), observedMetricNames);
}
Also used : HashMap(java.util.HashMap) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) MetricsCollectorImpl(org.apache.hadoop.metrics2.impl.MetricsCollectorImpl) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl)8 Test (org.junit.Test)7 MetricsSource (org.apache.hadoop.metrics2.MetricsSource)4 MetricsRecord (org.apache.hadoop.metrics2.MetricsRecord)3 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)3 File (java.io.File)2 AbstractMetric (org.apache.hadoop.metrics2.AbstractMetric)2 ArrayList (java.util.ArrayList)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 MetricsTag (org.apache.hadoop.metrics2.MetricsTag)1 MetricsSourceBuilder (org.apache.hadoop.metrics2.lib.MetricsSourceBuilder)1 DiskErrorException (org.apache.hadoop.util.DiskChecker.DiskErrorException)1