
Example 21 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The class TestMetricsAnnotations, method testFields.

@Test
public void testFields() {
    MyMetrics metrics = new MyMetrics();
    MetricsSource source = MetricsAnnotations.makeSource(metrics);
    // Bump every annotated metric once so each field emits a value
    metrics.c1.incr();
    metrics.c2.incr();
    metrics.g1.incr();
    metrics.g2.incr();
    metrics.g3.incr();
    metrics.r1.add(1);
    metrics.s1.add(1);
    metrics.rs1.add("rs1", 1);
    MetricsRecordBuilder rb = getMetrics(source);
    // Each @Metric field surfaces under its capitalized field name, or under
    // the explicit name/description supplied in the annotation
    verify(rb).addCounter(info("C1", "C1"), 1);
    verify(rb).addCounter(info("Counter2", "Counter2 desc"), 1L);
    verify(rb).addGauge(info("G1", "G1"), 1);
    verify(rb).addGauge(info("G2", "G2"), 1);
    verify(rb).addGauge(info("G3", "g3 desc"), 1L);
    verify(rb).addCounter(info("R1NumOps", "Number of ops for r1"), 1L);
    verify(rb).addGauge(info("R1AvgTime", "Average time for r1"), 1.0);
    verify(rb).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
    verify(rb).addGauge(info("S1AvgTime", "Average time for s1"), 1.0);
    verify(rb).addCounter(info("Rs1NumOps", "Number of ops for rs1"), 1L);
    verify(rb).addGauge(info("Rs1AvgTime", "Average time for rs1"), 1.0);
}
Also used: MetricsSource (org.apache.hadoop.metrics2.MetricsSource), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), Test (org.junit.Test)
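
The MyMetrics class itself is not shown on this page. Reconstructed from the names and descriptions verified above, a plausible shape is sketched below; the exact field declarations and annotation values are inferred from the assertions, not copied from Hadoop's source:

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.*;

// Inferred sketch: a source whose fields match the verify() calls above.
@Metrics(context = "test")
class MyMetrics {
    // Counter with default name/description ("C1", "C1")
    @Metric MutableCounterInt c1;
    // Counter with explicit name and description
    @Metric({ "Counter2", "Counter2 desc" }) MutableCounterLong c2;
    // Gauges; g3 carries an explicit description
    @Metric MutableGaugeInt g1;
    @Metric MutableGaugeInt g2;
    @Metric("g3 desc") MutableGaugeLong g3;
    // Rate/stat fields expand to <Name>NumOps and <Name>AvgTime
    @Metric MutableRate r1;
    @Metric MutableStat s1;
    @Metric MutableRates rs1;
}

MetricsAnnotations.makeSource then wraps this plain object in a MetricsSource whose snapshot produces exactly the addCounter/addGauge calls verified above.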

Example 22 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The class TestProtoBufRpc, method testProtoBufRpc2.

@Test(timeout = 5000)
public void testProtoBufRpc2() throws Exception {
    TestRpcService2 client = getClient2();
    // Test ping method
    client.ping2(null, newEmptyRequest());
    // Test echo method
    EchoResponseProto echoResponse = client.echo2(null, newEchoRequest("hello"));
    Assert.assertEquals("hello", echoResponse.getMessage());
    // Ensure RPC metrics are updated
    MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
    assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics);
    assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics);
    MetricsRecordBuilder rpcDetailedMetrics = getMetrics(server.getRpcDetailedMetrics().name());
    assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics);
}
Also used: EchoResponseProto (org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), Test (org.junit.Test)
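
The newEmptyRequest and newEchoRequest helpers come from the shared RPC test base class and are not shown here; assuming they are thin wrappers over the generated protobuf builders, they would look roughly like:

// Assumed shape of the test helpers used above (inferred, not verbatim).
public static TestProtos.EmptyRequestProto newEmptyRequest() {
    return TestProtos.EmptyRequestProto.newBuilder().build();
}

public static TestProtos.EchoRequestProto newEchoRequest(String message) {
    return TestProtos.EchoRequestProto.newBuilder().setMessage(message).build();
}

The per-method counter asserted last, Echo2NumOps, follows the usual mutable-rates naming scheme: the RPC method name (echo2, capitalized) plus the NumOps suffix.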

Example 23 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The class TestRPC, method testClientBackOffByResponseTime.

/**
   *  Test RPC backoff by response time of each priority level.
   */
@Test(timeout = 30000)
public void testClientBackOffByResponseTime() throws Exception {
    final TestRpcService proxy;
    boolean succeeded = false;
    final int numClients = 1;
    GenericTestUtils.setLogLevel(DecayRpcScheduler.LOG, Level.DEBUG);
    GenericTestUtils.setLogLevel(RPC.LOG, Level.DEBUG);
    final List<Future<Void>> res = new ArrayList<Future<Void>>();
    final ExecutorService executorService = Executors.newFixedThreadPool(numClients);
    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
    final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0";
    Server server = setupDecayRpcSchedulerandTestServer(ns + ".");
    @SuppressWarnings("unchecked") CallQueueManager<Call> spy = spy((CallQueueManager<Call>) Whitebox.getInternalState(server, "callQueue"));
    Whitebox.setInternalState(server, "callQueue", spy);
    Exception lastException = null;
    proxy = getClient(addr, conf);
    MetricsRecordBuilder rb1 = getMetrics("DecayRpcSchedulerMetrics2." + ns);
    final long beginDecayedCallVolume = MetricsAsserts.getLongCounter("DecayedCallVolume", rb1);
    final long beginRawCallVolume = MetricsAsserts.getLongCounter("CallVolume", rb1);
    final int beginUniqueCaller = MetricsAsserts.getIntCounter("UniqueCallers", rb1);
    try {
        // start a sleep RPC call that sleeps 3s.
        for (int i = 0; i < numClients; i++) {
            res.add(executorService.submit(new Callable<Void>() {

                @Override
                public Void call() throws ServiceException, InterruptedException {
                    proxy.sleep(null, newSleepRequest(3000));
                    return null;
                }
            }));
            verify(spy, timeout(500).times(i + 1)).offer(Mockito.<Call>anyObject());
        }
        // The average response time (3s) exceeds the back-off threshold (2s).
        try {
            // wait for the 1st response time update
            Thread.sleep(5500);
            proxy.sleep(null, newSleepRequest(100));
        } catch (ServiceException e) {
            RemoteException re = (RemoteException) e.getCause();
            IOException unwrapException = re.unwrapRemoteException();
            if (unwrapException instanceof RetriableException) {
                succeeded = true;
            } else {
                lastException = unwrapException;
            }
            }
            // Let the metrics system publish the latest values
            GenericTestUtils.waitFor(new Supplier<Boolean>() {

                @Override
                public Boolean get() {
                    MetricsRecordBuilder rb2 = getMetrics("DecayRpcSchedulerMetrics2." + ns);
                    long decayedCallVolume1 = MetricsAsserts.getLongCounter("DecayedCallVolume", rb2);
                    long rawCallVolume1 = MetricsAsserts.getLongCounter("CallVolume", rb2);
                    int uniqueCaller1 = MetricsAsserts.getIntCounter("UniqueCallers", rb2);
                    long callVolumePriority0 = MetricsAsserts.getLongGauge("Priority.0.CompletedCallVolume", rb2);
                    long callVolumePriority1 = MetricsAsserts.getLongGauge("Priority.1.CompletedCallVolume", rb2);
                    double avgRespTimePriority0 = MetricsAsserts.getDoubleGauge("Priority.0.AvgResponseTime", rb2);
                    double avgRespTimePriority1 = MetricsAsserts.getDoubleGauge("Priority.1.AvgResponseTime", rb2);
                    LOG.info("DecayedCallVolume: " + decayedCallVolume1);
                    LOG.info("CallVolume: " + rawCallVolume1);
                    LOG.info("UniqueCaller: " + uniqueCaller1);
                    LOG.info("Priority.0.CompletedCallVolume: " + callVolumePriority0);
                    LOG.info("Priority.1.CompletedCallVolume: " + callVolumePriority1);
                    LOG.info("Priority.0.AvgResponseTime: " + avgRespTimePriority0);
                    LOG.info("Priority.1.AvgResponseTime: " + avgRespTimePriority1);
                    return decayedCallVolume1 > beginDecayedCallVolume && rawCallVolume1 > beginRawCallVolume && uniqueCaller1 > beginUniqueCaller;
                }
            }, 30, 60000); // poll every 30 ms, for up to 60 s
        }
    } finally {
        executorService.shutdown();
        stop(server, proxy);
    }
    if (lastException != null) {
        LOG.error("Last received non-RetriableException:", lastException);
    }
    assertTrue("RetriableException not received", succeeded);
}
Also used: Call (org.apache.hadoop.ipc.Server.Call), ArrayList (java.util.ArrayList), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ServiceException (com.google.protobuf.ServiceException), AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), SocketTimeoutException (java.net.SocketTimeoutException), ConnectException (java.net.ConnectException), HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), ExecutionException (java.util.concurrent.ExecutionException), AccessControlException (org.apache.hadoop.security.AccessControlException), Callable (java.util.concurrent.Callable), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), Supplier (com.google.common.base.Supplier), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), Test (org.junit.Test)
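
The setupDecayRpcSchedulerandTestServer helper is not shown on this page. Below is a minimal sketch of the configuration it would need, assuming the standard FairCallQueue/DecayRpcScheduler wiring and the 2s threshold implied by the comment in the test; the decay-scheduler key strings, threshold values, and the setupTestServer call are assumptions rather than the helper's actual code:

// Inferred sketch: enable response-time back-off for one IPC namespace.
// "ns" is the per-test namespace prefix (e.g. "ipc.0."), as built above.
private Server setupDecayRpcSchedulerandTestServer(String ns) throws Exception {
    conf.setBoolean(ns + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, true);
    conf.set(ns + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY,
        "org.apache.hadoop.ipc.FairCallQueue");
    conf.set(ns + CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY,
        "org.apache.hadoop.ipc.DecayRpcScheduler");
    // Two priority levels; back off when a level's average response time
    // exceeds its threshold (2s for priority 0, matching the comment above).
    conf.setInt(ns + "scheduler.priority.levels", 2);
    conf.setBoolean(ns + "decay-scheduler.backoff.responsetime.enable", true);
    conf.set(ns + "decay-scheduler.backoff.responsetime.thresholds", "2s,4s");
    return setupTestServer(conf, 5);
}

With this in place, a call arriving while its priority level is over the response-time threshold is rejected with a RetriableException, which is exactly what the test waits for.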

Example 24 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The class TestReadWriteDiskValidator, method testCheckFailures.

@Test
public void testCheckFailures() throws Throwable {
    ReadWriteDiskValidator readWriteDiskValidator = (ReadWriteDiskValidator) DiskValidatorFactory.getInstance(ReadWriteDiskValidator.NAME);
    // create a temporary test directory under the system test directory
    File testDir = Files.createTempDirectory(Paths.get(System.getProperty("test.build.data")), "test").toFile();
    try {
        Shell.execCommand(Shell.getSetPermissionCommand("000", false, testDir.getAbsolutePath()));
    } catch (Exception e) {
        testDir.delete();
        throw e;
    }
    try {
        readWriteDiskValidator.checkStatus(testDir);
        fail("Disk check should fail.");
    } catch (DiskErrorException e) {
        assertEquals("Disk Check failed!", e.getMessage());
    }
    MetricsSource source = ms.getSource(ReadWriteDiskValidatorMetrics.sourceName(testDir.toString()));
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    source.getMetrics(collector, true);
    try {
        readWriteDiskValidator.checkStatus(testDir);
        fail("Disk check should fail.");
    } catch (DiskErrorException e) {
        assertEquals("Disk Check failed!", e.getMessage());
    }
    source.getMetrics(collector, true);
    // verify the first metrics record
    MetricsRecords.assertMetric(collector.getRecords().get(0), "FailureCount", 1);
    Long lastFailureTime1 = (Long) MetricsRecords.getMetricValueByName(collector.getRecords().get(0), "LastFailureTime");
    // verify the second metrics record
    MetricsRecords.assertMetric(collector.getRecords().get(1), "FailureCount", 2);
    Long lastFailureTime2 = (Long) MetricsRecords.getMetricValueByName(collector.getRecords().get(1), "LastFailureTime");
    assertTrue("The first failure time should be less than the second one", lastFailureTime1 < lastFailureTime2);
    testDir.delete();
}
Also used: MetricsSource (org.apache.hadoop.metrics2.MetricsSource), DiskErrorException (org.apache.hadoop.util.DiskChecker.DiskErrorException), File (java.io.File), MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl), Test (org.junit.Test)
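
The FailureCount and LastFailureTime metrics asserted here come from ReadWriteDiskValidatorMetrics, which is registered through the same @Metrics annotation machinery as the earlier examples. A plausible (inferred, not verbatim) shape of the relevant part of that class:

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;

// Inferred sketch of the per-directory metrics source asserted above;
// the annotation values and method name are assumptions.
@Metrics(context = "ReadWriteDiskValidator")
public class ReadWriteDiskValidatorMetrics {
    @Metric("Number of disk check failures") MutableCounterInt failureCount;
    @Metric("Time of the last disk check failure") MutableGaugeLong lastFailureTime;

    // Called by the validator when checkStatus() fails for this directory.
    // The record names "FailureCount" and "LastFailureTime" are derived
    // from the capitalized field names.
    public void diskCheckFailed() {
        failureCount.incr();
        lastFailureTime.set(System.nanoTime());
    }
}

This accounts for the two records collected above: each failed checkStatus() call bumps FailureCount and refreshes LastFailureTime, so the second record shows a count of 2 and a strictly later timestamp.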

Example 25 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

The class TestReadWriteDiskValidator, method testReadWriteDiskValidator.

@Test
public void testReadWriteDiskValidator() throws DiskErrorException, InterruptedException {
    int count = 100;
    File testDir = new File(System.getProperty("test.build.data"));
    ReadWriteDiskValidator readWriteDiskValidator = (ReadWriteDiskValidator) DiskValidatorFactory.getInstance(ReadWriteDiskValidator.NAME);
    for (int i = 0; i < count; i++) {
        readWriteDiskValidator.checkStatus(testDir);
    }
    ReadWriteDiskValidatorMetrics metric = ReadWriteDiskValidatorMetrics.getMetric(testDir.toString());
    Assert.assertEquals("The count number of estimator in MutableQuantiles" + "metrics of file read is not right", metric.getFileReadQuantiles()[0].getEstimator().getCount(), count);
    Assert.assertEquals("The count number of estimator in MutableQuantiles" + "metrics of file write is not right", metric.getFileWriteQuantiles()[0].getEstimator().getCount(), count);
    MetricsSource source = ms.getSource(ReadWriteDiskValidatorMetrics.sourceName(testDir.toString()));
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    source.getMetrics(collector, true);
    MetricsRecords.assertMetric(collector.getRecords().get(0), "FailureCount", 0);
    MetricsRecords.assertMetric(collector.getRecords().get(0), "LastFailureTime", (long) 0);
    // None of the MutableQuantiles have rolled over yet because the minimum
    // interval is 1 hour, so we only check that these metrics exist.
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency3600sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency3600s50thPercentileLatencyMicros");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency86400sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "WriteLatency864000sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency3600sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency3600s50thPercentileLatencyMicros");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency86400sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0), "ReadLatency864000sNumOps");
}
Also used: MetricsSource (org.apache.hadoop.metrics2.MetricsSource), File (java.io.File), MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl), Test (org.junit.Test)
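
The asserted names follow MutableQuantiles' convention: a capitalized prefix, the rollover interval in seconds, then NumOps or a percentile suffix built from the value name. A hedged sketch of how such quantiles could be registered (the registry name and loop here are illustrative, not Hadoop's actual code):

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

// Intervals of 1 hour, 1 day, and 10 days yield metric names such as
// WriteLatency3600sNumOps and WriteLatency3600s50thPercentileLatencyMicros.
MetricsRegistry registry = new MetricsRegistry("ReadWriteDiskValidator");
int[] intervals = { 3600, 86400, 864000 };
MutableQuantiles[] writeQuantiles = new MutableQuantiles[intervals.length];
for (int i = 0; i < intervals.length; i++) {
    writeQuantiles[i] = registry.newQuantiles(
        "writeLatency" + intervals[i] + "s",
        "file write latency", "ops", "latencyMicros", intervals[i]);
}

Until an interval elapses, the percentile values have not been computed from a full window, which is why the test only checks that the metrics exist rather than asserting their values.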

Aggregations

Test (org.junit.Test): 67 uses
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 30 uses
MetricsRecord (org.apache.hadoop.metrics2.MetricsRecord): 20 uses
AbstractMetric (org.apache.hadoop.metrics2.AbstractMetric): 19 uses
MetricsSystem (org.apache.hadoop.metrics2.MetricsSystem): 19 uses
MetricsTag (org.apache.hadoop.metrics2.MetricsTag): 18 uses
MetricsSource (org.apache.hadoop.metrics2.MetricsSource): 16 uses
ArrayList (java.util.ArrayList): 11 uses
IOException (java.io.IOException): 10 uses
HashSet (java.util.HashSet): 8 uses
Path (org.apache.hadoop.fs.Path): 8 uses
MetricsException (org.apache.hadoop.metrics2.MetricsException): 8 uses
MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl): 7 uses
DefaultMetricsSystem (org.apache.hadoop.metrics2.lib.DefaultMetricsSystem): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 5 uses
Map (java.util.Map): 4 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 4 uses
MetricsSink (org.apache.hadoop.metrics2.MetricsSink): 4 uses
MetricsSystemImpl (org.apache.hadoop.metrics2.impl.MetricsSystemImpl): 4 uses
GraphiteSink (org.apache.hadoop.metrics2.sink.GraphiteSink): 4 uses