Search in sources:

Example 91 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class TestDataNodeMetrics, method testTimeoutMetric.

@Test(timeout = 60000)
public void testTimeoutMetric() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final Path path = new Path("/test");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final List<FSDataOutputStream> streams = Lists.newArrayList();
    try {
        final FSDataOutputStream out = cluster.getFileSystem().create(path, (short) 2);
        // Inject an IOException after flush so the write pipeline hits an
        // error path that bumps the datanode network-error counter.
        final DataNodeFaultInjector injector = Mockito.mock(DataNodeFaultInjector.class);
        Mockito.doThrow(new IOException("mock IOException")).when(injector).writeBlockAfterFlush();
        DataNodeFaultInjector.instance = injector;
        streams.add(out);
        out.writeBytes("old gs data\n");
        out.hflush();
        /* Test the metric. */
        final MetricsRecordBuilder dnMetrics = getMetrics(cluster.getDataNodes().get(0).getMetrics().name());
        assertCounter("DatanodeNetworkErrors", 1L, dnMetrics);
        /* Test JMX datanode network counts. */
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        final ObjectName mxbeanName = new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
        final Object dnc = mbs.getAttribute(mxbeanName, "DatanodeNetworkCounts");
        final String allDnc = dnc.toString();
        // String.contains is the idiomatic form of indexOf(...) >= 0.
        assertTrue("expected to see loopback address", allDnc.contains("127.0.0.1"));
        assertTrue("expected to see networkErrors", allDnc.contains("networkErrors"));
    } finally {
        IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
        // cluster is a final local assigned before the try block, so it can
        // never be null here; the former null check was dead code.
        cluster.shutdown();
        // Restore the default injector so subsequent tests are unaffected.
        DataNodeFaultInjector.instance = new DataNodeFaultInjector();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Closeable(java.io.Closeable) IOException(java.io.IOException) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ObjectName(javax.management.ObjectName) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) MBeanServer(javax.management.MBeanServer) Test(org.junit.Test)

Example 92 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class TestDataNodeMetrics, method testSendDataPacketMetrics.

@Test
public void testSendDataPacketMetrics() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final int interval = 1;
    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        // Create and read a 1 byte file
        Path tmpfile = new Path("/tmp.txt");
        DFSTestUtil.createFile(fs, tmpfile, (long) 1, (short) 1, 1L);
        DFSTestUtil.readFile(fs, tmpfile);
        List<DataNode> datanodes = cluster.getDataNodes();
        // JUnit's assertEquals takes (expected, actual); the original had the
        // arguments reversed, which produces a misleading failure message.
        assertEquals(1, datanodes.size());
        DataNode datanode = datanodes.get(0);
        MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
        // Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
        // signaling the end of the block. Use long literals rather than
        // (long) casts.
        assertCounter("SendDataPacketTransferNanosNumOps", 2L, rb);
        assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", 2L, rb);
        // Wait for at least 1 rollover
        Thread.sleep((interval + 1) * 1000);
        // Check that the sendPacket percentiles rolled to non-zero values
        String sec = interval + "s";
        assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
        assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)

Example 93 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project hadoop by apache.

From the class TestDataNodeMetrics, method testReceivePacketMetrics.

@Test
public void testReceivePacketMetrics() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final int interval = 1;
    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, String.valueOf(interval));
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        // Write a single byte, hsync it, then close the stream.
        final Path testFile = new Path("/testFlushNanosMetric.txt");
        final FSDataOutputStream out = dfs.create(testFile);
        out.write(new byte[1]);
        out.hsync();
        out.close();
        final DataNode dn = cluster.getDataNodes().get(0);
        final MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
        // Two flushes are expected: one after the write, and one when the
        // data and metadata files are closed.
        assertCounter("FlushNanosNumOps", 2L, dnMetrics);
        // Two syncs are expected: one from the hsync, one on close.
        assertCounter("FsyncNanosNumOps", 2L, dnMetrics);
        // Sleep past the percentile window so at least one rollover happens.
        Thread.sleep((interval + 1) * 1000);
        // The receivePacket percentile gauges should now be non-zero.
        final String suffix = interval + "s";
        assertQuantileGauges("FlushNanos" + suffix, dnMetrics);
        assertQuantileGauges("FsyncNanos" + suffix, dnMetrics);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)

Example 94 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project accumulo by apache.

From the class Metrics2ThriftMetrics, method getMetrics.

@Override
public void getMetrics(MetricsCollector collector, boolean all) {
    // Open a record under this source's context and dump every registered
    // metric into it; 'all' asks for all metrics, not just changed ones.
    MetricsRecordBuilder builder = collector.addRecord(record).setContext(CONTEXT);
    registry.snapshot(builder, all);
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)

Example 95 with MetricsRecordBuilder

use of org.apache.hadoop.metrics2.MetricsRecordBuilder in project accumulo by apache.

From the class Metrics2ReplicationMetrics, method getMetrics.

@Override
public void getMetrics(MetricsCollector collector, boolean all) {
    MetricsRecordBuilder builder = collector.addRecord(RECORD).setContext(CONTEXT);
    // Refresh the locally tracked values before exporting them.
    snapshot();
    // Emit the registry metrics plus the replication-queue-time quantiles
    // and stats into the same record.
    registry.snapshot(builder, all);
    replicationQueueTimeQuantiles.snapshot(builder, all);
    replicationQueueTimeStat.snapshot(builder, all);
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)

Aggregations

MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)99 Test (org.junit.Test)47 Path (org.apache.hadoop.fs.Path)20 Configuration (org.apache.hadoop.conf.Configuration)14 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)12 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)11 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)11 FileSystem (org.apache.hadoop.fs.FileSystem)8 MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo)7 IOException (java.io.IOException)6 MetricsCollector (org.apache.hadoop.metrics2.MetricsCollector)6 MetricsSource (org.apache.hadoop.metrics2.MetricsSource)5 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)4 Quantile (org.apache.hadoop.metrics2.util.Quantile)4 ServiceException (com.google.protobuf.ServiceException)3 InterruptedIOException (java.io.InterruptedIOException)2 GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean)2 Map (java.util.Map)2 CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo)2 CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo)2