Search in sources:

Example 16 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the Apache Hadoop project.

From the class TestDataNodeMetrics, method testDatanodeActiveXceiversCount.

/**
 * Verifies the {@code DataNodeActiveXceiversCount} gauge: it must be zero on a
 * freshly started single-node cluster and must never be negative after file
 * creation has exercised the DataNode's xceiver threads.
 */
@Test
public void testDatanodeActiveXceiversCount() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        List<DataNode> datanodes = cluster.getDataNodes();
        // JUnit convention: expected value first, actual second, so failure
        // messages read correctly ("expected:<1> but was:<N>").
        assertEquals(1, datanodes.size());
        DataNode datanode = datanodes.get(0);
        MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
        long dataNodeActiveXceiversCount = MetricsAsserts.getIntGauge("DataNodeActiveXceiversCount", rb);
        // No client traffic yet, so no xceiver threads should be active.
        assertEquals(0, dataNodeActiveXceiversCount);
        Path path = new Path("/counter.txt");
        // Last argument is the random seed used to generate file contents.
        DFSTestUtil.createFile(fs, path, 204800000, (short) 3, Time.monotonicNow());
        MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
        dataNodeActiveXceiversCount = MetricsAsserts.getIntGauge("DataNodeActiveXceiversCount", rbNew);
        // Xceivers may have already finished by the time we sample, so only
        // assert the gauge never goes negative.
        assertTrue(dataNodeActiveXceiversCount >= 0);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)

Example 17 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the Apache Hadoop project.

From the class TestDataNodeMetrics, method testDatanodeBlocksReplicatedMetric.

/**
 * Verifies the {@code BlocksReplicated} counter: it starts at zero, and after
 * a second DataNode joins and the NameNode re-replicates an under-replicated
 * block, the counter on the original DataNode increments to one.
 */
@Test
public void testDatanodeBlocksReplicatedMetric() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        List<DataNode> datanodes = cluster.getDataNodes();
        // JUnit convention: expected value first, actual second.
        assertEquals(1, datanodes.size());
        DataNode datanode = datanodes.get(0);
        MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
        long blocksReplicated = getLongCounter("BlocksReplicated", rb);
        assertEquals("No blocks replicated yet", 0, blocksReplicated);
        // Create a file with replication 2 while only one DataNode exists,
        // leaving the block under-replicated on purpose.
        Path path = new Path("/counter.txt");
        DFSTestUtil.createFile(fs, path, 1024, (short) 2, Time.monotonicNow());
        // Add a second DataNode and wait for re-replication to complete.
        cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null);
        ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, path);
        DFSTestUtil.waitForReplication(cluster, firstBlock, 1, 2, 0);
        MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
        blocksReplicated = getLongCounter("BlocksReplicated", rbNew);
        assertEquals("blocks replicated counter incremented", 1, blocksReplicated);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)

Example 18 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the Apache Hadoop project.

From the class TestDataNodeMetrics, method testDataNodeMetrics.

/**
 * Verifies basic DataNode write metrics against a simulated dataset: after
 * writing a file longer than {@code Integer.MAX_VALUE} bytes, the
 * {@code BytesWritten} counter matches the file length (exercising the
 * long-valued counter path) and at least one incremental block report was sent.
 */
@Test
public void testDataNodeMetrics() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // SimulatedFSDataset avoids real disk I/O for the multi-GB write below.
    SimulatedFSDataset.setFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        // Deliberately exceeds int range to catch 32-bit truncation bugs.
        final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
        DFSTestUtil.createFile(fs, new Path("/tmp.txt"), LONG_FILE_LEN, (short) 1, 1L);
        List<DataNode> datanodes = cluster.getDataNodes();
        // JUnit convention: expected value first, actual second.
        assertEquals(1, datanodes.size());
        DataNode datanode = datanodes.get(0);
        MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
        assertCounter("BytesWritten", LONG_FILE_LEN, rb);
        assertTrue("Expected non-zero number of incremental block reports", getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)

Example 19 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the Apache Hadoop project.

From the class TestDataNodeErasureCodingMetrics, method getLongMetric.

/**
 * Sums the named long counter across every DataNode in the cluster.
 * Fails (via getLongCounter's internal assertion) if any node lacks the metric.
 */
private long getLongMetric(String metricName) {
    long total = 0L;
    for (DataNode dataNode : cluster.getDataNodes()) {
        MetricsRecordBuilder record = getMetrics(dataNode.getMetrics().name());
        total += getLongCounter(metricName, record);
    }
    return total;
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)

Example 20 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the Apache Hadoop project.

From the class TestDataNodeErasureCodingMetrics, method getLongMetricWithoutCheck.

/**
 * Sums the named long counter across every DataNode in the cluster,
 * treating a missing metric on a node as zero rather than failing.
 */
private long getLongMetricWithoutCheck(String metricName) {
    long total = 0L;
    for (DataNode dataNode : cluster.getDataNodes()) {
        MetricsRecordBuilder record = getMetrics(dataNode.getMetrics().name());
        total += getLongCounterWithoutCheck(metricName, record);
    }
    return total;
}
Also used : MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)

Aggregations

MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)99 Test (org.junit.Test)47 Path (org.apache.hadoop.fs.Path)20 Configuration (org.apache.hadoop.conf.Configuration)14 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)12 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)11 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)11 FileSystem (org.apache.hadoop.fs.FileSystem)8 MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo)7 IOException (java.io.IOException)6 MetricsCollector (org.apache.hadoop.metrics2.MetricsCollector)6 MetricsSource (org.apache.hadoop.metrics2.MetricsSource)5 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)4 Quantile (org.apache.hadoop.metrics2.util.Quantile)4 ServiceException (com.google.protobuf.ServiceException)3 InterruptedIOException (java.io.InterruptedIOException)2 GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean)2 Map (java.util.Map)2 CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo)2 CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo)2