
Example 11 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the apache/hadoop project.

From the class TestNameNodeMetrics, method tearDown.

@After
public void tearDown() throws Exception {
    MetricsSource source = DefaultMetricsSystem.instance().getSource("UgiMetrics");
    if (source != null) {
        // Run only once, since the UGI metrics are cleaned up during teardown
        MetricsRecordBuilder rb = getMetrics(source);
        assertQuantileGauges("GetGroups1s", rb);
    }
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}
Also used : MetricsSource(org.apache.hadoop.metrics2.MetricsSource) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) After(org.junit.After)
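
The getMetrics(source) call above comes from the MetricsAsserts test helper, which captures whatever the source emits into a mocked MetricsRecordBuilder. A minimal sketch of that idea, assuming Mockito is on the classpath; the class and method names below are illustrative, not the actual MetricsAsserts code:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;

final class MetricsCaptureSketch {
    /** Snapshots everything a MetricsSource emits into one mocked builder. */
    static MetricsRecordBuilder capture(MetricsSource source) {
        MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class);
        MetricsCollector collector = mock(MetricsCollector.class);
        // Route every record the source opens to the same builder, so later
        // assertions can verify the addCounter/addGauge calls made on it.
        when(collector.addRecord(anyString())).thenReturn(rb);
        when(collector.addRecord(any(MetricsInfo.class))).thenReturn(rb);
        when(rb.parent()).thenReturn(collector);
        // Ask the source to publish all of its metrics into the mocked collector.
        source.getMetrics(collector, true);
        return rb;
    }
}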

Example 12 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the apache/hadoop project.

From the class TestNameNodeMetrics, method testGenerateEDEKTime.

@Test
public void testGenerateEDEKTime() throws IOException, NoSuchAlgorithmException {
    // Create a new MiniDFSCluster with encryption zone configuration
    Configuration conf = new HdfsConfiguration();
    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
    // Set up java key store
    String testRoot = fsHelper.getTestRootDir();
    File testRootDir = new File(testRoot).getAbsoluteFile();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + new Path(testRootDir.toString(), "test.jks").toUri());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2);
    try (MiniDFSCluster clusterEDEK = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
        DistributedFileSystem fsEDEK = clusterEDEK.getFileSystem();
        FileSystemTestWrapper fsWrapper = new FileSystemTestWrapper(fsEDEK);
        HdfsAdmin dfsAdmin = new HdfsAdmin(clusterEDEK.getURI(), conf);
        fsEDEK.getClient().setKeyProvider(clusterEDEK.getNameNode().getNamesystem().getProvider());
        String testKey = "test_key";
        DFSTestUtil.createKey(testKey, clusterEDEK, conf);
        final Path zoneParent = new Path("/zones");
        final Path zone1 = new Path(zoneParent, "zone1");
        fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
        dfsAdmin.createEncryptionZone(zone1, "test_key", EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH));
        MetricsRecordBuilder rb = getMetrics(NN_METRICS);
        for (int i = 0; i < 3; i++) {
            Path filePath = new Path("/zones/zone1/testfile-" + i);
            DFSTestUtil.createFile(fsEDEK, filePath, 1024, (short) 3, 1L);
            assertQuantileGauges("GenerateEDEKTime1s", rb);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FileSystemTestHelper(org.apache.hadoop.fs.FileSystemTestHelper) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) FileSystemTestWrapper(org.apache.hadoop.fs.FileSystemTestWrapper) File(java.io.File) Test(org.junit.Test)
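
The "GenerateEDEKTime1s" gauge asserted above follows the usual metrics2 quantile convention: one MutableQuantiles instance per configured rollover interval, with the interval (in seconds) appended to the metric name. A minimal sketch of such a registration, assuming the standard MetricsRegistry.newQuantiles API; the class and field names are illustrative, not the actual NameNodeMetrics code:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;

final class EdekTimeQuantilesSketch {
    private final MetricsRegistry registry = new MetricsRegistry("NameNodeActivity");
    private final MutableQuantiles[] generateEdekTimeQuantiles;

    EdekTimeQuantilesSketch(int[] intervalsInSeconds) {
        generateEdekTimeQuantiles = new MutableQuantiles[intervalsInSeconds.length];
        for (int i = 0; i < intervalsInSeconds.length; i++) {
            int interval = intervalsInSeconds[i];
            // newQuantiles(name, description, sampleName, valueName, intervalSecs)
            generateEdekTimeQuantiles[i] = registry.newQuantiles(
                "generateEDEKTime" + interval + "s",
                "Time spent generating an EDEK", "ops", "latency", interval);
        }
    }

    void addGenerateEDEKTime(long latency) {
        // Each sample feeds every interval's estimator; the gauges roll over
        // once per interval, which is what assertQuantileGauges inspects.
        for (MutableQuantiles q : generateEdekTimeQuantiles) {
            q.add(latency);
        }
    }
}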

Example 13 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the apache/hadoop project.

From the class TestDataNodeVolumeMetrics, method verifyDataNodeVolumeMetrics.

private void verifyDataNodeVolumeMetrics(final FileSystem fs, final MiniDFSCluster cluster, final Path fileName) throws IOException {
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
    DataNodeVolumeMetrics metrics = volume.getMetrics();
    MetricsRecordBuilder rb = getMetrics(volume.getMetrics().name());
    assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
    LOG.info("TotalMetadataOperations : " + metrics.getTotalMetadataOperations());
    LOG.info("TotalDataFileIos : " + metrics.getTotalDataFileIos());
    LOG.info("TotalFileIoErrors : " + metrics.getTotalFileIoErrors());
    LOG.info("MetadataOperationSampleCount : " + metrics.getMetadataOperationSampleCount());
    LOG.info("MetadataOperationMean : " + metrics.getMetadataOperationMean());
    LOG.info("MetadataFileIoStdDev : " + metrics.getMetadataOperationStdDev());
    LOG.info("DataFileIoSampleCount : " + metrics.getDataFileIoSampleCount());
    LOG.info("DataFileIoMean : " + metrics.getDataFileIoMean());
    LOG.info("DataFileIoStdDev : " + metrics.getDataFileIoStdDev());
    LOG.info("flushIoSampleCount : " + metrics.getFlushIoSampleCount());
    LOG.info("flushIoMean : " + metrics.getFlushIoMean());
    LOG.info("flushIoStdDev : " + metrics.getFlushIoStdDev());
    LOG.info("syncIoSampleCount : " + metrics.getSyncIoSampleCount());
    LOG.info("syncIoMean : " + metrics.getSyncIoMean());
    LOG.info("syncIoStdDev : " + metrics.getSyncIoStdDev());
    LOG.info("readIoSampleCount : " + metrics.getReadIoMean());
    LOG.info("readIoMean : " + metrics.getReadIoMean());
    LOG.info("readIoStdDev : " + metrics.getReadIoStdDev());
    LOG.info("writeIoSampleCount : " + metrics.getWriteIoSampleCount());
    LOG.info("writeIoMean : " + metrics.getWriteIoMean());
    LOG.info("writeIoStdDev : " + metrics.getWriteIoStdDev());
    LOG.info("fileIoErrorSampleCount : " + metrics.getFileIoErrorSampleCount());
    LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
    LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) DataNodeVolumeMetrics(org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)
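
The helper above mostly logs the captured values; the same MetricsRecordBuilder can also be asserted against directly with the MetricsAsserts helpers used elsewhere on this page. A small sketch of that, under the assumption that the extra counter names mirror the getter names (only "TotalDataFileIos" is confirmed by the example above):

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

final class VolumeMetricsAssertSketch {
    static void assertVolumeCounters(DataNodeVolumeMetrics metrics) {
        MetricsRecordBuilder rb = getMetrics(metrics.name());
        // Counters captured from the source should match the typed getters.
        assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
        assertCounter("TotalMetadataOperations", metrics.getTotalMetadataOperations(), rb);
        assertCounter("TotalFileIoErrors", metrics.getTotalFileIoErrors(), rb);
        // getLongCounter exposes the raw captured value for ad-hoc checks.
        long dataFileIos = getLongCounter("TotalDataFileIos", rb);
        assertTrue(dataFileIos >= 0);
    }
}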

Example 14 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the apache/hadoop project.

From the class TestDataNodeMetrics, method testRoundTripAckMetric.

/**
   * Tests that round-trip acks in a datanode write pipeline are correctly 
   * measured. 
   */
@Test
public void testRoundTripAckMetric() throws Exception {
    final int datanodeCount = 2;
    final int interval = 1;
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        // Open a file and get the head of the pipeline
        Path testFile = new Path("/testRoundTripAckMetric.txt");
        FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
        DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
        // Slow down the writes to catch the write pipeline
        dout.setChunksPerPacket(5);
        dout.setArtificialSlowdown(3000);
        fsout.write(new byte[10000]);
        DatanodeInfo[] pipeline = null;
        int count = 0;
        while (pipeline == null && count < 5) {
            pipeline = dout.getPipeline();
            System.out.println("Waiting for pipeline to be created.");
            Thread.sleep(1000);
            count++;
        }
        // Get the head node that should be receiving downstream acks
        DatanodeInfo headInfo = pipeline[0];
        DataNode headNode = null;
        for (DataNode datanode : cluster.getDataNodes()) {
            if (datanode.getDatanodeId().equals(headInfo)) {
                headNode = datanode;
                break;
            }
        }
        assertNotNull("Could not find the head of the datanode write pipeline", headNode);
        // Close the file and wait for the metrics to rollover
        Thread.sleep((interval + 1) * 1000);
        // Check the ack was received
        MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
        assertTrue("Expected non-zero number of acks", getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
        assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s", dnMetrics);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) Test(org.junit.Test)
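
The quantile assertion above works because DFS_METRICS_PERCENTILES_INTERVALS_KEY is set to "1": each configured interval produces one set of rollover gauges, named with the interval suffix ("PacketAckRoundTripTimeNanos" + interval + "s"). A minimal sketch of reading that setting the way the metrics code typically does, assuming Configuration.getInts; the helper class is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

final class PercentileIntervalsSketch {
    /** Returns the configured percentile rollover intervals, in seconds. */
    static int[] readIntervals(Configuration conf) {
        // "dfs.metrics.percentiles.intervals" is a comma-separated list of ints;
        // an empty list means no quantile metrics are registered at all.
        return conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    }
}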

Example 15 with MetricsRecordBuilder

Use of org.apache.hadoop.metrics2.MetricsRecordBuilder in the apache/hadoop project.

From the class TestDataNodeMetrics, method testDataNodeTimeSpend.

/**
   * Verifies that writing a file increments TotalWriteTime
   * and reading it increments TotalReadTime.
   * @throws Exception
   */
@Test(timeout = 120000)
public void testDataNodeTimeSpend() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        final FileSystem fs = cluster.getFileSystem();
        List<DataNode> datanodes = cluster.getDataNodes();
        assertEquals(1, datanodes.size());
        final DataNode datanode = datanodes.get(0);
        MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
        final long LONG_FILE_LEN = 1024 * 1024 * 10;
        final long startWriteValue = getLongCounter("TotalWriteTime", rb);
        final long startReadValue = getLongCounter("TotalReadTime", rb);
        final AtomicInteger x = new AtomicInteger(0);
        // Keep retrying so the metrics system can pick up the latest values
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                x.getAndIncrement();
                try {
                    DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()), LONG_FILE_LEN, (short) 1, Time.monotonicNow());
                    DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
                    fs.delete(new Path("/time.txt." + x.get()), true);
                } catch (IOException ioe) {
                    LOG.error("Caught IOException while ingesting DN metrics", ioe);
                    return false;
                }
                MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
                final long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
                final long endReadValue = getLongCounter("TotalReadTime", rbNew);
                return endWriteValue > startWriteValue && endReadValue > startReadValue;
            }
        }, 30, 60000);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) IOException(java.io.IOException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) Test(org.junit.Test)
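
The anonymous Supplier in the example above is a pre-Java-8 way of writing a polling loop. A condensed sketch of the same counter check with a lambda, assuming GenericTestUtils.waitFor accepts a lambda in the Hadoop version at hand; the file create/read/delete work that actually moves the counters is elided here:

import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;

final class WaitForTimeCountersSketch {
    /** Polls the DataNode metrics until both time counters have advanced. */
    static void waitForTimeCounters(DataNode dn, long startWrite, long startRead)
            throws TimeoutException, InterruptedException {
        GenericTestUtils.waitFor(() -> {
            // Re-read the current counter values on every poll; the caller is
            // expected to be generating read/write traffic in the meantime.
            MetricsRecordBuilder rb = getMetrics(dn.getMetrics().name());
            return getLongCounter("TotalWriteTime", rb) > startWrite
                && getLongCounter("TotalReadTime", rb) > startRead;
        }, 30, 60000);
    }
}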

Aggregations

MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 99 uses
Test (org.junit.Test): 47 uses
Path (org.apache.hadoop.fs.Path): 20 uses
Configuration (org.apache.hadoop.conf.Configuration): 14 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 11 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 8 uses
MetricsInfo (org.apache.hadoop.metrics2.MetricsInfo): 7 uses
IOException (java.io.IOException): 6 uses
MetricsCollector (org.apache.hadoop.metrics2.MetricsCollector): 6 uses
MetricsSource (org.apache.hadoop.metrics2.MetricsSource): 5 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 uses
Quantile (org.apache.hadoop.metrics2.util.Quantile): 4 uses
ServiceException (com.google.protobuf.ServiceException): 3 uses
InterruptedIOException (java.io.InterruptedIOException): 2 uses
GarbageCollectorMXBean (java.lang.management.GarbageCollectorMXBean): 2 uses
Map (java.util.Map): 2 uses
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 2 uses
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 2 uses