
Example 41 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

From the class TestDataNodeVolumeMetrics: method verifyDataNodeVolumeMetrics.

private void verifyDataNodeVolumeMetrics(final FileSystem fs, final MiniDFSCluster cluster, final Path fileName) throws IOException {
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
    DataNodeVolumeMetrics metrics = volume.getMetrics();
    MetricsRecordBuilder rb = getMetrics(metrics.name());
    assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
    LOG.info("TotalMetadataOperations : " + metrics.getTotalMetadataOperations());
    LOG.info("TotalDataFileIos : " + metrics.getTotalDataFileIos());
    LOG.info("TotalFileIoErrors : " + metrics.getTotalFileIoErrors());
    LOG.info("MetadataOperationSampleCount : " + metrics.getMetadataOperationSampleCount());
    LOG.info("MetadataOperationMean : " + metrics.getMetadataOperationMean());
    LOG.info("MetadataFileIoStdDev : " + metrics.getMetadataOperationStdDev());
    LOG.info("DataFileIoSampleCount : " + metrics.getDataFileIoSampleCount());
    LOG.info("DataFileIoMean : " + metrics.getDataFileIoMean());
    LOG.info("DataFileIoStdDev : " + metrics.getDataFileIoStdDev());
    LOG.info("flushIoSampleCount : " + metrics.getFlushIoSampleCount());
    LOG.info("flushIoMean : " + metrics.getFlushIoMean());
    LOG.info("flushIoStdDev : " + metrics.getFlushIoStdDev());
    LOG.info("syncIoSampleCount : " + metrics.getSyncIoSampleCount());
    LOG.info("syncIoMean : " + metrics.getSyncIoMean());
    LOG.info("syncIoStdDev : " + metrics.getSyncIoStdDev());
    LOG.info("readIoSampleCount : " + metrics.getReadIoMean());
    LOG.info("readIoMean : " + metrics.getReadIoMean());
    LOG.info("readIoStdDev : " + metrics.getReadIoStdDev());
    LOG.info("writeIoSampleCount : " + metrics.getWriteIoSampleCount());
    LOG.info("writeIoMean : " + metrics.getWriteIoMean());
    LOG.info("writeIoStdDev : " + metrics.getWriteIoStdDev());
    LOG.info("fileIoErrorSampleCount : " + metrics.getFileIoErrorSampleCount());
    LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
    LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
}
Also used : ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), DataNodeVolumeMetrics (org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)
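
None of these tests actually show the @Metrics annotation the page is about. Below is a minimal sketch of an annotated metrics source in the metrics2 API; the class name ExampleVolumeMetrics and its fields are hypothetical stand-ins, not the real DataNodeVolumeMetrics implementation.

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;

// Hypothetical source class; only the annotations and mutable metric
// types are the real metrics2 API.
@Metrics(name = "ExampleVolumeMetrics", about = "Per-volume I/O metrics", context = "dfs")
public class ExampleVolumeMetrics {

    // The metrics2 source builder instantiates annotated mutable fields
    // during register(), so they need no explicit initialization.
    @Metric("Total number of data file I/Os")
    MutableCounterLong totalDataFileIos;

    @Metric("Data file I/O latency")
    MutableRate dataFileIo;

    public static ExampleVolumeMetrics create() {
        // Registration is what lets a test fetch the snapshot with
        // getMetrics("ExampleVolumeMetrics").
        return DefaultMetricsSystem.instance().register(
            "ExampleVolumeMetrics", "Per-volume I/O metrics",
            new ExampleVolumeMetrics());
    }

    public void addDataFileIo(long latencyMs) {
        // Increments the counter asserted as "TotalDataFileIos" above.
        totalDataFileIos.incr();
        // Feeds the sample count / mean / stddev aggregates.
        dataFileIo.add(latencyMs);
    }
}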

Example 42 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

From the class TestDataNodeMetrics: method testRoundTripAckMetric.

/**
   * Tests that round-trip acks in a datanode write pipeline are correctly 
   * measured. 
   */
@Test
public void testRoundTripAckMetric() throws Exception {
    final int datanodeCount = 2;
    final int interval = 1;
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        // Open a file and get the head of the pipeline
        Path testFile = new Path("/testRoundTripAckMetric.txt");
        FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
        DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
        // Slow down the writes to catch the write pipeline
        dout.setChunksPerPacket(5);
        dout.setArtificialSlowdown(3000);
        fsout.write(new byte[10000]);
        DatanodeInfo[] pipeline = null;
        int count = 0;
        while (pipeline == null && count < 5) {
            pipeline = dout.getPipeline();
            System.out.println("Waiting for pipeline to be created.");
            Thread.sleep(1000);
            count++;
        }
        assertNotNull("Expected the write pipeline to be set up", pipeline);
        // Get the head node that should be receiving downstream acks
        DatanodeInfo headInfo = pipeline[0];
        DataNode headNode = null;
        for (DataNode datanode : cluster.getDataNodes()) {
            if (datanode.getDatanodeId().equals(headInfo)) {
                headNode = datanode;
                break;
            }
        }
        assertNotNull("Could not find the head of the datanode write pipeline", headNode);
        // Wait for the metrics window to roll over before reading them
        Thread.sleep((interval + 1) * 1000);
        // Check the ack was received
        MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
        assertTrue("Expected non-zero number of acks", getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
        assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s", dnMetrics);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
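
The "PacketAckRoundTripTimeNanosNumOps" counter and the interval-suffixed quantile gauges the test checks come from two different mutable metrics. Here is a sketch of that pattern; the class name ExampleAckMetrics is hypothetical (the real wiring lives in DataNodeMetrics), but newRate/newQuantiles are the actual MetricsRegistry API.

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;

// Hypothetical standalone wiring for the two metrics asserted above.
public class ExampleAckMetrics {
    private final MetricsRegistry registry = new MetricsRegistry("ExampleAckMetrics");

    // A MutableRate emits "PacketAckRoundTripTimeNanosNumOps" (and ...AvgTime).
    private final MutableRate ackRtt = registry.newRate(
        "packetAckRoundTripTimeNanos", "Packet ack round-trip time", false);

    // MutableQuantiles emits the interval-suffixed quantile gauges that
    // assertQuantileGauges("PacketAckRoundTripTimeNanos1s", ...) verifies.
    private final MutableQuantiles ackRttQuantiles;

    public ExampleAckMetrics(int intervalSecs) {
        // intervalSecs comes from dfs.metrics.percentiles.intervals.
        ackRttQuantiles = registry.newQuantiles(
            "packetAckRoundTripTimeNanos" + intervalSecs + "s",
            "Packet ack round-trip time", "ops", "latency", intervalSecs);
    }

    public void addPacketAckRoundTripTimeNanos(long rttNanos) {
        // Record each ack round trip in both metrics.
        ackRtt.add(rttNanos);
        ackRttQuantiles.add(rttNanos);
    }
}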

Example 43 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

From the class TestDataNodeMetrics: method testDataNodeTimeSpend.

/**
   * Verifies that writing increments TotalWriteTime and that reading
   * increments TotalReadTime.
   * @throws Exception
   */
@Test(timeout = 120000)
public void testDataNodeTimeSpend() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        final FileSystem fs = cluster.getFileSystem();
        List<DataNode> datanodes = cluster.getDataNodes();
        assertEquals(1, datanodes.size());
        final DataNode datanode = datanodes.get(0);
        MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
        final long LONG_FILE_LEN = 1024 * 1024 * 10;
        final long startWriteValue = getLongCounter("TotalWriteTime", rb);
        final long startReadValue = getLongCounter("TotalReadTime", rb);
        final AtomicInteger x = new AtomicInteger(0);
        // Let the metrics system pick up the latest metrics
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                x.getAndIncrement();
                try {
                    DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()), LONG_FILE_LEN, (short) 1, Time.monotonicNow());
                    DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
                    fs.delete(new Path("/time.txt." + x.get()), true);
                } catch (IOException ioe) {
                    LOG.error("Caught IOException while ingesting DN metrics", ioe);
                    return false;
                }
                MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
                final long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
                final long endReadValue = getLongCounter("TotalReadTime", rbNew);
                return endWriteValue > startWriteValue && endReadValue > startReadValue;
            }
        }, 30, 60000);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), IOException (java.io.IOException), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), Test (org.junit.Test)
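
The bare getMetrics, getLongCounter, assertCounter, and assertQuantileGauges calls in these tests are static imports from Hadoop's test helper org.apache.hadoop.test.MetricsAsserts. A short sketch of how they fit together; the source name in getMetrics() is a hypothetical example and assumes the source is already registered (for instance by a running MiniDFSCluster).

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;

public class ExampleMetricsAssertsSketch {

    @Test
    public void readAndAssertCounters() {
        // Snapshot a registered source by name into a mock record builder.
        // "DataNodeActivity-localhost-9866" is a hypothetical source name.
        MetricsRecordBuilder rb = getMetrics("DataNodeActivity-localhost-9866");

        // Either read a counter value out of the snapshot...
        long writeTime = getLongCounter("TotalWriteTime", rb);

        // ...or assert an exact value directly against the same snapshot.
        assertCounter("TotalWriteTime", writeTime, rb);
    }
}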

Example 44 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

From the class TestShuffleHandler: method testShuffleMetrics.

/**
   * Validate shuffle connection and input/output metrics.
   *
   * @throws Exception exception
   */
@Test(timeout = 10000)
public void testShuffleMetrics() throws Exception {
    MetricsSystem ms = new MetricsSystemImpl();
    ShuffleHandler sh = new ShuffleHandler(ms);
    // Stubbed ChannelFuture: isSuccess() returns true on the first call
    // (one successful shuffle output), then false (one failed output).
    ChannelFuture cf = make(stub(ChannelFuture.class).returning(true, false).from.isSuccess());
    sh.metrics.shuffleConnections.incr();
    sh.metrics.shuffleOutputBytes.incr(1 * MiB);
    sh.metrics.shuffleConnections.incr();
    sh.metrics.shuffleOutputBytes.incr(2 * MiB);
    // 3 MiB written, no outputs completed yet, 2 connections open
    checkShuffleMetrics(ms, 3 * MiB, 0, 0, 2);
    sh.metrics.operationComplete(cf);
    sh.metrics.operationComplete(cf);
    // one output OK, one failed, both connections released
    checkShuffleMetrics(ms, 3 * MiB, 1, 1, 0);
}
Also used : ChannelFuture (org.jboss.netty.channel.ChannelFuture), MetricsSystem (org.apache.hadoop.metrics2.MetricsSystem), MetricsSystemImpl (org.apache.hadoop.metrics2.impl.MetricsSystemImpl), Test (org.junit.Test)
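
checkShuffleMetrics is a private helper in the same test class and is not shown above. A plausible reconstruction, sketched with the MetricsAsserts helpers rather than quoted verbatim: it fetches the "ShuffleMetrics" source from the passed-in metrics system and asserts the three counters plus the connections gauge.

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;

// Sketch of the helper; parameter order matches the calls above:
// bytes, failed, succeeded, connections.
static void checkShuffleMetrics(MetricsSystem ms, long bytes, int failed,
        int succeeded, int connections) {
    // ShuffleHandler registers its annotated source as "ShuffleMetrics".
    MetricsSource source = ms.getSource("ShuffleMetrics");
    MetricsRecordBuilder rb = getMetrics(source);
    assertCounter("ShuffleOutputBytes", bytes, rb);
    assertCounter("ShuffleOutputsFailed", failed, rb);
    assertCounter("ShuffleOutputsOK", succeeded, rb);
    // Connections is a gauge: it goes back down as outputs complete.
    assertGauge("ShuffleConnections", connections, rb);
}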

Example 45 with Metrics

Use of org.apache.hadoop.metrics2.annotation.Metrics in project hadoop by apache.

From the class TestContainerMetrics: method testContainerMetricsFlow.

@Test
public void testContainerMetricsFlow() throws InterruptedException {
    final String ERR = "Error in number of records";
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    ContainerId containerId = mock(ContainerId.class);
    // Create container metrics with a 100 ms flush period.
    ContainerMetrics metrics = ContainerMetrics.forContainer(containerId, 100, 1);
    metrics.recordMemoryUsage(1024);
    // Still inside the flush period, so no record is emitted yet.
    metrics.getMetrics(collector, true);
    assertEquals(ERR, 0, collector.getRecords().size());
    Thread.sleep(110);
    // The flush period has elapsed: exactly one record per snapshot.
    metrics.getMetrics(collector, true);
    assertEquals(ERR, 1, collector.getRecords().size());
    collector.clear();
    Thread.sleep(110);
    metrics.getMetrics(collector, true);
    assertEquals(ERR, 1, collector.getRecords().size());
    collector.clear();
    // After finished(), snapshots flush regardless of the period.
    metrics.finished();
    metrics.getMetrics(collector, true);
    assertEquals(ERR, 1, collector.getRecords().size());
    collector.clear();
    metrics.getMetrics(collector, true);
    assertEquals(ERR, 1, collector.getRecords().size());
    collector.clear();
    Thread.sleep(110);
    metrics.getMetrics(collector, true);
    assertEquals(ERR, 1, collector.getRecords().size());
}
Also used : ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl), Test (org.junit.Test)
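
The flush-period gating this test exercises (no record inside the period, one record once it elapses, unconditional flushes after finished()) can be reproduced with a plain MetricsSource. The following is a hypothetical illustration of the pattern, not the real ContainerMetrics code; the class and field names are invented, but the MetricsSource, MetricsRegistry, and collector APIs are real.

import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;

// Hypothetical source that emits at most one record per flush period,
// then flushes unconditionally once finished() has been called.
public class PeriodicFlushSource implements MetricsSource {
    private final MetricsRegistry registry =
        new MetricsRegistry("PeriodicFlushSource");
    private final MutableGaugeLong memoryMBytes =
        registry.newGauge("MemoryMBytes", "Recorded memory usage", 0L);
    private final long periodMs;
    private long lastSnapshotMs = System.currentTimeMillis();
    private boolean finished;

    public PeriodicFlushSource(long periodMs) {
        this.periodMs = periodMs;
    }

    public void recordMemoryUsage(long mbytes) {
        memoryMBytes.set(mbytes);
    }

    public void finished() {
        finished = true;
    }

    @Override
    public void getMetrics(MetricsCollector collector, boolean all) {
        long now = System.currentTimeMillis();
        // Emit nothing until a full period has elapsed (or finished() was
        // called), which is why the first getMetrics() above sees 0 records.
        if (!finished && now - lastSnapshotMs < periodMs) {
            return;
        }
        lastSnapshotMs = now;
        registry.snapshot(collector.addRecord(registry.info()), all);
    }
}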

Aggregations

Usage counts across the examples in this set:

Test (org.junit.Test): 67
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 30
MetricsRecord (org.apache.hadoop.metrics2.MetricsRecord): 20
AbstractMetric (org.apache.hadoop.metrics2.AbstractMetric): 19
MetricsSystem (org.apache.hadoop.metrics2.MetricsSystem): 19
MetricsTag (org.apache.hadoop.metrics2.MetricsTag): 18
MetricsSource (org.apache.hadoop.metrics2.MetricsSource): 16
ArrayList (java.util.ArrayList): 11
IOException (java.io.IOException): 10
HashSet (java.util.HashSet): 8
Path (org.apache.hadoop.fs.Path): 8
MetricsException (org.apache.hadoop.metrics2.MetricsException): 8
MetricsCollectorImpl (org.apache.hadoop.metrics2.impl.MetricsCollectorImpl): 7
DefaultMetricsSystem (org.apache.hadoop.metrics2.lib.DefaultMetricsSystem): 7
Configuration (org.apache.hadoop.conf.Configuration): 5
Map (java.util.Map): 4
FileSystem (org.apache.hadoop.fs.FileSystem): 4
MetricsSink (org.apache.hadoop.metrics2.MetricsSink): 4
MetricsSystemImpl (org.apache.hadoop.metrics2.impl.MetricsSystemImpl): 4
GraphiteSink (org.apache.hadoop.metrics2.sink.GraphiteSink): 4