Search in sources :

Example 1 with DataNodeVolumeMetrics

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics in the Apache Hadoop project.

From the class ProfilingFileIoEvents, the method afterFileIo:

/**
 * Records the latency of a completed file I/O operation into the metrics
 * of the volume the operation ran on.
 *
 * <p>Does nothing when profiling is disabled, when {@code begin} is 0
 * (no start timestamp was captured), or when no metrics object can be
 * resolved for the given volume.
 *
 * @param volume the volume the I/O ran against; may be null
 * @param op     the kind of file I/O operation that completed
 * @param begin  monotonic start time of the operation, or 0 if not timed
 * @param len    number of bytes involved (currently unused here)
 */
public void afterFileIo(@Nullable FsVolumeSpi volume, FileIoProvider.OPERATION op, long begin, long len) {
    // Guard clauses: profiling off or no start timestamp recorded.
    if (!isEnabled || begin == 0) {
        return;
    }
    final DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
    if (metrics == null) {
        return;
    }
    final long elapsed = Time.monotonicNow() - begin;
    // Every data-file I/O contributes to the aggregate latency metric...
    metrics.addDataFileIoLatency(elapsed);
    // ...and additionally to the per-operation metric, where one exists.
    switch (op) {
        case SYNC:
            metrics.addSyncIoLatency(elapsed);
            break;
        case FLUSH:
            metrics.addFlushIoLatency(elapsed);
            break;
        case READ:
            metrics.addReadIoLatency(elapsed);
            break;
        case WRITE:
            metrics.addWriteIoLatency(elapsed);
            break;
        default:
            // Other operation kinds only count toward the aggregate metric.
            break;
    }
}
Also used : DataNodeVolumeMetrics(org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics)

Example 2 with DataNodeVolumeMetrics

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics in the Apache Hadoop project.

From the class DataNodeDiskMetrics, the method startDiskOutlierDetectionThread:

/**
 * Starts the background daemon that periodically samples per-volume I/O
 * statistics (metadata-op, read and write means) and feeds them to the
 * disk-outlier detector.
 *
 * <p>The daemon loops while {@code shouldRun} is true, sleeping
 * {@code detectionInterval} ms between rounds. Volume references are
 * always released in a {@code finally} block. The thread exits if no
 * stats are available at all (original behavior preserved).
 */
private void startDiskOutlierDetectionThread() {
    slowDiskDetectionDaemon = new Daemon(new Runnable() {

        @Override
        public void run() {
            while (shouldRun) {
                Map<String, Double> metadataOpStats = Maps.newHashMap();
                Map<String, Double> readIoStats = Maps.newHashMap();
                Map<String, Double> writeIoStats = Maps.newHashMap();
                FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
                try {
                    fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
                    Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences.iterator();
                    while (volumeIterator.hasNext()) {
                        FsVolumeSpi volume = volumeIterator.next();
                        // BUGFIX: the original called volumeIterator.next() a
                        // second time here, which skipped every other volume
                        // and could throw NoSuchElementException on an
                        // odd-sized volume list. Use the volume we already
                        // fetched instead.
                        DataNodeVolumeMetrics metrics = volume.getMetrics();
                        String volumeName = volume.getBaseURI().getPath();
                        metadataOpStats.put(volumeName, metrics.getMetadataOperationMean());
                        readIoStats.put(volumeName, metrics.getReadIoMean());
                        writeIoStats.put(volumeName, metrics.getWriteIoMean());
                    }
                } finally {
                    // Always release the volume references, even if stat
                    // collection fails mid-iteration.
                    if (fsVolumeReferences != null) {
                        try {
                            fsVolumeReferences.close();
                        } catch (IOException e) {
                            LOG.error("Error in releasing FS Volume references", e);
                        }
                    }
                }
                if (metadataOpStats.isEmpty() && readIoStats.isEmpty() && writeIoStats.isEmpty()) {
                    // No volumes produced stats; nothing to detect, so the
                    // thread terminates (matches the original behavior).
                    LOG.debug("No disk stats available for detecting outliers.");
                    return;
                }
                detectAndUpdateDiskOutliers(metadataOpStats, readIoStats, writeIoStats);
                try {
                    Thread.sleep(detectionInterval);
                } catch (InterruptedException e) {
                    LOG.error("Disk Outlier Detection thread interrupted", e);
                    // Restore the interrupt flag so the owner can observe it.
                    Thread.currentThread().interrupt();
                }
            }
        }
    });
    slowDiskDetectionDaemon.start();
}
Also used : Daemon(org.apache.hadoop.util.Daemon) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) DataNodeVolumeMetrics(org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics) IOException(java.io.IOException)

Example 3 with DataNodeVolumeMetrics

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics in the Apache Hadoop project.

From the class TestDataNodeVolumeMetrics, the method verifyDataNodeVolumeMetrics:

/**
 * Verifies that per-volume metrics are published for the volume holding
 * the first block of {@code fileName}, then logs every metric value for
 * manual inspection.
 *
 * @param fs       the file system the test file was written to
 * @param cluster  the mini DFS cluster under test
 * @param fileName path of the test file whose first block locates the volume
 * @throws IOException if the block or volume cannot be resolved
 */
private void verifyDataNodeVolumeMetrics(final FileSystem fs, final MiniDFSCluster cluster, final Path fileName) throws IOException {
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
    DataNodeVolumeMetrics metrics = volume.getMetrics();
    MetricsRecordBuilder rb = getMetrics(volume.getMetrics().name());
    // The counter published via the metrics system must match the value
    // read directly from the volume's metrics object.
    assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
    LOG.info("TotalMetadataOperations : " + metrics.getTotalMetadataOperations());
    LOG.info("TotalDataFileIos : " + metrics.getTotalDataFileIos());
    LOG.info("TotalFileIoErrors : " + metrics.getTotalFileIoErrors());
    LOG.info("MetadataOperationSampleCount : " + metrics.getMetadataOperationSampleCount());
    LOG.info("MetadataOperationMean : " + metrics.getMetadataOperationMean());
    LOG.info("MetadataFileIoStdDev : " + metrics.getMetadataOperationStdDev());
    LOG.info("DataFileIoSampleCount : " + metrics.getDataFileIoSampleCount());
    LOG.info("DataFileIoMean : " + metrics.getDataFileIoMean());
    LOG.info("DataFileIoStdDev : " + metrics.getDataFileIoStdDev());
    LOG.info("flushIoSampleCount : " + metrics.getFlushIoSampleCount());
    LOG.info("flushIoMean : " + metrics.getFlushIoMean());
    LOG.info("flushIoStdDev : " + metrics.getFlushIoStdDev());
    LOG.info("syncIoSampleCount : " + metrics.getSyncIoSampleCount());
    LOG.info("syncIoMean : " + metrics.getSyncIoMean());
    LOG.info("syncIoStdDev : " + metrics.getSyncIoStdDev());
    // BUGFIX: originally logged getReadIoMean() under the
    // "readIoSampleCount" label; log the sample count instead, matching
    // every other *SampleCount line.
    LOG.info("readIoSampleCount : " + metrics.getReadIoSampleCount());
    LOG.info("readIoMean : " + metrics.getReadIoMean());
    LOG.info("readIoStdDev : " + metrics.getReadIoStdDev());
    LOG.info("writeIoSampleCount : " + metrics.getWriteIoSampleCount());
    LOG.info("writeIoMean : " + metrics.getWriteIoMean());
    LOG.info("writeIoStdDev : " + metrics.getWriteIoStdDev());
    LOG.info("fileIoErrorSampleCount : " + metrics.getFileIoErrorSampleCount());
    LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
    LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) DataNodeVolumeMetrics(org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder)

Aggregations

DataNodeVolumeMetrics (org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics)3 FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)2 IOException (java.io.IOException)1 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)1 FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)1 MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)1 Daemon (org.apache.hadoop.util.Daemon)1