
Example 6 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class DataNodeDiskMetrics, method startDiskOutlierDetectionThread.

private void startDiskOutlierDetectionThread() {
    slowDiskDetectionDaemon = new Daemon(new Runnable() {

        @Override
        public void run() {
            while (shouldRun) {
                Map<String, Double> metadataOpStats = Maps.newHashMap();
                Map<String, Double> readIoStats = Maps.newHashMap();
                Map<String, Double> writeIoStats = Maps.newHashMap();
                FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
                try {
                    fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
                    Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences.iterator();
                    while (volumeIterator.hasNext()) {
                        FsVolumeSpi volume = volumeIterator.next();
                        // Read the metrics from this same volume; calling next()
                        // a second time would skip a volume and could throw
                        // NoSuchElementException.
                        DataNodeVolumeMetrics metrics = volume.getMetrics();
                        String volumeName = volume.getBaseURI().getPath();
                        metadataOpStats.put(volumeName, metrics.getMetadataOperationMean());
                        readIoStats.put(volumeName, metrics.getReadIoMean());
                        writeIoStats.put(volumeName, metrics.getWriteIoMean());
                    }
                } finally {
                    if (fsVolumeReferences != null) {
                        try {
                            fsVolumeReferences.close();
                        } catch (IOException e) {
                            LOG.error("Error in releasing FS Volume references", e);
                        }
                    }
                }
                if (metadataOpStats.isEmpty() && readIoStats.isEmpty() && writeIoStats.isEmpty()) {
                    // Skip this round but keep the daemon alive; returning
                    // here would terminate the detection thread for good.
                    LOG.debug("No disk stats available for detecting outliers.");
                } else {
                    detectAndUpdateDiskOutliers(metadataOpStats, readIoStats, writeIoStats);
                }
                try {
                    Thread.sleep(detectionInterval);
                } catch (InterruptedException e) {
                    LOG.error("Disk Outlier Detection thread interrupted", e);
                    Thread.currentThread().interrupt();
                }
            }
        }
    });
    slowDiskDetectionDaemon.start();
}
Also used : Daemon(org.apache.hadoop.util.Daemon) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) DataNodeVolumeMetrics(org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics) IOException(java.io.IOException)
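
Since FsVolumeReferences is Closeable (Example 8 below closes it via try-with-resources), the acquire/iterate/release pattern above can be expressed without the explicit finally block. A minimal sketch of the per-volume sampling step, using only API calls already shown on this page; the class and method names are invented for illustration:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

class VolumeStatsSampler {

    // Collect the mean read-IO time per volume, releasing the volume
    // references automatically even if a volume access fails.
    static Map<String, Double> sampleReadIoMeans(FsDatasetSpi<?> dataset) throws IOException {
        Map<String, Double> readIoStats = new HashMap<>();
        try (FsDatasetSpi.FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
            for (FsVolumeSpi volume : refs) {
                DataNodeVolumeMetrics metrics = volume.getMetrics();
                readIoStats.put(volume.getBaseURI().getPath(), metrics.getReadIoMean());
            }
        }
        return readIoStats;
    }
}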

Example 7 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestWriteToReplica, method createReplicas.

private void createReplicas(List<String> bpList, List<FsVolumeSpi> volumes, FsDatasetTestUtils testUtils) throws IOException {
    // Create one replica of each type and add it to the volume map:
    // for every block pool, each volume receives one ReplicaInfo per state.
    // This variable is used as both the blockId and the genStamp.
    long id = 1;
    for (String bpId : bpList) {
        for (FsVolumeSpi volume : volumes) {
            ExtendedBlock eb = new ExtendedBlock(bpId, id, 1, id);
            testUtils.createFinalizedReplica(volume, eb);
            id++;
            eb = new ExtendedBlock(bpId, id, 1, id);
            testUtils.createRBW(volume, eb);
            id++;
            eb = new ExtendedBlock(bpId, id, 1, id);
            testUtils.createReplicaWaitingToBeRecovered(volume, eb);
            id++;
            eb = new ExtendedBlock(bpId, id, 1, id);
            testUtils.createReplicaInPipeline(volume, eb);
            id++;
        }
    }
}
Also used : FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock)
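
As a usage note: every volume ends up with one replica per block pool in each of the four states the test utils expose (finalized, RBW, waiting-to-be-recovered, and in-pipeline), and since id is incremented after each replica and reused as both block ID and generation stamp, every replica gets a unique block ID. Example 8 below calls this helper with volumes collected from FsVolumeReferences.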

Example 8 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestWriteToReplica, method testReplicaMapAfterDatanodeRestart.

/**
   * Tests that the replica map is preserved across a quick datanode
   * restart (less than 5 minutes).
   * @throws Exception
   */
@Test
public void testReplicaMapAfterDatanodeRestart() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
    try {
        cluster.waitActive();
        NameNode nn1 = cluster.getNameNode(0);
        NameNode nn2 = cluster.getNameNode(1);
        assertNotNull("cannot create nn1", nn1);
        assertNotNull("cannot create nn2", nn2);
        // check number of volumes in fsdataset
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetImpl dataSet = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
        List<FsVolumeSpi> volumes = null;
        try (FsDatasetSpi.FsVolumeReferences referredVols = dataSet.getFsVolumeReferences()) {
            // number of volumes should be 2 - [data1, data2]
            assertEquals("number of volumes is wrong", 2, referredVols.size());
            volumes = new ArrayList<>(referredVols.size());
            for (FsVolumeSpi vol : referredVols) {
                volumes.add(vol);
            }
        }
        ArrayList<String> bpList = new ArrayList<>(Arrays.asList(cluster.getNamesystem(0).getBlockPoolId(), cluster.getNamesystem(1).getBlockPoolId()));
        Assert.assertTrue("Cluster should have 2 block pools", bpList.size() == 2);
        createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
        ReplicaMap oldReplicaMap = new ReplicaMap(new AutoCloseableLock());
        oldReplicaMap.addAll(dataSet.volumeMap);
        cluster.restartDataNode(0);
        cluster.waitActive();
        dn = cluster.getDataNodes().get(0);
        dataSet = (FsDatasetImpl) dn.getFSDataset();
        testEqualityOfReplicaMap(oldReplicaMap, dataSet.volumeMap, bpList);
    } finally {
        cluster.shutdown();
    }
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) ArrayList(java.util.ArrayList) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) Test(org.junit.Test)
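
The testEqualityOfReplicaMap helper is not reproduced on this page. A minimal sketch of what such a check might look like, assuming ReplicaMap exposes replicas(bpid) and get(bpid, blockId) accessors and that ReplicaInfo and the org.junit.Assert statics are imported (these are assumptions, not code from this page):

private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap, ReplicaMap newReplicaMap, List<String> bpidList) {
    for (String bpid : bpidList) {
        for (ReplicaInfo oldReplica : oldReplicaMap.replicas(bpid)) {
            // Every replica recorded before the restart should still be
            // present, and equal, in the map rebuilt afterwards.
            ReplicaInfo newReplica = newReplicaMap.get(bpid, oldReplica.getBlockId());
            assertNotNull("replica lost across restart: " + oldReplica, newReplica);
            assertEquals("replica changed across restart", oldReplica, newReplica);
        }
    }
}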

Example 9 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class DataNode, method handleVolumeFailures.

private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
    if (unhealthyVolumes.isEmpty()) {
        LOG.debug("handleVolumeFailures done with empty " + "unhealthyVolumes");
        return;
    }
    data.handleVolumeFailures(unhealthyVolumes);
    Set<StorageLocation> unhealthyLocations = new HashSet<>(unhealthyVolumes.size());
    StringBuilder sb = new StringBuilder("DataNode failed volumes:");
    for (FsVolumeSpi vol : unhealthyVolumes) {
        unhealthyLocations.add(vol.getStorageLocation());
        sb.append(vol.getStorageLocation()).append(";");
    }
    try {
        // Remove all unhealthy volumes from DataNode.
        removeVolumes(unhealthyLocations, false);
    } catch (IOException e) {
        LOG.warn("Error occurred when removing unhealthy storage dirs: " + e.getMessage(), e);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug(sb.toString());
    }
    // send blockreport regarding volume failure
    handleDiskError(sb.toString());
}
Also used : FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) IOException(java.io.IOException) HashSet(java.util.HashSet)
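
A note on the design: the second argument to removeVolumes appears to be a clearFailure flag; passing false keeps the removed locations recorded as failed instead of clearing that state, so the handleDiskError call that follows can still report the failure (and trigger the block report mentioned in the comment) to the NameNode.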

Example 10 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class DiskBalancer, method getStorageIDToVolumeBasePathMap.

/**
   * Returns a map from storage ID (volume UUID) to the volume's base path.
   *
   * @return map of storage ID to volume base path
   * @throws DiskBalancerException if the volume references cannot be read
   */
private Map<String, String> getStorageIDToVolumeBasePathMap() throws DiskBalancerException {
    Map<String, String> storageIDToVolBasePathMap = new HashMap<>();
    // try-with-resources releases the volume references (and the dataset
    // lock) even if an exception occurs mid-iteration.
    try (AutoCloseableLock lock = this.dataset.acquireDatasetLock();
         FsDatasetSpi.FsVolumeReferences references = this.dataset.getFsVolumeReferences()) {
        for (int ndx = 0; ndx < references.size(); ndx++) {
            FsVolumeSpi vol = references.get(ndx);
            storageIDToVolBasePathMap.put(vol.getStorageID(), vol.getBaseURI().getPath());
        }
    } catch (IOException ex) {
        LOG.error("Disk Balancer - Internal Error.", ex);
        throw new DiskBalancerException("Internal error", ex, DiskBalancerException.Result.INTERNAL_ERROR);
    }
    return storageIDToVolBasePathMap;
}
Also used : FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) IOException(java.io.IOException) DiskBalancerException(org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
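
A hedged illustration of how the returned map might be consumed when resolving a volume for a balancer move step; the helper and the INVALID_VOLUME result value below are hypothetical, not DiskBalancer code from this page:

// Hypothetical helper: look up the base path for a storage ID taken from
// a balancer plan, failing fast when the volume is no longer present.
private String getVolumeBasePath(String storageID) throws DiskBalancerException {
    String basePath = getStorageIDToVolumeBasePathMap().get(storageID);
    if (basePath == null) {
        // Result value assumed for illustration.
        throw new DiskBalancerException("Unknown storage ID: " + storageID,
            DiskBalancerException.Result.INVALID_VOLUME);
    }
    return basePath;
}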

Aggregations

FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 33 usages
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 15 usages
Test (org.junit.Test): 10 usages
IOException (java.io.IOException): 8 usages
File (java.io.File): 7 usages
HashSet (java.util.HashSet): 7 usages
Path (org.apache.hadoop.fs.Path): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 5 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 4 usages
ArrayList (java.util.ArrayList): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 3 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 3 usages
FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl): 3 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 3 usages
HashMap (java.util.HashMap): 2 usages