Example 6 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From the class FsDatasetImplTestUtils, method verifyBlockPoolExists:

@Override
public void verifyBlockPoolExists(String bpid) throws IOException {
    FsVolumeImpl volume;
    try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
        volume = (FsVolumeImpl) references.get(0);
    }
    File bpDir = new File(volume.getCurrentDir(), bpid);
    File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
    File finalizedDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_FINALIZED);
    File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
    File versionFile = new File(bpCurrentDir, "VERSION");
    if (!finalizedDir.isDirectory()) {
        throw new IOException(finalizedDir.getPath() + " is not a directory.");
    }
    if (!rbwDir.isDirectory()) {
        throw new IOException(rbwDir.getPath() + " is not a directory.");
    }
    if (!versionFile.exists()) {
        throw new IOException("Version file: " + versionFile.getPath() + " does not exist.");
    }
}
Also used: FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), IOException (java.io.IOException), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)
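
The method repeats the same "must be a directory" check for each required subdirectory of the block pool. A small hypothetical helper (not part of FsDatasetImplTestUtils, shown only as a sketch) would keep the error message tied to the directory actually being checked:

// Hypothetical helper, for illustration only: one place for the repeated check.
private static void requireDirectory(File dir) throws IOException {
    if (!dir.isDirectory()) {
        throw new IOException(dir.getPath() + " is not a directory.");
    }
}

With such a helper, the body of verifyBlockPoolExists would reduce to requireDirectory(finalizedDir), requireDirectory(rbwDir), and the existing VERSION-file check.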

Example 7 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From the class FsDatasetImplTestUtils, method getStoredReplicas:

@Override
public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
    // Reload replicas from the disk.
    ReplicaMap replicaMap = new ReplicaMap(dataset.datasetLock);
    try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
        for (FsVolumeSpi vol : refs) {
            FsVolumeImpl volume = (FsVolumeImpl) vol;
            volume.getVolumeMap(bpid, replicaMap, dataset.ramDiskReplicaTracker);
        }
    }
    // Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
    // FsVolumeSpi implementation.
    List<Replica> ret = new ArrayList<>();
    if (replicaMap.replicas(bpid) != null) {
        ret.addAll(replicaMap.replicas(bpid));
    }
    return ret.iterator();
}
Also used: FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), ArrayList (java.util.ArrayList), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica)
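
The iterator returned by getStoredReplicas reflects what is actually on disk, since the replica map is rebuilt from the volumes rather than taken from the DataNode's in-memory state. A minimal usage sketch; testUtils, bpid, and expectedReplicas are hypothetical names standing in for values from the surrounding test:

// Compare the on-disk replica count with what the test expects
// (testUtils, bpid and expectedReplicas are assumptions for illustration).
int onDisk = 0;
Iterator<Replica> it = testUtils.getStoredReplicas(bpid);
while (it.hasNext()) {
    it.next();
    onDisk++;
}
assertEquals(expectedReplicas, onDisk);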

Example 8 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From the class TestNameNodePrunesMissingStorages, method testRenamingStorageIds:

@Test(timeout = 300000)
public void testRenamingStorageIds() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).storagesPerDatanode(1).build();
    GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
    try {
        cluster.waitActive();
        final Path TEST_PATH = new Path("/foo1");
        DistributedFileSystem fs = cluster.getFileSystem();
        // Create a file and leave it open
        DFSTestUtil.createFile(fs, TEST_PATH, 1, (short) 1, 0xdeadbeef);
        // Find the volume within the datanode which holds that first storage.
        DataNode dn = cluster.getDataNodes().get(0);
        FsVolumeReferences volumeRefs = dn.getFSDataset().getFsVolumeReferences();
        final String newStorageId = DatanodeStorage.generateUuid();
        try {
            File currentDir = new File(new File(volumeRefs.get(0).getStorageLocation().getUri()), "current");
            File versionFile = new File(currentDir, "VERSION");
            rewriteVersionFile(versionFile, newStorageId);
        } finally {
            volumeRefs.close();
        }
        final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
        cluster.restartDataNodes();
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                cluster.getNamesystem().writeLock();
                try {
                    Iterator<DatanodeStorageInfo> storageInfoIter = cluster.getNamesystem().getBlockManager().getStorages(block.getLocalBlock()).iterator();
                    if (!storageInfoIter.hasNext()) {
                        LOG.info("Expected to find a storage for " + block.getBlockName() + ", but nothing was found.  " + "Continuing to wait.");
                        return false;
                    }
                    DatanodeStorageInfo info = storageInfoIter.next();
                    if (!newStorageId.equals(info.getStorageID())) {
                        LOG.info("Expected " + block.getBlockName() + " to " + "be in storage id " + newStorageId + ", but it " + "was in " + info.getStorageID() + ".  Continuing " + "to wait.");
                        return false;
                    }
                    LOG.info("Successfully found " + block.getBlockName() + " in " + "be in storage id " + newStorageId);
                } finally {
                    cluster.getNamesystem().writeUnlock();
                }
                return true;
            }
        }, 20, 100000);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Iterator (java.util.Iterator), File (java.io.File), Test (org.junit.Test)
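
rewriteVersionFile is a helper defined elsewhere in the test class; the VERSION file it edits is a java.util.Properties-style file in the storage directory, and the line starting with storageID identifies the storage. A rough sketch of what such a helper could look like under that assumption (an illustration, not the helper the Hadoop test actually uses):

// Replace the storageID entry in a DataNode storage VERSION file (sketch only).
static void replaceStorageId(File versionFile, String newStorageId) throws IOException {
    java.nio.file.Path path = versionFile.toPath();
    java.util.List<String> rewritten = new java.util.ArrayList<>();
    for (String line : java.nio.file.Files.readAllLines(path, java.nio.charset.StandardCharsets.UTF_8)) {
        rewritten.add(line.startsWith("storageID=") ? "storageID=" + newStorageId : line);
    }
    java.nio.file.Files.write(path, rewritten, java.nio.charset.StandardCharsets.UTF_8);
}

After the DataNodes restart, the waitFor loop polls the NameNode until the block is reported under the new storage ID, which is how the test detects that the rename was picked up.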

Example 9 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From the class TestNameNodePrunesMissingStorages, method testRemovingStorageDoesNotProduceZombies:

/**
   * Regression test for HDFS-7960.<p/>
   *
   * Shutting down a datanode, removing a storage directory, and restarting
   * the DataNode should not produce zombie storages.
   */
@Test(timeout = 300000)
public void testRemovingStorageDoesNotProduceZombies() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    final int NUM_STORAGES_PER_DN = 2;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storagesPerDatanode(NUM_STORAGES_PER_DN).build();
    try {
        cluster.waitActive();
        for (DataNode dn : cluster.getDataNodes()) {
            assertEquals(NUM_STORAGES_PER_DN, cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).getStorageInfos().length);
        }
        // Create a file which will end up on all 3 datanodes.
        final Path TEST_PATH = new Path("/foo1");
        DistributedFileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, 1024, (short) 3, 0xcafecafe);
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.triggerBlockReport(dn);
        }
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
        cluster.getNamesystem().writeLock();
        final String storageIdToRemove;
        String datanodeUuid;
        // Find the first storage which this block is in.
        try {
            BlockInfo storedBlock = cluster.getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
            Iterator<DatanodeStorageInfo> storageInfoIter = cluster.getNamesystem().getBlockManager().blocksMap.getStorages(storedBlock).iterator();
            assertTrue(storageInfoIter.hasNext());
            DatanodeStorageInfo info = storageInfoIter.next();
            storageIdToRemove = info.getStorageID();
            datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
        } finally {
            cluster.getNamesystem().writeUnlock();
        }
        // Find the DataNode which holds that first storage.
        final DataNode datanodeToRemoveStorageFrom;
        int datanodeToRemoveStorageFromIdx = 0;
        while (true) {
            if (datanodeToRemoveStorageFromIdx >= cluster.getDataNodes().size()) {
                Assert.fail("failed to find datanode with uuid " + datanodeUuid);
                datanodeToRemoveStorageFrom = null;
                break;
            }
            DataNode dn = cluster.getDataNodes().get(datanodeToRemoveStorageFromIdx);
            if (dn.getDatanodeUuid().equals(datanodeUuid)) {
                datanodeToRemoveStorageFrom = dn;
                break;
            }
            datanodeToRemoveStorageFromIdx++;
        }
        // Find the volume within the datanode which holds that first storage.
        StorageLocation volumeLocationToRemove = null;
        try (FsVolumeReferences volumes = datanodeToRemoveStorageFrom.getFSDataset().getFsVolumeReferences()) {
            assertEquals(NUM_STORAGES_PER_DN, volumes.size());
            for (FsVolumeSpi volume : volumes) {
                if (volume.getStorageID().equals(storageIdToRemove)) {
                    volumeLocationToRemove = volume.getStorageLocation();
                }
            }
        }
        // Shut down the datanode and remove the volume.
        // Replace the volume directory with a regular file, which will
        // cause a volume failure.  (If we merely removed the directory,
        // it would be re-initialized with a new storage ID.)
        assertNotNull(volumeLocationToRemove);
        datanodeToRemoveStorageFrom.shutdown();
        FileUtil.fullyDelete(new File(volumeLocationToRemove.getUri()));
        FileOutputStream fos = new FileOutputStream(new File(volumeLocationToRemove.getUri()));
        try {
            fos.write(1);
        } finally {
            fos.close();
        }
        cluster.restartDataNode(datanodeToRemoveStorageFromIdx);
        // Wait for the NameNode to remove the storage.
        LOG.info("waiting for the datanode to remove " + storageIdToRemove);
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                final DatanodeDescriptor dnDescriptor = cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(datanodeToRemoveStorageFrom.getDatanodeUuid());
                assertNotNull(dnDescriptor);
                DatanodeStorageInfo[] infos = dnDescriptor.getStorageInfos();
                for (DatanodeStorageInfo info : infos) {
                    if (info.getStorageID().equals(storageIdToRemove)) {
                        LOG.info("Still found storage " + storageIdToRemove + " on " + info + ".");
                        return false;
                    }
                }
                assertEquals(NUM_STORAGES_PER_DN - 1, infos.length);
                return true;
            }
        }, 1000, 30000);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileOutputStream (java.io.FileOutputStream), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation), File (java.io.File), Test (org.junit.Test)
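
The comments inside the test carry the key reasoning: the storage directory is not merely deleted but replaced with a regular file, because a missing directory would simply be re-initialized with a fresh storage ID on restart, whereas a plain file in its place makes the volume fail and leaves the old storage ID with no live replicas behind it. The same trick in isolation, as a compact sketch using try-with-resources instead of the explicit close above (volumeLocationToRemove is the value found in the test):

// Provoke a volume failure: wipe the storage directory and drop a regular file
// in its place so the DataNode cannot re-initialize the volume on restart.
File volumeDir = new File(volumeLocationToRemove.getUri());
FileUtil.fullyDelete(volumeDir);
try (FileOutputStream out = new FileOutputStream(volumeDir)) {
    out.write(1);
}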

Example 10 with FsVolumeReferences

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.

From the class TestDirectoryScanner, method testExceptionHandlingWhileDirectoryScan:

/**
   * Test the behavior of exception handling during directory scan operation.
   * Directory scanner shouldn't abort the scan on every directory just because
   * one had an error.
   */
@Test(timeout = 60000)
public void testExceptionHandlingWhileDirectoryScan() throws Exception {
    cluster = new MiniDFSCluster.Builder(CONF).build();
    try {
        cluster.waitActive();
        bpid = cluster.getNamesystem().getBlockPoolId();
        fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
        client = cluster.getFileSystem().getClient();
        CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
        DataNode dataNode = cluster.getDataNodes().get(0);
        // Add files with 2 blocks
        createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 2, false);
        // Inject error on #getFinalizedDir() so that ReportCompiler#call() will
        // hit exception while preparing the block info report list.
        List<FsVolumeSpi> volumes = new ArrayList<>();
        Iterator<FsVolumeSpi> iterator = fds.getFsVolumeReferences().iterator();
        while (iterator.hasNext()) {
            FsVolumeImpl volume = (FsVolumeImpl) iterator.next();
            FsVolumeImpl spy = Mockito.spy(volume);
            Mockito.doThrow(new IOException("Error while getFinalizedDir")).when(spy).getFinalizedDir(volume.getBlockPoolList()[0]);
            volumes.add(spy);
        }
        FsVolumeReferences volReferences = new FsVolumeReferences(volumes);
        FsDatasetSpi<? extends FsVolumeSpi> spyFds = Mockito.spy(fds);
        Mockito.doReturn(volReferences).when(spyFds).getFsVolumeReferences();
        scanner = new DirectoryScanner(dataNode, spyFds, CONF);
        scanner.setRetainDiffs(true);
        scanner.reconcile();
    } finally {
        if (scanner != null) {
            scanner.shutdown();
            scanner = null;
        }
        cluster.shutdown();
    }
}
Also used: FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl), ArrayList (java.util.ArrayList), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), IOException (java.io.IOException), Test (org.junit.Test)
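
The fault injection here relies on Mockito's spy/doThrow combination: the spy delegates every call to the real FsVolumeImpl except the single method stubbed to throw, so the scanner sees an otherwise normal volume that fails only in getFinalizedDir. A self-contained sketch of that pattern on a plain java.util.List, as an illustration of the technique rather than Hadoop code:

import java.util.ArrayList;
import java.util.List;
import org.mockito.Mockito;

public class SpyFaultInjectionSketch {
    public static void main(String[] args) {
        List<String> spy = Mockito.spy(new ArrayList<String>());
        // Stub only get(0); doThrow(...).when(spy) avoids invoking the real method
        // during stubbing, which is the recommended form for spies.
        Mockito.doThrow(new RuntimeException("injected")).when(spy).get(0);
        spy.add("a");                      // other calls still delegate to the real list
        System.out.println(spy.size());    // prints 1
        try {
            spy.get(0);                    // hits the injected failure
        } catch (RuntimeException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}

In the test itself the same pattern is applied to each volume, and the spied dataset is handed to DirectoryScanner via doReturn(volReferences).when(spyFds).getFsVolumeReferences(), so the scan encounters the injected IOException but, per the test's javadoc, should still complete rather than abort.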

Aggregations

FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 10 usages
File (java.io.File): 5 usages
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 4 usages
Test (org.junit.Test): 4 usages
IOException (java.io.IOException): 3 usages
RandomAccessFile (java.io.RandomAccessFile): 3 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3 usages
FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica): 3 usages
ArrayList (java.util.ArrayList): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2 usages
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 2 usages
FileOutputStream (java.io.FileOutputStream): 1 usage
HashSet (java.util.HashSet): 1 usage
Iterator (java.util.Iterator): 1 usage
FileExistsException (org.apache.commons.io.FileExistsException): 1 usage