
Example 11 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From class TestNameNodePrunesMissingStorages, method testRemovingStorageDoesNotProduceZombies.

/**
   * Regression test for HDFS-7960.<p/>
   *
   * Shutting down a datanode, removing a storage directory, and restarting
   * the DataNode should not produce zombie storages.
   */
@Test(timeout = 300000)
public void testRemovingStorageDoesNotProduceZombies() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    final int NUM_STORAGES_PER_DN = 2;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storagesPerDatanode(NUM_STORAGES_PER_DN).build();
    try {
        cluster.waitActive();
        for (DataNode dn : cluster.getDataNodes()) {
            assertEquals(NUM_STORAGES_PER_DN, cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).getStorageInfos().length);
        }
        // Create a file which will end up on all 3 datanodes.
        final Path TEST_PATH = new Path("/foo1");
        DistributedFileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, 1024, (short) 3, 0xcafecafe);
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.triggerBlockReport(dn);
        }
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
        cluster.getNamesystem().writeLock();
        final String storageIdToRemove;
        String datanodeUuid;
        // Find the first storage which this block is in.
        try {
            BlockInfo storedBlock = cluster.getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
            Iterator<DatanodeStorageInfo> storageInfoIter = cluster.getNamesystem().getBlockManager().blocksMap.getStorages(storedBlock).iterator();
            assertTrue(storageInfoIter.hasNext());
            DatanodeStorageInfo info = storageInfoIter.next();
            storageIdToRemove = info.getStorageID();
            datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
        } finally {
            cluster.getNamesystem().writeUnlock();
        }
        // Find the DataNode which holds that first storage.
        final DataNode datanodeToRemoveStorageFrom;
        int datanodeToRemoveStorageFromIdx = 0;
        while (true) {
            if (datanodeToRemoveStorageFromIdx >= cluster.getDataNodes().size()) {
                Assert.fail("failed to find datanode with uuid " + datanodeUuid);
                datanodeToRemoveStorageFrom = null;
                break;
            }
            DataNode dn = cluster.getDataNodes().get(datanodeToRemoveStorageFromIdx);
            if (dn.getDatanodeUuid().equals(datanodeUuid)) {
                datanodeToRemoveStorageFrom = dn;
                break;
            }
            datanodeToRemoveStorageFromIdx++;
        }
        // Find the volume within the datanode which holds that first storage.
        StorageLocation volumeLocationToRemove = null;
        try (FsVolumeReferences volumes = datanodeToRemoveStorageFrom.getFSDataset().getFsVolumeReferences()) {
            assertEquals(NUM_STORAGES_PER_DN, volumes.size());
            for (FsVolumeSpi volume : volumes) {
                if (volume.getStorageID().equals(storageIdToRemove)) {
                    volumeLocationToRemove = volume.getStorageLocation();
                }
            }
        }
        // Shut down the datanode and remove the volume.
        // Replace the volume directory with a regular file, which will
        // cause a volume failure.  (If we merely removed the directory,
        // it would be re-initialized with a new storage ID.)
        assertNotNull(volumeLocationToRemove);
        datanodeToRemoveStorageFrom.shutdown();
        FileUtil.fullyDelete(new File(volumeLocationToRemove.getUri()));
        FileOutputStream fos = new FileOutputStream(new File(volumeLocationToRemove.getUri()));
        try {
            fos.write(1);
        } finally {
            fos.close();
        }
        cluster.restartDataNode(datanodeToRemoveStorageFromIdx);
        // Wait for the NameNode to remove the storage.
        LOG.info("waiting for the datanode to remove " + storageIdToRemove);
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                final DatanodeDescriptor dnDescriptor = cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(datanodeToRemoveStorageFrom.getDatanodeUuid());
                assertNotNull(dnDescriptor);
                DatanodeStorageInfo[] infos = dnDescriptor.getStorageInfos();
                for (DatanodeStorageInfo info : infos) {
                    if (info.getStorageID().equals(storageIdToRemove)) {
                        LOG.info("Still found storage " + storageIdToRemove + " on " + info + ".");
                        return false;
                    }
                }
                assertEquals(NUM_STORAGES_PER_DN - 1, infos.length);
                return true;
            }
        }, 1000, 30000);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileOutputStream(java.io.FileOutputStream) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) Test(org.junit.Test)
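
The core StorageLocation step in this test is resolving a storage ID back to the directory that backs it, so the directory can be replaced with a plain file and the restarted DataNode reports the volume as failed instead of re-initializing it. A minimal sketch of that lookup, using only the APIs that appear in the example (the helper name findVolumeDirectory and the null-return convention are illustrative):

static File findVolumeDirectory(FsDatasetSpi<?> dataset, String storageId) throws IOException {
    // FsVolumeReferences is closeable; try-with-resources releases the
    // per-volume references taken while iterating.
    try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        for (FsVolumeSpi volume : volumes) {
            if (volume.getStorageID().equals(storageId)) {
                StorageLocation location = volume.getStorageLocation();
                // For local volumes the location's URI points at an on-disk
                // directory, which is what the test above deletes.
                return new File(location.getUri());
            }
        }
    }
    // No volume advertises this storage ID.
    return null;
}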

Example 12 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From class TestDatasetVolumeChecker, method makeVolumes.

static List<FsVolumeSpi> makeVolumes(int numVolumes, VolumeCheckResult health) throws Exception {
    final List<FsVolumeSpi> volumes = new ArrayList<>(numVolumes);
    for (int i = 0; i < numVolumes; ++i) {
        final FsVolumeSpi volume = mock(FsVolumeSpi.class);
        final FsVolumeReference reference = mock(FsVolumeReference.class);
        final StorageLocation location = mock(StorageLocation.class);
        when(reference.getVolume()).thenReturn(volume);
        when(volume.obtainReference()).thenReturn(reference);
        when(volume.getStorageLocation()).thenReturn(location);
        if (health != null) {
            when(volume.check(anyObject())).thenReturn(health);
        } else {
            final DiskErrorException de = new DiskErrorException("Fake Exception");
            when(volume.check(anyObject())).thenThrow(de);
        }
        volumes.add(volume);
    }
    return volumes;
}
Also used : DiskErrorException(org.apache.hadoop.util.DiskChecker.DiskErrorException) ArrayList(java.util.ArrayList) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)
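
A short usage sketch, not taken from the source, of the two behaviours makeVolumes wires up; it assumes the usual JUnit/Mockito test context and the VolumeCheckResult.HEALTHY constant:

@Test
public void sketchMakeVolumesBehaviour() throws Exception {
    // Non-null health: check() is stubbed to return that result.
    List<FsVolumeSpi> healthy = makeVolumes(1, VolumeCheckResult.HEALTHY);
    assertEquals(VolumeCheckResult.HEALTHY, healthy.get(0).check(null));
    // Null health: check() is stubbed to throw DiskErrorException.
    List<FsVolumeSpi> failing = makeVolumes(1, null);
    try {
        failing.get(0).check(null);
        fail("expected the stubbed DiskErrorException");
    } catch (DiskErrorException expected) {
        // this is the branch a volume checker would treat as a failed disk
    }
}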

Example 13 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From class TestDatasetVolumeCheckerFailures, method makeClosedVolume.

/**
   * Create a mock FsVolumeSpi which is closed and hence cannot
   * be referenced.
   *
   * @return volume
   * @throws Exception
   */
private static FsVolumeSpi makeClosedVolume() throws Exception {
    final FsVolumeSpi volume = mock(FsVolumeSpi.class);
    final StorageLocation location = mock(StorageLocation.class);
    when(volume.obtainReference()).thenThrow(new ClosedChannelException());
    when(volume.getStorageLocation()).thenReturn(location);
    return volume;
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)
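
A minimal usage sketch, assumed rather than taken from the source, of what a caller observes from this closed mock inside a test method: the location is still reported, but taking a reference fails.

FsVolumeSpi closed = makeClosedVolume();
// The mocked StorageLocation is still available for reporting.
assertNotNull(closed.getStorageLocation());
try {
    closed.obtainReference();
    fail("expected ClosedChannelException");
} catch (ClosedChannelException expected) {
    // a closed volume cannot be referenced, so checks against it are not run
}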

Example 14 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From class TestFsDatasetImpl, method testRemoveNewlyAddedVolume.

@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
    final int numExistingVolumes = getNumVolumes();
    List<NamespaceInfo> nsInfos = new ArrayList<>();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
    StorageLocation loc = StorageLocation.parse(newVolumePath);
    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
    dataset.addVolume(loc, nsInfos);
    assertEquals(numExistingVolumes + 1, getNumVolumes());
    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(loc);
    dataset.removeVolumes(volumesToRemove, true);
    assertEquals(numExistingVolumes, getNumVolumes());
}
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)
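
The StorageLocation entry point used above is StorageLocation.parse. A standalone sketch of that call (the paths are illustrative, and the bracketed storage-type prefix is assumed from the dfs.datanode.data.dir syntax rather than shown in this example):

static void parseSketch() throws IOException {
    // A plain path is interpreted as a local directory URI.
    StorageLocation plain = StorageLocation.parse("/data/dn/volume0");
    File plainDir = new File(plain.getUri());
    // Data-dir entries may carry a storage-type prefix such as "[SSD]";
    // parse() accepts that form as well (assumption, see lead-in above).
    StorageLocation typed = StorageLocation.parse("[SSD]/data/dn/volume1");
    System.out.println(plainDir + ", " + typed.getUri());
}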

Example 15 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From class TestFsDatasetImpl, method testAddVolumes.

@Test
public void testAddVolumes() throws IOException {
    final int numNewVolumes = 3;
    final int numExistingVolumes = getNumVolumes();
    final int totalVolumes = numNewVolumes + numExistingVolumes;
    Set<String> expectedVolumes = new HashSet<String>();
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    for (int i = 0; i < numNewVolumes; i++) {
        String path = BASE_DIR + "/newData" + i;
        String pathUri = new Path(path).toUri().toString();
        expectedVolumes.add(new File(pathUri).getAbsolutePath());
        StorageLocation loc = StorageLocation.parse(pathUri);
        Storage.StorageDirectory sd = createStorageDirectory(new File(path));
        DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
        when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
        dataset.addVolume(loc, nsInfos);
        LOG.info("expectedVolumes " + i + " is " + new File(pathUri).getAbsolutePath());
    }
    assertEquals(totalVolumes, getNumVolumes());
    assertEquals(totalVolumes, dataset.storageMap.size());
    Set<String> actualVolumes = new HashSet<String>();
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        for (int i = 0; i < numNewVolumes; i++) {
            String volumeName = volumes.get(numExistingVolumes + i).toString();
            actualVolumes.add(volumeName);
            LOG.info("actualVolume " + i + " is " + volumeName);
        }
    }
    assertEquals(actualVolumes.size(), expectedVolumes.size());
    assertTrue(actualVolumes.containsAll(expectedVolumes));
}
Also used : Path(org.apache.hadoop.fs.Path) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Matchers.anyString(org.mockito.Matchers.anyString) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 24
Test (org.junit.Test): 11
File (java.io.File): 7
ArrayList (java.util.ArrayList): 7
Configuration (org.apache.hadoop.conf.Configuration): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7
Storage (org.apache.hadoop.hdfs.server.common.Storage): 6
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage): 6
FakeTimer (org.apache.hadoop.util.FakeTimer): 6
HashSet (java.util.HashSet): 5
Matchers.anyString (org.mockito.Matchers.anyString): 5
IOException (java.io.IOException): 4
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 4
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 4
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 4
Path (org.apache.hadoop.fs.Path): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 3
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 3
FileOutputStream (java.io.FileOutputStream): 2