
Example 11 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

the class TestDataNodeHotSwapVolumes method testAddBackRemovedVolume.

@Test(timeout = 60000)
public void testAddBackRemovedVolume() throws IOException, TimeoutException, InterruptedException, ReconfigurationException {
    startDFSCluster(1, 2);
    // Create some data on every volume.
    createFile(new Path("/test"), 32);
    DataNode dn = cluster.getDataNodes().get(0);
    Configuration conf = dn.getConf();
    String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
    String keepDataDir = oldDataDir.split(",")[0];
    String removeDataDir = oldDataDir.split(",")[1];
    assertThat("DN did not update its own config", dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir), is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
    for (int i = 0; i < cluster.getNumNameNodes(); i++) {
        String bpid = cluster.getNamesystem(i).getBlockPoolId();
        BlockPoolSliceStorage bpsStorage = dn.getStorage().getBPStorage(bpid);
        // Make sure that there is no block pool level storage under removeDataDir.
        for (int j = 0; j < bpsStorage.getNumStorageDirs(); j++) {
            Storage.StorageDirectory sd = bpsStorage.getStorageDir(j);
            assertFalse(sd.getRoot().getAbsolutePath().startsWith(new File(removeDataDir).getAbsolutePath()));
        }
        assertEquals(dn.getStorage().getBPStorage(bpid).getNumStorageDirs(), 1);
    }
    // Bring the removed directory back. This only succeeds if all metadata about
    // the directory was removed in the previous step.
    assertThat("DN did not update its own config", dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir), is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
}
Also used : Path(org.apache.hadoop.fs.Path) Storage(org.apache.hadoop.hdfs.server.common.Storage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Configuration(org.apache.hadoop.conf.Configuration) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Matchers.anyString(org.mockito.Matchers.anyString) File(java.io.File) Test(org.junit.Test)
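
The test relies on two helpers from TestDataNodeHotSwapVolumes that are not shown above: startDFSCluster and createFile. A minimal sketch of what they might look like, assuming a MiniDFSCluster with two storage directories per DataNode and DFSTestUtil for writing the file; the block size, topology, and seed are assumptions, and the real helpers may differ:

// Hypothetical reconstruction of the helpers used by testAddBackRemovedVolume;
// the actual TestDataNodeHotSwapVolumes implementations may differ.
private static final int BLOCK_SIZE = 512; // assumed test block size
private MiniDFSCluster cluster;

private void startDFSCluster(int numNameNodes, int numDataNodes) throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    // Two data directories per DataNode so one can be removed and added back.
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
        .numDataNodes(numDataNodes)
        .storagesPerDatanode(2)
        .build();
    cluster.waitActive();
}

private void createFile(Path path, int numBlocks) throws IOException {
    final short replication = 1;
    DFSTestUtil.createFile(cluster.getFileSystem(), path,
        (long) numBlocks * BLOCK_SIZE, replication, 0xBEEFL);
}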

Example 12 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

the class TestFsDatasetImpl method testRemoveNewlyAddedVolume.

@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
    final int numExistingVolumes = getNumVolumes();
    List<NamespaceInfo> nsInfos = new ArrayList<>();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
    StorageLocation loc = StorageLocation.parse(newVolumePath);
    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
    dataset.addVolume(loc, nsInfos);
    assertEquals(numExistingVolumes + 1, getNumVolumes());
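    // Teach the mocked DataStorage about the newly added directory so removeVolumes can locate it.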
    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(loc);
    dataset.removeVolumes(volumesToRemove, true);
    assertEquals(numExistingVolumes, getNumVolumes());
}
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)
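
This test (and Examples 13 and 14) also depends on a createStorageDirectory helper that is not shown. A plausible minimal sketch, assuming it simply wraps the given root in a Storage.StorageDirectory; the real TestFsDatasetImpl helper may also assign a storage ID via DataStorage:

// Hypothetical stand-in for the createStorageDirectory helper shared by Examples 12-14.
private static Storage.StorageDirectory createStorageDirectory(File root) {
    // Wraps the on-disk root; sd.lock()/unlock() then operate on the
    // in_use.lock file beneath it (see Example 14).
    return new Storage.StorageDirectory(root);
}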

Example 13 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

the class TestFsDatasetImpl method testAddVolumes.

@Test
public void testAddVolumes() throws IOException {
    final int numNewVolumes = 3;
    final int numExistingVolumes = getNumVolumes();
    final int totalVolumes = numNewVolumes + numExistingVolumes;
    Set<String> expectedVolumes = new HashSet<String>();
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    for (int i = 0; i < numNewVolumes; i++) {
        String path = BASE_DIR + "/newData" + i;
        String pathUri = new Path(path).toUri().toString();
        expectedVolumes.add(new File(pathUri).getAbsolutePath());
        StorageLocation loc = StorageLocation.parse(pathUri);
        Storage.StorageDirectory sd = createStorageDirectory(new File(path));
        DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
        when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
        dataset.addVolume(loc, nsInfos);
        LOG.info("expectedVolumes " + i + " is " + new File(pathUri).getAbsolutePath());
    }
    assertEquals(totalVolumes, getNumVolumes());
    assertEquals(totalVolumes, dataset.storageMap.size());
    Set<String> actualVolumes = new HashSet<String>();
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        for (int i = 0; i < numNewVolumes; i++) {
            String volumeName = volumes.get(numExistingVolumes + i).toString();
            actualVolumes.add(volumeName);
            LOG.info("actualVolume " + i + " is " + volumeName);
        }
    }
    assertEquals(actualVolumes.size(), expectedVolumes.size());
    assertTrue(actualVolumes.containsAll(expectedVolumes));
}
Also used : Path(org.apache.hadoop.fs.Path) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Matchers.anyString(org.mockito.Matchers.anyString) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)
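
Both this test and Example 12 count volumes through a getNumVolumes helper. A likely reconstruction, assuming it simply sizes the dataset's volume references (the error handling here is an assumption):

// Plausible sketch of getNumVolumes; the actual TestFsDatasetImpl version may differ.
private int getNumVolumes() {
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        return volumes.size();
    } catch (IOException e) {
        return 0;
    }
}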

Example 14 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

the class TestFsDatasetImpl method testAddVolumeFailureReleasesInUseLock.

@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
    FsDatasetImpl spyDataset = spy(dataset);
    FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
    File badDir = new File(BASE_DIR, "bad");
    badDir.mkdirs();
    doReturn(mockVolume).when(spyDataset).createFsVolume(anyString(), any(StorageDirectory.class), any(StorageLocation.class));
    doThrow(new IOException("Failed to getVolumeMap()")).when(mockVolume).getVolumeMap(anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));
    Storage.StorageDirectory sd = createStorageDirectory(badDir);
    sd.lock();
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(StorageLocation.parse(badDir.toURI().toString())), Matchers.<List<NamespaceInfo>>any())).thenReturn(builder);
    StorageLocation location = StorageLocation.parse(badDir.toString());
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    try {
        spyDataset.addVolume(location, nsInfos);
        fail("Expect to throw MultipleIOException");
    } catch (MultipleIOException e) {
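        // Expected: the failed volume add is reported as a MultipleIOException.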
    }
    FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) Matchers.anyString(org.mockito.Matchers.anyString) MultipleIOException(org.apache.hadoop.io.MultipleIOException) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) File(java.io.File) Test(org.junit.Test)
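
The closing assertion, FsDatasetTestUtil.assertFileLockReleased, checks that the in_use.lock under the rejected volume is no longer held once addVolume fails. A rough sketch of such a check, assuming java.nio file locking against Storage.STORAGE_FILE_LOCK; the real Hadoop utility may differ in detail:

// Hypothetical sketch of a lock-release check, not the actual FsDatasetTestUtil code.
static void assertFileLockReleased(String dir) throws IOException {
    File lockFile = new File(dir, Storage.STORAGE_FILE_LOCK); // "in_use.lock"
    try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
         FileChannel channel = raf.getChannel();
         FileLock lock = channel.tryLock()) {
        // tryLock() returns null when another process still holds the lock.
        assertNotNull("in_use.lock is still held under " + dir, lock);
    } catch (OverlappingFileLockException e) {
        fail("in_use.lock is still held by this JVM under " + dir);
    }
}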

Aggregations

Storage (org.apache.hadoop.hdfs.server.common.Storage) 14
File (java.io.File) 11
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) 9
Test (org.junit.Test) 8
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage) 7
Matchers.anyString (org.mockito.Matchers.anyString) 7
Path (org.apache.hadoop.fs.Path) 5
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation) 5
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) 5
IOException (java.io.IOException) 4
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) 4
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 4
Configuration (org.apache.hadoop.conf.Configuration) 3
ArrayList (java.util.ArrayList) 2
HashSet (java.util.HashSet) 2
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 2
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) 2
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) 2
MultipleIOException (org.apache.hadoop.io.MultipleIOException) 2
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString) 2