
Example 16 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestFsDatasetImpl, the method testRemoveNewlyAddedVolume:

@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
    final int numExistingVolumes = getNumVolumes();
    List<NamespaceInfo> nsInfos = new ArrayList<>();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
    StorageLocation loc = StorageLocation.parse(newVolumePath);
    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
    dataset.addVolume(loc, nsInfos);
    assertEquals(numExistingVolumes + 1, getNumVolumes());
    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(loc);
    dataset.removeVolumes(volumesToRemove, true);
    assertEquals(numExistingVolumes, getNumVolumes());
}
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) Storage(org.apache.hadoop.hdfs.server.common.Storage) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)
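
The mocked prepareVolume stub above matches its StorageLocation argument with eq(loc), so the test relies on StorageLocation.parse producing an equal location for the same string. A minimal, self-contained sketch of that parsing behavior follows; the paths are made up, and getUri() and getStorageType() are assumed to be the accessors available on StorageLocation in the Hadoop version these examples come from.

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class StorageLocationParseSketch {
    public static void main(String[] args) throws Exception {
        // A bare path defaults to the DISK storage type.
        StorageLocation plain = StorageLocation.parse("/data/dn/vol0");
        // The bracketed prefix selects an explicit storage type for the volume.
        StorageLocation ssd = StorageLocation.parse("[SSD]/data/dn/fast0");

        System.out.println(plain.getStorageType() + " -> " + plain.getUri());
        System.out.println(ssd.getStorageType() + " -> " + ssd.getUri());
    }
}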

Example 17 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestFsDatasetImpl, the method testAddVolumes:

@Test
public void testAddVolumes() throws IOException {
    final int numNewVolumes = 3;
    final int numExistingVolumes = getNumVolumes();
    final int totalVolumes = numNewVolumes + numExistingVolumes;
    Set<String> expectedVolumes = new HashSet<String>();
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    for (int i = 0; i < numNewVolumes; i++) {
        String path = BASE_DIR + "/newData" + i;
        String pathUri = new Path(path).toUri().toString();
        expectedVolumes.add(new File(pathUri).getAbsolutePath());
        StorageLocation loc = StorageLocation.parse(pathUri);
        Storage.StorageDirectory sd = createStorageDirectory(new File(path));
        DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
        when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
        dataset.addVolume(loc, nsInfos);
        LOG.info("expectedVolumes " + i + " is " + new File(pathUri).getAbsolutePath());
    }
    assertEquals(totalVolumes, getNumVolumes());
    assertEquals(totalVolumes, dataset.storageMap.size());
    Set<String> actualVolumes = new HashSet<String>();
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        for (int i = 0; i < numNewVolumes; i++) {
            String volumeName = volumes.get(numExistingVolumes + i).toString();
            actualVolumes.add(volumeName);
            LOG.info("actualVolume " + i + " is " + volumeName);
        }
    }
    assertEquals(actualVolumes.size(), expectedVolumes.size());
    assertTrue(actualVolumes.containsAll(expectedVolumes));
}
Also used : Path(org.apache.hadoop.fs.Path) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Matchers.anyString(org.mockito.Matchers.anyString) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) Storage(org.apache.hadoop.hdfs.server.common.Storage) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)
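
In the mocked test the volumes come from addVolume calls; on a real DataNode the same set of volumes is declared through dfs.datanode.data.dir. A minimal sketch of that configuration side is below; the paths are hypothetical, and DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY is the standard constant for that key.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DataDirConfigSketch {
    public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Comma-separated list of volumes; a bracketed prefix picks the storage type per volume.
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
                "/data/dn/vol0,/data/dn/vol1,[SSD]/data/dn/fast0");
        for (String dir : conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)) {
            System.out.println("configured volume: " + dir);
        }
    }
}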

Example 18 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestFsDatasetImpl, the method testAddVolumeFailureReleasesInUseLock:

@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
    FsDatasetImpl spyDataset = spy(dataset);
    FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
    File badDir = new File(BASE_DIR, "bad");
    badDir.mkdirs();
    doReturn(mockVolume).when(spyDataset).createFsVolume(anyString(), any(StorageDirectory.class), any(StorageLocation.class));
    doThrow(new IOException("Failed to getVolumeMap()")).when(mockVolume).getVolumeMap(anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));
    Storage.StorageDirectory sd = createStorageDirectory(badDir);
    sd.lock();
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(StorageLocation.parse(badDir.toURI().toString())), Matchers.<List<NamespaceInfo>>any())).thenReturn(builder);
    StorageLocation location = StorageLocation.parse(badDir.toString());
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    try {
        spyDataset.addVolume(location, nsInfos);
        fail("Expect to throw MultipleIOException");
    } catch (MultipleIOException e) {
    }
    FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) Matchers.anyString(org.mockito.Matchers.anyString) Storage(org.apache.hadoop.hdfs.server.common.Storage) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) File(java.io.File) Test(org.junit.Test)
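
What the test ultimately asserts is that the in_use.lock taken by sd.lock() is released even though addVolume fails part-way through. A generic sketch of that lock discipline using plain java.nio is below; it is not the actual Storage.StorageDirectory implementation (which manages in_use.lock internally), the path is made up, and it only illustrates why releasing the lock on a try-with-resources/finally path guarantees release on failure.

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

public class InUseLockSketch {
    public static void main(String[] args) throws Exception {
        File lockFile = new File("/tmp/dn-vol", "in_use.lock");
        lockFile.getParentFile().mkdirs();
        // try-with-resources releases the lock and channel even if the work below throws,
        // which is the property the test checks with assertFileLockReleased().
        try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
             FileChannel channel = raf.getChannel();
             FileLock lock = channel.tryLock()) {
            if (lock == null) {
                throw new IllegalStateException("volume already locked by another process");
            }
            // ... initialize the volume here; a failure must not leave the lock held ...
        }
    }
}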

Example 19 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestFsDatasetImpl, the method testRemoveVolumeBeingWritten:

@Test(timeout = 60000)
public void testRemoveVolumeBeingWritten() throws Exception {
    // Will write and remove on dn0.
    final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
    final CountDownLatch startFinalizeLatch = new CountDownLatch(1);
    final CountDownLatch blockReportReceivedLatch = new CountDownLatch(1);
    final CountDownLatch volRemoveStartedLatch = new CountDownLatch(1);
    final CountDownLatch volRemoveCompletedLatch = new CountDownLatch(1);
    class BlockReportThread extends Thread {

        public void run() {
            // Let's wait for the volume remove process to start
            try {
                volRemoveStartedLatch.await();
            } catch (Exception e) {
                LOG.info("Unexpected exception when waiting for vol removal:", e);
            }
            LOG.info("Getting block report");
            dataset.getBlockReports(eb.getBlockPoolId());
            LOG.info("Successfully received block report");
            blockReportReceivedLatch.countDown();
        }
    }
    class ResponderThread extends Thread {

        public void run() {
            try (ReplicaHandler replica = dataset.createRbw(StorageType.DEFAULT, eb, false)) {
                LOG.info("CreateRbw finished");
                startFinalizeLatch.countDown();
                // Ignore any interrupts coming out of volume shutdown.
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                    LOG.info("Ignoring ", ie);
                }
                // Let's wait for the other thread to finish getting the block report
                blockReportReceivedLatch.await();
                dataset.finalizeBlock(eb);
                LOG.info("FinalizeBlock finished");
            } catch (Exception e) {
                LOG.warn("Exception caught. This should not affect the test", e);
            }
        }
    }
    class VolRemoveThread extends Thread {

        public void run() {
            Set<StorageLocation> volumesToRemove = new HashSet<>();
            try {
                volumesToRemove.add(dataset.getVolume(eb).getStorageLocation());
            } catch (Exception e) {
                LOG.info("Problem preparing volumes to remove: ", e);
                Assert.fail("Exception in remove volume thread, check log for " + "details.");
            }
            LOG.info("Removing volume " + volumesToRemove);
            dataset.removeVolumes(volumesToRemove, true);
            volRemoveCompletedLatch.countDown();
            LOG.info("Removed volume " + volumesToRemove);
        }
    }
    // Start the volume write operation
    ResponderThread responderThread = new ResponderThread();
    responderThread.start();
    startFinalizeLatch.await();
    // Start the block report get operation
    final BlockReportThread blockReportThread = new BlockReportThread();
    blockReportThread.start();
    // Start the volume remove operation
    VolRemoveThread volRemoveThread = new VolRemoveThread();
    volRemoveThread.start();
    // Let the volume write and remove operations be
    // blocked for a few seconds
    Thread.sleep(2000);
    // Signal the block report receiver and volume writer
    // threads to complete their operations so that volume
    // removal can proceed
    volRemoveStartedLatch.countDown();
    // Verify that a block report can be received
    // while the volume is in use and also being removed
    blockReportReceivedLatch.await();
    // Verify that the volume can be removed safely while
    // read/write operations are in progress
    volRemoveCompletedLatch.await();
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler) HashSet(java.util.HashSet) Test(org.junit.Test)
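
The test above coordinates its three threads purely through CountDownLatch handoffs. A minimal two-thread sketch of that same pattern is below; it uses only standard java.util.concurrent and nothing HDFS-specific.

import java.util.concurrent.CountDownLatch;

public class LatchCoordinationSketch {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch stepStarted = new CountDownLatch(1);
        final CountDownLatch stepFinished = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
            try {
                stepStarted.await();              // block until the main thread signals
                System.out.println("worker: doing the guarded step");
                stepFinished.countDown();         // tell the main thread we are done
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        worker.start();
        System.out.println("main: releasing the worker");
        stepStarted.countDown();
        stepFinished.await();                     // wait for the worker to complete
        System.out.println("main: worker finished");
    }
}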

Example 20 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestStorageLocationChecker, the method testAllLocationsHealthy:

/**
   * Verify that all healthy locations are correctly handled and that the
   * check routine is invoked as expected.
   * @throws Exception
   */
@Test(timeout = 30000)
public void testAllLocationsHealthy() throws Exception {
    final List<StorageLocation> locations = makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
    StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
    List<StorageLocation> filteredLocations = checker.check(conf, locations);
    // All locations should be healthy.
    assertThat(filteredLocations.size(), is(3));
    // Ensure that the check method was invoked for each location.
    for (StorageLocation location : locations) {
        verify(location).check(any(StorageLocation.CheckContext.class));
    }
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)
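
The makeMockLocations helper is part of the test class and is not shown here. Below is a plausible sketch of what such a mock could look like, assuming the checker's VolumeCheckResult enum from org.apache.hadoop.hdfs.server.datanode.checker; the CheckContext class is the one already used in the verify call above, and the Mockito calls mirror the style of the other examples.

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;

public class MockLocationSketch {
    /** Build a mock StorageLocation whose disk check always reports HEALTHY. */
    static StorageLocation healthyLocation() throws Exception {
        StorageLocation location = mock(StorageLocation.class);
        // Stub the check so the StorageLocationChecker sees a healthy volume
        // no matter what CheckContext it passes in.
        doReturn(VolumeCheckResult.HEALTHY)
            .when(location).check(any(StorageLocation.CheckContext.class));
        return location;
    }
}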

Aggregations

StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation) 24
Test (org.junit.Test) 11
File (java.io.File) 7
ArrayList (java.util.ArrayList) 7
Configuration (org.apache.hadoop.conf.Configuration) 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 7
Storage (org.apache.hadoop.hdfs.server.common.Storage) 6
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage) 6
FakeTimer (org.apache.hadoop.util.FakeTimer) 6
HashSet (java.util.HashSet) 5
Matchers.anyString (org.mockito.Matchers.anyString) 5
IOException (java.io.IOException) 4
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) 4
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) 4
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) 4
Path (org.apache.hadoop.fs.Path) 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 3
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) 3
MultipleIOException (org.apache.hadoop.io.MultipleIOException) 3
FileOutputStream (java.io.FileOutputStream) 2