Example 6 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestDatasetVolumeCheckerTimeout, method makeSlowVolume.

// Note: 'lock' is a ReentrantLock field defined on the enclosing test class
// (not shown here); the test holds it so that check() stays blocked until the
// disk check has already timed out.
static FsVolumeSpi makeSlowVolume() throws Exception {
    final FsVolumeSpi volume = mock(FsVolumeSpi.class);
    final FsVolumeReference reference = mock(FsVolumeReference.class);
    final StorageLocation location = mock(StorageLocation.class);
    when(reference.getVolume()).thenReturn(volume);
    when(volume.obtainReference()).thenReturn(reference);
    when(volume.getStorageLocation()).thenReturn(location);
    when(volume.check(anyObject())).thenAnswer(new Answer<VolumeCheckResult>() {

        @Override
        public VolumeCheckResult answer(InvocationOnMock invocationOnMock) throws Throwable {
            // Wait for the disk check to time out, then release the lock.
            lock.lock();
            lock.unlock();
            return VolumeCheckResult.HEALTHY;
        }
    });
    return volume;
}
Also used: FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)
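
The snippet above is only the mock-volume factory; the enclosing test (not shown here) drives it through a DatasetVolumeChecker. The following is a minimal sketch of that wiring, assuming the checker's asynchronous checkVolume(volume, callback) API, a short dfs.datanode.disk.check.timeout, and a CountDownLatch standing in for whatever synchronization the real test uses:

// Hedged sketch, not the actual Hadoop test body: hold 'lock' so the mocked
// check() blocks past the 1-second timeout, then wait for the failure callback.
@Test(timeout = 300000)
public void testDiskCheckTimeout() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // A 1-second disk check timeout so the blocked check() times out quickly.
    conf.setTimeDuration(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 1, TimeUnit.SECONDS);
    final FsVolumeSpi volume = makeSlowVolume();
    final DatasetVolumeChecker checker = new DatasetVolumeChecker(conf, new FakeTimer());
    final CountDownLatch latch = new CountDownLatch(1);
    // Hold the lock so the mocked check() stays blocked past the timeout.
    lock.lock();
    try {
        checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {

            @Override
            public void call(Set<FsVolumeSpi> healthyVolumes, Set<FsVolumeSpi> failedVolumes) {
                // The timed-out volume should be reported as failed, not healthy.
                assertThat(healthyVolumes.size(), is(0));
                assertThat(failedVolumes.size(), is(1));
                latch.countDown();
            }
        });
        assertTrue(latch.await(60, TimeUnit.SECONDS));
    } finally {
        // Release the lock so the blocked check() thread can finish.
        lock.unlock();
    }
}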

Example 7 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestStorageLocationChecker, method testFailedLocationsBelowThreshold.

/**
   * Test handling when the number of failed locations is below the
   * max volume failure threshold.
   *
   * @throws Exception
   */
@Test(timeout = 30000)
public void testFailedLocationsBelowThreshold() throws Exception {
    // 2 healthy, 1 failed.
    final List<StorageLocation> locations =
        makeMockLocations(HEALTHY, HEALTHY, FAILED);
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
    List<StorageLocation> filteredLocations = checker.check(conf, locations);
    assertThat(filteredLocations.size(), is(2));
}
Also used: HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)
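
The makeMockLocations helper is not shown on this page. A plausible shape for it, assuming StorageLocation#check(CheckContext) returns a VolumeCheckResult and Mockito's mock/when/any are statically imported, would be:

// Hypothetical sketch of the unshown makeMockLocations helper: each mocked
// StorageLocation reports the supplied result from its check() method.
private List<StorageLocation> makeMockLocations(VolumeCheckResult... results) throws Exception {
    final List<StorageLocation> locations = new ArrayList<>(results.length);
    for (VolumeCheckResult result : results) {
        final StorageLocation location = mock(StorageLocation.class);
        when(location.check(any(StorageLocation.CheckContext.class))).thenReturn(result);
        locations.add(location);
    }
    return locations;
}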

Example 8 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestStorageLocationChecker, method testFailedLocationsAboveThreshold.

/**
   * Test handling when the number of failed locations is above the
   * max volume failure threshold.
   *
   * @throws Exception
   */
@Test(timeout = 30000)
public void testFailedLocationsAboveThreshold() throws Exception {
    // 1 healthy, 2 failed.
    final List<StorageLocation> locations =
        makeMockLocations(HEALTHY, FAILED, FAILED);
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    thrown.expect(IOException.class);
    thrown.expectMessage("Too many failed volumes - current valid volumes: 1,"
        + " volumes configured: 3, volumes failed: 2, volume failures"
        + " tolerated: 1");
    StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
    checker.check(conf, locations);
}
Also used: HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)
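
The thrown field used by this test is a standard JUnit 4 ExpectedException rule, presumably declared on the test class along these lines:

// JUnit 4 rule backing the thrown.expect(...) calls above.
@Rule
public ExpectedException thrown = ExpectedException.none();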

Example 9 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class TestStorageLocationChecker, method testTimeoutInCheck.

/**
   * Verify that a {@link StorageLocation#check} timeout is correctly detected
   * as a failure.
   *
   * This is hard to test without a {@link Thread#sleep} call.
   *
   * @throws Exception
   */
@Test(timeout = 300000)
public void testTimeoutInCheck() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setTimeDuration(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 1, TimeUnit.SECONDS);
    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    final FakeTimer timer = new FakeTimer();
    // Generate a list of storage locations the first of which sleeps
    // for 2 seconds in its check() routine.
    final List<StorageLocation> locations = makeSlowLocations(2000, 1);
    StorageLocationChecker checker = new StorageLocationChecker(conf, timer);
    try {
        // Check the two locations and ensure that only one of them
        // was filtered out.
        List<StorageLocation> filteredList = checker.check(conf, locations);
        assertThat(filteredList.size(), is(1));
    } finally {
        checker.shutdownAndWait(10, TimeUnit.SECONDS);
    }
}
Also used: HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)
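
The makeSlowLocations helper is also not shown. Judging from the comment above ("the first of which sleeps for 2 seconds"), each argument is a per-location check() delay in milliseconds; a hypothetical sketch under that assumption:

// Hypothetical sketch of the unshown makeSlowLocations helper: one mocked
// location per argument, whose check() sleeps for that many milliseconds.
private List<StorageLocation> makeSlowLocations(long... checkDelaysMs) throws Exception {
    final List<StorageLocation> locations = new ArrayList<>(checkDelaysMs.length);
    for (final long delayMs : checkDelaysMs) {
        final StorageLocation location = mock(StorageLocation.class);
        when(location.check(any(StorageLocation.CheckContext.class)))
                .thenAnswer(new Answer<VolumeCheckResult>() {

                    @Override
                    public VolumeCheckResult answer(InvocationOnMock invocation) throws Throwable {
                        // Simulate a disk whose check() takes delayMs to return.
                        Thread.sleep(delayMs);
                        return VolumeCheckResult.HEALTHY;
                    }
                });
        locations.add(location);
    }
    return locations;
}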

Example 10 with StorageLocation

Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

From the class FsDatasetImpl, method getInitialVolumeFailureInfos.

/**
   * Gets initial volume failure information for all volumes that failed
   * immediately at startup.  The method works by determining the set difference
   * between all configured storage locations and the actual storage locations in
   * use after attempting to put all of them into service.
   *
   * @return each storage location that has failed
   */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(Collection<StorageLocation> dataLocations, DataStorage storage) {
    Set<StorageLocation> failedLocationSet = Sets.newHashSetWithExpectedSize(dataLocations.size());
    for (StorageLocation sl : dataLocations) {
        failedLocationSet.add(sl);
    }
    for (Iterator<Storage.StorageDirectory> it = storage.dirIterator(); it.hasNext(); ) {
        Storage.StorageDirectory sd = it.next();
        failedLocationSet.remove(sd.getStorageLocation());
    }
    List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(failedLocationSet.size());
    long failureDate = Time.now();
    for (StorageLocation failedStorageLocation : failedLocationSet) {
        volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation, failureDate));
    }
    return volumeFailureInfos;
}
Also used: DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)
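
The method builds the failed set by hand: start from all configured locations and remove every location that made it into service. The same set difference can be phrased with Guava's Sets.difference (Guava is already on Hadoop's classpath); an illustrative sketch, not the project's code:

// Configured locations minus the locations that made it into service.
Set<StorageLocation> configured = new HashSet<>(dataLocations);
Set<StorageLocation> inUse = new HashSet<>();
for (Iterator<Storage.StorageDirectory> it = storage.dirIterator(); it.hasNext(); ) {
    inUse.add(it.next().getStorageLocation());
}
// Sets.difference returns a live view; copy it if either set may change later.
Set<StorageLocation> failed = Sets.difference(configured, inUse);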

Aggregations

StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 24
Test (org.junit.Test): 11
File (java.io.File): 7
ArrayList (java.util.ArrayList): 7
Configuration (org.apache.hadoop.conf.Configuration): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7
Storage (org.apache.hadoop.hdfs.server.common.Storage): 6
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage): 6
FakeTimer (org.apache.hadoop.util.FakeTimer): 6
HashSet (java.util.HashSet): 5
Matchers.anyString (org.mockito.Matchers.anyString): 5
IOException (java.io.IOException): 4
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 4
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 4
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 4
Path (org.apache.hadoop.fs.Path): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 3
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 3
FileOutputStream (java.io.FileOutputStream): 2