Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
The class TestDatasetVolumeCheckerTimeout, method makeSlowVolume:
static FsVolumeSpi makeSlowVolume() throws Exception {
  final FsVolumeSpi volume = mock(FsVolumeSpi.class);
  final FsVolumeReference reference = mock(FsVolumeReference.class);
  final StorageLocation location = mock(StorageLocation.class);
  when(reference.getVolume()).thenReturn(volume);
  when(volume.obtainReference()).thenReturn(reference);
  when(volume.getStorageLocation()).thenReturn(location);
  when(volume.check(anyObject())).thenAnswer(new Answer<VolumeCheckResult>() {

    @Override
    public VolumeCheckResult answer(InvocationOnMock invocationOnMock) throws Throwable {
      // Block on the class-level lock held by the test thread; the check
      // cannot return until the test releases it, which it does only after
      // the disk check has timed out.
      lock.lock();
      lock.unlock();
      return VolumeCheckResult.HEALTHY;
    }
  });
  return volume;
}
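The mocked check() only blocks because the test thread holds a shared ReentrantLock while the check is in flight. Below is a minimal sketch of a driving test, assuming a class-level lock field, a conf that configures a short disk check timeout, and a DatasetVolumeChecker.Callback that receives healthy and failed volume sets; the callback shape and the assertions are illustrative, not the exact Hadoop test.

// Shared with the mocked check() in makeSlowVolume() above.
private static final ReentrantLock lock = new ReentrantLock();

@Test(timeout = 300000)
public void testDiskCheckTimeout() throws Exception {
  final FsVolumeSpi volume = makeSlowVolume();
  // 'conf' is assumed to configure a disk check timeout of about a second.
  final DatasetVolumeChecker checker =
      new DatasetVolumeChecker(conf, new FakeTimer());
  final CountDownLatch callbackLatch = new CountDownLatch(1);

  lock.lock();  // Hold the lock so the mocked check() cannot return.
  try {
    checker.checkVolume(volume, (healthyVolumes, failedVolumes) -> {
      // The check blocked past its deadline, so the checker should
      // report the volume as failed rather than healthy.
      assertThat(failedVolumes.size(), is(1));
      callbackLatch.countDown();
    });
    // Wait for the timeout-triggered callback before releasing the lock.
    callbackLatch.await();
  } finally {
    lock.unlock();  // Release the blocked Answer so the mock can unwind.
  }
}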
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
The class TestStorageLocationChecker, method testFailedLocationsBelowThreshold:
/**
 * Test handling when the number of failed locations is below the
 * max volume failure threshold.
 *
 * @throws Exception
 */
@Test(timeout = 30000)
public void testFailedLocationsBelowThreshold() throws Exception {
  final List<StorageLocation> locations = // 2 healthy, 1 failed.
      makeMockLocations(HEALTHY, HEALTHY, FAILED);
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
  List<StorageLocation> filteredLocations = checker.check(conf, locations);
  assertThat(filteredLocations.size(), is(2));
}
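The makeMockLocations helper is not part of this excerpt. A plausible sketch, assuming Mockito mocks whose check() returns the requested VolumeCheckResult for each slot (the mock paths and the CheckContext matcher are assumptions):

// Not shown in this excerpt: one mock StorageLocation per requested result.
static List<StorageLocation> makeMockLocations(VolumeCheckResult... results)
    throws IOException {
  final List<StorageLocation> locations = new ArrayList<>(results.length);
  for (int i = 0; i < results.length; ++i) {
    final StorageLocation location = mock(StorageLocation.class);
    // Distinct paths make individual failures easy to tell apart in logs.
    when(location.toString()).thenReturn("/mock/path" + i);
    // HEALTHY locations pass the check; FAILED ones get filtered out.
    when(location.check(any(StorageLocation.CheckContext.class)))
        .thenReturn(results[i]);
    locations.add(location);
  }
  return locations;
}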
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
The class TestStorageLocationChecker, method testFailedLocationsAboveThreshold:
/**
 * Test handling when the number of failed locations is above the
 * max volume failure threshold.
 *
 * @throws Exception
 */
@Test(timeout = 30000)
public void testFailedLocationsAboveThreshold() throws Exception {
  final List<StorageLocation> locations = // 1 healthy, 2 failed.
      makeMockLocations(HEALTHY, FAILED, FAILED);
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  thrown.expect(IOException.class);
  thrown.expectMessage("Too many failed volumes - current valid volumes: 1, "
      + "volumes configured: 3, volumes failed: 2, "
      + "volume failures tolerated: 1");
  StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
  checker.check(conf, locations);
}
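The thrown field used above is a JUnit 4 ExpectedException rule declared elsewhere in the test class; a minimal declaration would look like this.

// JUnit 4 rule backing the thrown.expect(...) calls; the field name
// 'thrown' matches its usage in the test above.
@Rule
public ExpectedException thrown = ExpectedException.none();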
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
The class TestStorageLocationChecker, method testTimeoutInCheck:
/**
 * Verify that a {@link StorageLocation#check} timeout is correctly detected
 * as a failure.
 *
 * This is hard to test without a {@link Thread#sleep} call.
 *
 * @throws Exception
 */
@Test(timeout = 300000)
public void testTimeoutInCheck() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setTimeDuration(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 1, TimeUnit.SECONDS);
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  final FakeTimer timer = new FakeTimer();
  // Generate two storage locations, the first of which sleeps for
  // 2 seconds in its check() routine and so exceeds the 1-second timeout.
  final List<StorageLocation> locations = makeSlowLocations(2000, 1);
  StorageLocationChecker checker = new StorageLocationChecker(conf, timer);
  try {
    // Check the two locations and ensure that only the slow one
    // was filtered out.
    List<StorageLocation> filteredList = checker.check(conf, locations);
    assertThat(filteredList.size(), is(1));
  } finally {
    checker.shutdownAndWait(10, TimeUnit.SECONDS);
  }
}
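The makeSlowLocations helper is also not shown. Given the call makeSlowLocations(2000, 1) and the comments above, it plausibly takes one sleep duration per location; a sketch under that assumption (paths and the CheckContext matcher are illustrative):

// Not shown in this excerpt: one mock location per sleep duration, so
// makeSlowLocations(2000, 1) yields two locations that sleep 2000 ms
// and 1 ms in their check() routines respectively.
static List<StorageLocation> makeSlowLocations(long... sleepTimesMs)
    throws IOException {
  final List<StorageLocation> locations = new ArrayList<>(sleepTimesMs.length);
  for (int i = 0; i < sleepTimesMs.length; ++i) {
    final StorageLocation location = mock(StorageLocation.class);
    final long sleepMs = sleepTimesMs[i];
    when(location.toString()).thenReturn("/mock/path" + i);
    when(location.check(any(StorageLocation.CheckContext.class)))
        .thenAnswer(invocation -> {
          Thread.sleep(sleepMs);  // Simulate a slow disk check.
          return VolumeCheckResult.HEALTHY;
        });
    locations.add(location);
  }
  return locations;
}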
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
The class FsDatasetImpl, method getInitialVolumeFailureInfos:
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup. The method works by determining the set difference
 * between all configured storage locations and the actual storage locations
 * in use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  // Presume every configured location failed ...
  Set<StorageLocation> failedLocationSet =
      Sets.newHashSetWithExpectedSize(dataLocations.size());
  for (StorageLocation sl : dataLocations) {
    failedLocationSet.add(sl);
  }
  // ... then remove each location that actually came into service.
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
      it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getStorageLocation());
  }
  // Whatever remains failed at startup; record it with a failure timestamp.
  List<VolumeFailureInfo> volumeFailureInfos =
      Lists.newArrayListWithCapacity(failedLocationSet.size());
  long failureDate = Time.now();
  for (StorageLocation failedStorageLocation : failedLocationSet) {
    volumeFailureInfos.add(
        new VolumeFailureInfo(failedStorageLocation, failureDate));
  }
  return volumeFailureInfos;
}
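The set difference is the heart of the method: every configured location starts out presumed failed and is removed once a matching in-service directory is found. A minimal, self-contained illustration of the same pattern with plain strings (all names here are illustrative only):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SetDifferenceDemo {
  public static void main(String[] args) {
    // All configured locations, analogous to dataLocations.
    Set<String> presumedFailed =
        new HashSet<>(Arrays.asList("/disk1", "/disk2", "/disk3"));
    // Locations that actually came into service, analogous to the
    // DataStorage directory iterator.
    List<String> inService = Arrays.asList("/disk1", "/disk3");
    // Remove every in-service location; whatever remains failed at startup.
    presumedFailed.removeAll(inService);
    System.out.println(presumedFailed);  // Prints [/disk2]
  }
}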