Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
From the class TestFsDatasetImpl, method testRemoveNewlyAddedVolume.
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = getNumVolumes();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);
  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);
  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, getNumVolumes());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<StorageLocation> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc);
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, getNumVolumes());
}
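As background, StorageLocation.parse accepts either a bare path or a URI, optionally prefixed with a storage type in square brackets. A minimal standalone sketch of that behavior (the paths are illustrative, not taken from the test above):

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class StorageLocationParseDemo {
  public static void main(String[] args) throws Exception {
    // A bare path defaults to StorageType.DEFAULT, which is DISK.
    StorageLocation plain = StorageLocation.parse("/data/dn1");
    // A [TYPE] prefix selects an explicit storage type.
    StorageLocation typed = StorageLocation.parse("[SSD]/data/dn2");
    System.out.println(plain.getStorageType()); // DISK
    System.out.println(typed.getStorageType()); // SSD
  }
}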
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
From the class TestFsDatasetImpl, method testAddVolumes.
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = getNumVolumes();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).getAbsolutePath());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);
    dataset.addVolume(loc, nsInfos);
    LOG.info("expectedVolumes " + i + " is " + new File(pathUri).getAbsolutePath());
  }
  assertEquals(totalVolumes, getNumVolumes());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
    for (int i = 0; i < numNewVolumes; i++) {
      String volumeName = volumes.get(numExistingVolumes + i).toString();
      actualVolumes.add(volumeName);
      LOG.info("actualVolume " + i + " is " + volumeName);
    }
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
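The stubbing in this test leans on Mockito argument matchers: eq(...) pins an exact argument, while anyListOf(...) accepts any list of the given element type. A minimal self-contained sketch of the same pattern, using a hypothetical VolumePreparer interface rather than the real DataStorage:

import static org.mockito.Matchers.*;
import static org.mockito.Mockito.*;

import java.util.Collections;
import java.util.List;

// Hypothetical collaborator, used only to illustrate the matcher pattern.
interface VolumePreparer {
  String prepare(String node, String location, List<String> blockPools);
}

public class StubbingSketch {
  public static void main(String[] args) {
    VolumePreparer preparer = mock(VolumePreparer.class);
    // The stub fires only when the first two arguments match exactly;
    // the third may be any List<String>.
    when(preparer.prepare(eq("dn0"), eq("/data/dn1"), anyListOf(String.class)))
        .thenReturn("builder");
    System.out.println(preparer.prepare("dn0", "/data/dn1",
        Collections.<String>emptyList())); // prints "builder"
  }
}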
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
From the class TestFsDatasetImpl, method testAddVolumeFailureReleasesInUseLock.
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(StorageDirectory.class), any(StorageLocation.class));
  doThrow(new IOException("Failed to getVolumeMap()")).when(mockVolume)
      .getVolumeMap(anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode),
      eq(StorageLocation.parse(badDir.toURI().toString())),
      Matchers.<List<NamespaceInfo>>any())).thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
    // Expected: getVolumeMap fails, so addVolume should wrap and rethrow.
  }
  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
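For context, each Storage.StorageDirectory guards its volume with an in_use.lock file (sd.lock() above takes it), and FsDatasetTestUtil.assertFileLockReleased verifies that the failed addVolume dropped it. A rough sketch of how such a probe can be written with java.nio, shown only to illustrate the mechanism, not the actual helper:

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;

public class LockProbeSketch {
  // Returns true if the lock file can be acquired, i.e. no other process
  // holds it. (Within the same JVM an overlapping lock throws instead.)
  static boolean isLockReleased(File dir) throws Exception {
    File lockFile = new File(dir, "in_use.lock");
    try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
         FileLock lock = raf.getChannel().tryLock()) {
      return lock != null; // null means another process holds the lock
    }
  }

  public static void main(String[] args) throws Exception {
    // Illustrative path only.
    System.out.println(isLockReleased(new File("/tmp/volume-dir")));
  }
}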
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
From the class TestFsDatasetImpl, method testRemoveVolumeBeingWritten.
@Test(timeout = 60000)
public void testRemoveVolumeBeingWritten() throws Exception {
  // Will write and remove on dn0.
  final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
  final CountDownLatch startFinalizeLatch = new CountDownLatch(1);
  final CountDownLatch blockReportReceivedLatch = new CountDownLatch(1);
  final CountDownLatch volRemoveStartedLatch = new CountDownLatch(1);
  final CountDownLatch volRemoveCompletedLatch = new CountDownLatch(1);

  class BlockReportThread extends Thread {
    public void run() {
      // Wait for the volume remove process to start.
      try {
        volRemoveStartedLatch.await();
      } catch (Exception e) {
        LOG.info("Unexpected exception when waiting for vol removal:", e);
      }
      LOG.info("Getting block report");
      dataset.getBlockReports(eb.getBlockPoolId());
      LOG.info("Successfully received block report");
      blockReportReceivedLatch.countDown();
    }
  }

  class ResponderThread extends Thread {
    public void run() {
      try (ReplicaHandler replica = dataset.createRbw(StorageType.DEFAULT, eb, false)) {
        LOG.info("CreateRbw finished");
        startFinalizeLatch.countDown();
        // Ignore any interrupts coming out of volume shutdown.
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ie) {
          LOG.info("Ignoring ", ie);
        }
        // Wait for the other thread to finish getting the block report.
        blockReportReceivedLatch.await();
        dataset.finalizeBlock(eb);
        LOG.info("FinalizeBlock finished");
      } catch (Exception e) {
        LOG.warn("Exception caught. This should not affect the test", e);
      }
    }
  }

  class VolRemoveThread extends Thread {
    public void run() {
      Set<StorageLocation> volumesToRemove = new HashSet<>();
      try {
        volumesToRemove.add(dataset.getVolume(eb).getStorageLocation());
      } catch (Exception e) {
        LOG.info("Problem preparing volumes to remove: ", e);
        Assert.fail("Exception in remove volume thread, check log for details.");
      }
      LOG.info("Removing volume " + volumesToRemove);
      dataset.removeVolumes(volumesToRemove, true);
      volRemoveCompletedLatch.countDown();
      LOG.info("Removed volume " + volumesToRemove);
    }
  }

  // Start the volume write operation.
  ResponderThread responderThread = new ResponderThread();
  responderThread.start();
  startFinalizeLatch.await();

  // Start the block report get operation.
  final BlockReportThread blockReportThread = new BlockReportThread();
  blockReportThread.start();

  // Start the volume remove operation.
  VolRemoveThread volRemoveThread = new VolRemoveThread();
  volRemoveThread.start();

  // Let the volume write and remove operations stay blocked for a few seconds.
  Thread.sleep(2000);

  // Signal the block report receiver and volume writer threads to complete
  // their operations so that the volume remove can proceed.
  volRemoveStartedLatch.countDown();

  // Verify that a block report can be received while the volume is in use
  // and also being removed.
  blockReportReceivedLatch.await();

  // Verify that the volume can be removed safely while read/write
  // operations are in progress.
  volRemoveCompletedLatch.await();
}
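The three threads above are choreographed purely with CountDownLatch handshakes. A stripped-down, runnable sketch of that pattern, independent of HDFS:

import java.util.concurrent.CountDownLatch;

public class LatchHandshakeSketch {
  public static void main(String[] args) throws InterruptedException {
    final CountDownLatch started = new CountDownLatch(1);
    final CountDownLatch finished = new CountDownLatch(1);

    Thread worker = new Thread(() -> {
      try {
        started.await();        // block until the main thread signals
        System.out.println("worker running");
        finished.countDown();   // signal completion back
      } catch (InterruptedException ignored) {
      }
    });
    worker.start();

    started.countDown();        // release the worker
    finished.await();           // wait for it to complete
    System.out.println("done");
  }
}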
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
From the class TestStorageLocationChecker, method testAllLocationsHealthy.
/**
 * Verify that all healthy locations are correctly handled and that the
 * check routine is invoked as expected.
 * @throws Exception
 */
@Test(timeout = 30000)
public void testAllLocationsHealthy() throws Exception {
  final List<StorageLocation> locations = makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
  StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
  List<StorageLocation> filteredLocations = checker.check(conf, locations);

  // All locations should be healthy.
  assertThat(filteredLocations.size(), is(3));

  // Ensure that the check method was invoked for each location.
  for (StorageLocation location : locations) {
    verify(location).check(any(StorageLocation.CheckContext.class));
  }
}
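makeMockLocations is a helper defined elsewhere in TestStorageLocationChecker. A plausible reconstruction, assuming it mocks each StorageLocation so that check(...) reports the supplied VolumeCheckResult:

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;

// Assumed shape of the helper: one mocked location per result, with a
// synthetic path so each location prints distinctly.
static List<StorageLocation> makeMockLocations(VolumeCheckResult... results)
    throws Exception {
  List<StorageLocation> locations = new ArrayList<>(results.length);
  for (int i = 0; i < results.length; i++) {
    StorageLocation location = mock(StorageLocation.class);
    when(location.toString()).thenReturn("/" + i);
    when(location.check(any(StorageLocation.CheckContext.class)))
        .thenReturn(results[i]);
    locations.add(location);
  }
  return locations;
}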