Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
In class TestDataNodeHotSwapVolumes, method testAddVolumeFailures:
@Test
public void testAddVolumeFailures() throws IOException {
  startDFSCluster(1, 1);
  final String dataDir = cluster.getDataDirectory();
  DataNode dn = cluster.getDataNodes().get(0);
  List<String> newDirs = Lists.newArrayList();
  final int NUM_NEW_DIRS = 4;
  for (int i = 0; i < NUM_NEW_DIRS; i++) {
    File newVolume = new File(dataDir, "new_vol" + i);
    newDirs.add(newVolume.toString());
    if (i % 2 == 0) {
      // Create a regular file at the volume path so that addVolume() fails.
      newVolume.createNewFile();
    }
  }
  String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + ","
      + Joiner.on(",").join(newDirs);
  try {
    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
    fail("Expected a ReconfigurationException to be thrown.");
  } catch (ReconfigurationException e) {
    String errorMessage = e.getCause().getMessage();
    String[] messages = errorMessage.split("\\r?\\n");
    assertEquals(2, messages.length);
    assertThat(messages[0], containsString("new_vol0"));
    assertThat(messages[1], containsString("new_vol2"));
  }
  // Make sure that vol0 and vol2's metadata are not left in memory.
  FsDatasetSpi<?> dataset = dn.getFSDataset();
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi volume : volumes) {
      assertThat(new File(volume.getStorageLocation().getUri()).toString(),
          is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
    }
  }
  DataStorage storage = dn.getStorage();
  for (int i = 0; i < storage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = storage.getStorageDir(i);
    assertThat(sd.getRoot().toString(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
  // The newly effective conf does not contain vol0 and vol2.
  String[] effectiveVolumes =
      dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
  assertEquals(4, effectiveVolumes.length);
  for (String ev : effectiveVolumes) {
    assertThat(new File(StorageLocation.parse(ev).getUri()).getCanonicalPath(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
}
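The failure injection above relies on plain java.io.File semantics: a regular file pre-created at the volume path prevents the DataNode from creating the volume directory when it tries to bring the new volume into service. A minimal standalone sketch of that effect (the class name is illustrative, nothing here is Hadoop-specific):

import java.io.File;
import java.io.IOException;

public class MkdirOverFileDemo {
  public static void main(String[] args) throws IOException {
    File path = File.createTempFile("new_vol", null);  // a regular file
    // mkdirs() cannot create a directory where a regular file already
    // exists, which is the failure mode the test provokes in addVolume().
    System.out.println(path.mkdirs());  // prints false
    path.delete();
  }
}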
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
In class TestDataStorage, method checkDir:
/** Check whether the root is a valid BlockPoolSlice storage. */
private static void checkDir(File root, String bpid) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  File bpRoot = new File(sd.getCurrentDir(), bpid);
  Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
  assertTrue(bpSd.getRoot().isDirectory());
  assertTrue(bpSd.getCurrentDir().isDirectory());
  assertTrue(bpSd.getVersionFile().isFile());
}
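For orientation, a sketch of the paths this helper resolves, following StorageDirectory's getCurrentDir()/getVersionFile() conventions (the root path is hypothetical; the block pool ID is borrowed from the next example):

File root = new File("/data/dn/data0");          // hypothetical volume root
File current = new File(root, "current");        // sd.getCurrentDir()
File bpRoot = new File(current,
    "BP-787466439-172.26.24.43-1462305406642");  // block pool slice root
File bpCurrent = new File(bpRoot, "current");    // bpSd.getCurrentDir()
File version = new File(bpCurrent, "VERSION");   // bpSd.getVersionFile()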
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
In class TestDataStorage, method testMissingVersion:
@Test
public void testMissingVersion() throws IOException, URISyntaxException {
  final int numLocations = 1;
  final int numNamespace = 1;
  List<StorageLocation> locations = createStorageLocations(numLocations);
  StorageLocation firstStorage = locations.get(0);
  Storage.StorageDirectory sd = new Storage.StorageDirectory(firstStorage);
  // The directory is not initialized, so VERSION does not exist.
  // Create a fake block pool directory under current/.
  File currentDir = new File(sd.getCurrentDir(),
      "BP-787466439-172.26.24.43-1462305406642");
  assertTrue("unable to mkdir " + currentDir.getName(), currentDir.mkdirs());
  // Add volumes for multiple namespaces.
  List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
  for (NamespaceInfo ni : namespaceInfos) {
    storage.addStorageLocations(mockDN, ni, locations, START_OPT);
  }
  // The directory should not be formatted because VERSION is missing.
  assertTrue("Storage directory was formatted", currentDir.exists());
}
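This snippet references fixtures defined elsewhere in TestDataStorage (storage, mockDN, START_OPT, and the createStorageLocations/createNamespaceInfos helpers). A plausible sketch of their shape for readers following along; names match the snippet, but the bodies are illustrative guesses, not the actual test code:

private final static StartupOption START_OPT = StartupOption.REGULAR;
private DataNode mockDN = Mockito.mock(DataNode.class);
private DataStorage storage = new DataStorage();

// Illustrative: create numLocs empty directories and parse them as
// StorageLocations (TEST_DIR is a hypothetical base test directory).
private static List<StorageLocation> createStorageLocations(int numLocs)
    throws IOException {
  List<StorageLocation> locations = new ArrayList<>(numLocs);
  for (int i = 0; i < numLocs; i++) {
    File dir = new File(TEST_DIR, "data" + i);
    assertTrue(dir.mkdirs() || dir.isDirectory());
    locations.add(StorageLocation.parse(dir.getAbsolutePath()));
  }
  return locations;
}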
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
In class TestDataNodeVolumeFailure, method testFailedVolumeBeingRemovedFromDataNode:
/**
 * Test that DataStorage and BlockPoolSliceStorage remove the failed volume
 * after failure.
 */
@Test(timeout = 150000)
public void testFailedVolumeBeingRemovedFromDataNode() throws Exception {
  // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
  // volume failures, which is currently not supported on Windows.
  assumeNotWindows();
  Path file1 = new Path("/test1");
  DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
  DFSTestUtil.waitReplication(fs, file1, (short) 2);
  // Volume 1 of DataNode 0.
  File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
  DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
  DataNode dn0 = cluster.getDataNodes().get(0);
  DataNodeTestUtils.waitForDiskError(dn0,
      DataNodeTestUtils.getVolume(dn0, dn0Vol1));
  // Verify dn0Vol1 has been completely removed from DN0.
  // 1. dn0Vol1 is removed from DataStorage.
  DataStorage storage = dn0.getStorage();
  assertEquals(1, storage.getNumStorageDirs());
  for (int i = 0; i < storage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = storage.getStorageDir(i);
    assertFalse(sd.getRoot().getAbsolutePath()
        .startsWith(dn0Vol1.getAbsolutePath()));
  }
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  BlockPoolSliceStorage bpsStorage = storage.getBPStorage(bpid);
  assertEquals(1, bpsStorage.getNumStorageDirs());
  for (int i = 0; i < bpsStorage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = bpsStorage.getStorageDir(i);
    assertFalse(sd.getRoot().getAbsolutePath()
        .startsWith(dn0Vol1.getAbsolutePath()));
  }
  // 2. dn0Vol1 is removed from FsDataset.
  FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
  try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
    for (FsVolumeSpi volume : vols) {
      assertFalse(new File(volume.getStorageLocation().getUri())
          .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
    }
  }
  // 3. All blocks on dn0Vol1 have been removed.
  for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
    assertNotNull(replica.getVolume());
    assertFalse(new File(replica.getVolume().getStorageLocation().getUri())
        .getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
  }
  // 4. dn0Vol1 is no longer in DN0's configured data directories.
  String[] dataDirStrs =
      dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
  assertEquals(1, dataDirStrs.length);
  assertFalse(dataDirStrs[0].contains(dn0Vol1.getAbsolutePath()));
}
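injectDataDirFailure() is the linchpin of this test. A rough sketch of the mechanism as we understand it (illustrative only; see DataNodeTestUtils#injectDataDirFailure for the real implementation): the data directory is moved aside and replaced by a regular file, so the DataNode's disk checker flags the volume as failed.

// Illustrative sketch, not DataNodeTestUtils' actual code.
static void injectFailureSketch(File dir) throws IOException {
  File movedAside = new File(dir.getPath() + ".origin");  // suffix is made up
  if (!dir.renameTo(movedAside)) {
    throw new IOException("Failed to move " + dir + " aside");
  }
  // A regular file at the volume path makes the disk checker report failure.
  if (!dir.createNewFile()) {
    throw new IOException("Failed to create file at " + dir);
  }
}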
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
In class FsDatasetImpl, method getInitialVolumeFailureInfos:
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup. The method works by determining the set difference
 * between all configured storage locations and the actual storage locations
 * in use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<StorageLocation> failedLocationSet =
      Sets.newHashSetWithExpectedSize(dataLocations.size());
  for (StorageLocation sl : dataLocations) {
    failedLocationSet.add(sl);
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
      it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getStorageLocation());
  }
  List<VolumeFailureInfo> volumeFailureInfos =
      Lists.newArrayListWithCapacity(failedLocationSet.size());
  long failureDate = Time.now();
  for (StorageLocation failedStorageLocation : failedLocationSet) {
    volumeFailureInfos.add(
        new VolumeFailureInfo(failedStorageLocation, failureDate));
  }
  return volumeFailureInfos;
}
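A hypothetical caller sketch showing how the result might be consumed at dataset construction time. The logging is illustrative, and the getFailedStorageLocation()/getFailureDate() accessors are assumed to mirror the constructor arguments above:

List<VolumeFailureInfo> infos =
    getInitialVolumeFailureInfos(dataLocations, storage);
for (VolumeFailureInfo info : infos) {
  LOG.warn("Volume failed at startup: " + info.getFailedStorageLocation()
      + " (detected at " + info.getFailureDate() + ")");
}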