Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
From class TestFsDatasetImpl, the method testRemoveNewlyAddedVolume.
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = getNumVolumes();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);
  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc),
      anyListOf(NamespaceInfo.class))).thenReturn(builder);

  // Add the new volume and check that the volume count grows by one.
  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, getNumVolumes());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);

  // Remove the volume just added and check that the count drops back.
  Set<StorageLocation> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc);
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, getNumVolumes());
}
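The tests on this page all build one NamespaceInfo per block pool with the same loop. As a hedged sketch, that pattern could be factored into a helper; the class and method names below are hypothetical, while the four-argument constructor NamespaceInfo(int nsID, String clusterID, String bpID, long cT) is the one invoked in the snippets.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

class NamespaceInfoExamples {
  // Hypothetical helper mirroring the loop used by the tests: one
  // NamespaceInfo per block pool ID, with fixed namespace ID 0 and cTime 1.
  static List<NamespaceInfo> namespaceInfosFor(String clusterId, String... blockPoolIds) {
    List<NamespaceInfo> nsInfos = new ArrayList<>();
    for (String bpid : blockPoolIds) {
      nsInfos.add(new NamespaceInfo(0, clusterId, bpid, 1));
    }
    return nsInfos;
  }
}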
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
From class TestFsDatasetImpl, the method testAddVolumes.
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = getNumVolumes();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  // Add each new volume through the mocked DataStorage and record the
  // absolute path it is expected to appear under.
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).getAbsolutePath());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc),
        anyListOf(NamespaceInfo.class))).thenReturn(builder);
    dataset.addVolume(loc, nsInfos);
    LOG.info("expectedVolumes " + i + " is " + new File(pathUri).getAbsolutePath());
  }
  assertEquals(totalVolumes, getNumVolumes());
  assertEquals(totalVolumes, dataset.storageMap.size());

  // Collect the volumes the dataset actually reports and compare them
  // against the expected set.
  Set<String> actualVolumes = new HashSet<String>();
  try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
    for (int i = 0; i < numNewVolumes; i++) {
      String volumeName = volumes.get(numExistingVolumes + i).toString();
      actualVolumes.add(volumeName);
      LOG.info("actualVolume " + i + " is " + volumeName);
    }
  }
  assertEquals(expectedVolumes.size(), actualVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
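The try-with-resources around getFsVolumeReferences() matters: FsVolumeReferences holds reference counts on the volumes and must be closed to release them. A minimal hedged sketch of the idiomatic iteration, reusing the dataset and LOG fields from the test class above (the log message is illustrative):

// Sketch: FsVolumeReferences is Iterable and Closeable, so
// try-with-resources both iterates the volumes and releases the references.
try (FsDatasetSpi.FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
  for (FsVolumeSpi volume : refs) {
    LOG.info("open volume: " + volume);
  }
}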
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
From class TestFsDatasetImpl, the method testAddVolumeFailureReleasesInUseLock.
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset).createFsVolume(
      anyString(), any(StorageDirectory.class), any(StorageLocation.class));
  // Make the volume fail while loading its replica map.
  doThrow(new IOException("Failed to getVolumeMap()")).when(mockVolume).getVolumeMap(
      anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));
  // Lock the storage directory up front, as DataStorage would.
  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode),
      eq(StorageLocation.parse(badDir.toURI().toString())),
      Matchers.<List<NamespaceInfo>>any())).thenReturn(builder);
  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
    // Expected: the add fails because getVolumeMap() throws.
  }
  // The failed add must not leave the directory's in-use lock held.
  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
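The in-use lock the test checks is the exclusive lock a Storage.StorageDirectory takes on its in_use.lock file. A hedged sketch of the lock discipline the test expects on the failure path (the dir variable is illustrative, and createStorageDirectory is the test helper used above):

// Sketch: acquire the directory lock, and release it on any failure path
// so a later add or a restart can reacquire it.
Storage.StorageDirectory sd = createStorageDirectory(dir);
sd.lock();
try {
  // ... build and register the volume ...
} catch (IOException e) {
  sd.unlock();  // release in_use.lock before propagating the failure
  throw e;
}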
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
From class NNThroughputBenchmark, the method getBlockPoolId.
private void getBlockPoolId(DistributedFileSystem unused) throws IOException {
  // Ask the NameNode for its namespace info and keep the block pool ID.
  final NamespaceInfo nsInfo = nameNodeProto.versionRequest();
  bpid = nsInfo.getBlockPoolID();
}
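versionRequest() is the usual first step of a handshake with the NameNode, and the returned NamespaceInfo carries the cluster identifiers a protocol client typically reads. A hedged sketch of the fields commonly pulled from the result (variable names are illustrative):

// Sketch: identifiers available on the NamespaceInfo handshake result.
NamespaceInfo nsInfo = nameNodeProto.versionRequest();
String blockPoolId = nsInfo.getBlockPoolID();  // declared on NamespaceInfo
String clusterId = nsInfo.getClusterID();      // inherited from StorageInfo
int namespaceId = nsInfo.getNamespaceID();     // inherited from StorageInfo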
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
From class TestEditLog, the method setupEdits.
/**
 * Set up directories for tests.
 *
 * Each rolled file is 10 txns long.
 * A failed file is 2 txns long.
 *
 * @param editUris directories to create edit logs in
 * @param numrolls number of times to roll the edit log during setup
 * @param closeOnFinish whether to close the edit log after setup
 * @param abortAtRolls specifications for when to fail; see AbortSpec
 * @return the NNStorage backing the edit logs that were created
 */
public static NNStorage setupEdits(List<URI> editUris, int numrolls,
    boolean closeOnFinish, AbortSpec... abortAtRolls) throws IOException {
  List<AbortSpec> aborts = new ArrayList<AbortSpec>(Arrays.asList(abortAtRolls));
  NNStorage storage = new NNStorage(getConf(), Collections.<URI>emptyList(), editUris);
  storage.format(new NamespaceInfo());
  FSEditLog editlog = getFSEditLog(storage);
  // Open the edit log and add two transactions.
  // logGenerationStamp is used simply because it doesn't
  // require complex arguments.
  editlog.initJournalsForWrite();
  editlog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (int i = 2; i < TXNS_PER_ROLL; i++) {
    editlog.logGenerationStamp((long) 0);
  }
  editlog.logSync();
  // Roll the log repeatedly. On each roll, check the abort specs and abort
  // the selected journal if required; an aborted journal is brought
  // back into rotation automatically by rollEditLog.
  for (int i = 0; i < numrolls; i++) {
    editlog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    editlog.logGenerationStamp((long) i);
    editlog.logSync();
    while (aborts.size() > 0 && aborts.get(0).roll == (i + 1)) {
      AbortSpec spec = aborts.remove(0);
      editlog.getJournals().get(spec.logindex).abort();
    }
    for (int j = 3; j < TXNS_PER_ROLL; j++) {
      editlog.logGenerationStamp((long) i);
    }
    editlog.logSync();
  }
  if (closeOnFinish) {
    editlog.close();
  }
  FSImageTestUtil.logStorageContents(LOG, storage);
  return storage;
}
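As a hedged usage sketch: the AbortSpec varargs let a caller abort specific journals at specific rolls. Assuming AbortSpec's constructor takes (roll, logindex), which matches the field accesses in the method above, and with TEST_DIR standing in for the test's working directory, a test might write:

// Sketch: two edit directories, rolled five times; abort the journal at
// index 0 after the first roll, then close the log when setup finishes.
List<URI> editUris = Arrays.asList(
    new File(TEST_DIR, "edits1").toURI(),
    new File(TEST_DIR, "edits2").toURI());
NNStorage storage = setupEdits(editUris, 5, true, new AbortSpec(1, 0));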