Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.
From the class FsDatasetImplTestUtils, method injectCorruptReplica.
@Override
public void injectCorruptReplica(ExtendedBlock block) throws IOException {
  Preconditions.checkState(!dataset.contains(block),
      "Block " + block + " already exists on dataset.");
  try (FsVolumeReferences volRef = dataset.getFsVolumeReferences()) {
    FsVolumeImpl volume = (FsVolumeImpl) volRef.get(0);
    // Register a finalized replica on the first volume whose block and meta
    // files are empty, which makes the replica corrupt.
    FinalizedReplica finalized = new FinalizedReplica(block.getLocalBlock(),
        volume, volume.getFinalizedDir(block.getBlockPoolId()));
    File blockFile = finalized.getBlockFile();
    if (!blockFile.createNewFile()) {
      throw new FileExistsException(
          "Block file " + blockFile + " already exists.");
    }
    File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
    if (!metaFile.createNewFile()) {
      throw new FileExistsException(
          "Meta file " + metaFile + " already exists.");
    }
    dataset.volumeMap.add(block.getBlockPoolId(), finalized);
  }
}
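A hypothetical caller for this helper is sketched below. It assumes a running MiniDFSCluster named cluster and a fabricated block id, neither of which appears in the snippet above; the fabricated id keeps the !dataset.contains(block) precondition satisfied.

// Sketch only: `cluster` (a MiniDFSCluster) and the block id are assumptions.
String bpid = cluster.getNamesystem().getBlockPoolId();
ExtendedBlock bogusBlock = new ExtendedBlock(bpid, 123456789L);
FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
utils.injectCorruptReplica(bogusBlock);
// The DataNode now holds an empty block file and meta file registered as a
// FINALIZED replica, i.e. a corrupt replica for the test to detect.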
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.
From the class TestFsDatasetImpl, method testRemoveVolumes.
@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
  // Feed FsDataset with block metadata.
  final int NUM_BLOCKS = 100;
  for (int i = 0; i < NUM_BLOCKS; i++) {
    String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
    ExtendedBlock eb = new ExtendedBlock(bpid, i);
    // The ReplicaHandler is closed immediately; creating the RBW replica is
    // all that is needed here.
    try (ReplicaHandler replica =
        dataset.createRbw(StorageType.DEFAULT, eb, false)) {
    }
  }
  final String[] dataDirs =
      conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
  final String volumePathToRemove = dataDirs[0];
  Set<StorageLocation> volumesToRemove = new HashSet<>();
  volumesToRemove.add(StorageLocation.parse(volumePathToRemove));

  // Find the FsVolumeImpl that corresponds to the storage location being
  // removed.
  FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
  FsVolumeImpl volumeToRemove = null;
  for (FsVolumeSpi vol : volReferences) {
    if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
      volumeToRemove = (FsVolumeImpl) vol;
    }
  }
  assertTrue(volumeToRemove != null);
  volReferences.close();

  dataset.removeVolumes(volumesToRemove, true);
  int expectedNumVolumes = dataDirs.length - 1;
  assertEquals("The volume has been removed from the volumeList.",
      expectedNumVolumes, getNumVolumes());
  assertEquals("The volume has been removed from the storageMap.",
      expectedNumVolumes, dataset.storageMap.size());

  // The removed volume must no longer be usable by the async disk service.
  try {
    dataset.asyncDiskService.execute(volumeToRemove, new Runnable() {
      @Override
      public void run() {
      }
    });
    fail("Expect RuntimeException: the volume has been removed from the "
        + "AsyncDiskService.");
  } catch (RuntimeException e) {
    GenericTestUtils.assertExceptionContains("Cannot find volume", e);
  }

  // All replicas that lived on the removed volume must be gone from the
  // volumeMap.
  int totalNumReplicas = 0;
  for (String bpid : dataset.volumeMap.getBlockPoolList()) {
    totalNumReplicas += dataset.volumeMap.size(bpid);
  }
  assertEquals("The replica infos on this volume has been removed from the "
      + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, totalNumReplicas);
}
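The test calls getNumVolumes(), which is not shown in this snippet. A minimal sketch of such a helper, assuming it only needs to count the volumes currently referenced by the dataset, could look like this:

// Minimal sketch of a volume-counting helper (assumption: counting the
// entries in FsVolumeReferences is all the test needs).
private int getNumVolumes() {
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    return volumes.size();
  } catch (IOException e) {
    return 0;
  }
}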
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.
From the class FsDatasetImplTestUtils, method verifyBlockPoolMissing.
@Override
public void verifyBlockPoolMissing(String bpid) throws IOException {
  FsVolumeImpl volume;
  try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
    volume = (FsVolumeImpl) references.get(0);
  }
  File bpDir = new File(volume.getCurrentDir(), bpid);
  if (bpDir.exists()) {
    throw new IOException(
        String.format("Block pool directory %s exists", bpDir));
  }
}
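A complementary existence check, sketched here rather than copied from the Hadoop sources, flips the condition and fails when the block pool directory is absent:

// Sketch of a complementary check; the method name is illustrative.
public void verifyBlockPoolExists(String bpid) throws IOException {
  FsVolumeImpl volume;
  try (FsVolumeReferences references = dataset.getFsVolumeReferences()) {
    volume = (FsVolumeImpl) references.get(0);
  }
  File bpDir = new File(volume.getCurrentDir(), bpid);
  if (!bpDir.exists()) {
    throw new IOException(
        String.format("Block pool directory %s does not exist", bpDir));
  }
}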
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.
From the class FsDatasetImplTestUtils, method createReplicaUnderRecovery.
@Override
public Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
    throws IOException {
  try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
    FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
    ReplicaUnderRecovery rur = new ReplicaUnderRecovery(
        new FinalizedReplica(block.getLocalBlock(), volume,
            volume.getCurrentDir().getParentFile()),
        recoveryId);
    dataset.volumeMap.add(block.getBlockPoolId(), rur);
    return rur;
  }
}
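A test using this helper might assert the state of the returned replica. The sketch below is hypothetical; utils, block, and the recovery id 2001L are placeholders rather than values taken from the snippet:

// Hypothetical usage; `utils` and `block` are placeholders.
Replica rur = utils.createReplicaUnderRecovery(block, 2001L);
// The replica should report the under-recovery state and the recovery id
// it was constructed with.
assertEquals(ReplicaState.RUR, rur.getState());
assertEquals(2001L, ((ReplicaUnderRecovery) rur).getRecoveryID());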
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences in project hadoop by apache.
From the class FsDatasetImplTestUtils, method getRawCapacity.
@Override
public long getRawCapacity() throws IOException {
  try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
    Preconditions.checkState(volRefs.size() != 0);
    DF df = volRefs.get(0).getUsageStats(dataset.datanode.getConf());
    if (df != null) {
      return df.getCapacity();
    } else {
      return -1;
    }
  }
}
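This implementation reports only the first volume. An illustrative variant, not part of FsDatasetImplTestUtils, that sums the usage statistics over every referenced volume could be written as:

// Illustrative sketch: total raw capacity across all referenced volumes.
long totalCapacity = 0;
try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
  for (FsVolumeSpi vol : volRefs) {
    DF df = vol.getUsageStats(dataset.datanode.getConf());
    if (df != null) {
      totalCapacity += df.getCapacity();
    }
  }
}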