use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
the class DataNode method reportBadBlock.
private void reportBadBlock(final BPOfferService bpos, final ExtendedBlock block, final String msg) {
  FsVolumeSpi volume = getFSDataset().getVolume(block);
  if (volume == null) {
    LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
    return;
  }
  bpos.reportBadBlocks(block, volume.getStorageID(), volume.getStorageType());
  LOG.warn(msg);
}
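For orientation, a minimal sketch of walking every volume through the same SPI; it assumes a running DataNode dn and a caller that propagates IOException, and uses only accessors that appear in the snippets on this page:

try (FsDatasetSpi.FsVolumeReferences volumes =
    dn.getFSDataset().getFsVolumeReferences()) {
  for (FsVolumeSpi v : volumes) {
    // Same accessors used by reportBadBlock above.
    LOG.info(v.getStorageID() + " [" + v.getStorageType() + "]"
        + (v.isTransientStorage() ? " (transient)" : ""));
  }
}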
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
the class MiniDFSCluster method setDataNodeStorageCapacities.
private synchronized void setDataNodeStorageCapacities(final int curDnIdx, final DataNode curDn, long[][] storageCapacities) throws IOException {
  if (storageCapacities == null || storageCapacities.length == 0) {
    return;
  }
  try {
    waitDataNodeFullyStarted(curDn);
  } catch (TimeoutException | InterruptedException e) {
    throw new IOException(e);
  }
  try (FsDatasetSpi.FsVolumeReferences volumes =
      curDn.getFSDataset().getFsVolumeReferences()) {
    assert storageCapacities[curDnIdx].length == storagesPerDatanode;
    assert volumes.size() == storagesPerDatanode;
    int j = 0;
    for (FsVolumeSpi fvs : volumes) {
      FsVolumeImpl volume = (FsVolumeImpl) fvs;
      LOG.info("setCapacityForTesting " + storageCapacities[curDnIdx][j]
          + " for [" + volume.getStorageType() + "]" + volume.getStorageID());
      volume.setCapacityForTesting(storageCapacities[curDnIdx][j]);
      j++;
    }
  }
  DataNodeTestUtils.triggerHeartbeat(curDn);
}
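For context, the capacities consumed here are usually supplied when the mini cluster is built. A sketch, on the assumption that the MiniDFSCluster.Builder options storagesPerDatanode and storageCapacities are available in the Hadoop version in use:

// Hypothetical test setup; the two builder option names are assumptions.
final int numDataNodes = 1;
final int storagesPerDn = 2;
long[][] capacities = new long[numDataNodes][storagesPerDn];
for (long[] perDn : capacities) {
  java.util.Arrays.fill(perDn, 2L * 1024 * 1024 * 1024);  // 2 GB per storage
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(numDataNodes)
    .storagesPerDatanode(storagesPerDn)  // assumed builder method
    .storageCapacities(capacities)       // assumed builder method
    .build();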
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
the class LazyPersistTestCase method ensureLazyPersistBlocksAreSaved.
/**
 * Make sure at least one non-transient volume has a saved copy of the replica.
 * An unbounded loop is used so that the asynchronous lazy persist tasks have
 * fully completed before verification. The caller of
 * ensureLazyPersistBlocksAreSaved expects either a successful pass or a
 * timeout failure.
 */
protected final void ensureLazyPersistBlocksAreSaved(LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  final Set<Long> persistedBlockIds = new HashSet<Long>();
  try (FsDatasetSpi.FsVolumeReferences volumes =
      cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
    while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
      // Sleep for one second before each verification iteration.
      Thread.sleep(1000);
      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        for (FsVolumeSpi v : volumes) {
          if (v.isTransientStorage()) {
            continue;
          }
          FsVolumeImpl volume = (FsVolumeImpl) v;
          File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();
          long blockId = lb.getBlock().getBlockId();
          File targetDir = DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
          File blockFile = new File(targetDir, lb.getBlock().getBlockName());
          if (blockFile.exists()) {
            // Found a persisted copy of this block; record it in the set.
            persistedBlockIds.add(blockId);
          }
        }
      }
    }
  }
  // We should have found a persisted copy for each located block.
  assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
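DatanodeUtil.idToBlockDir, used above to locate the saved copy, maps a block ID onto a two-level subdirectory tree. A sketch of that mapping; the exact shift amounts and mask are an assumption about the upstream block-ID-based layout:

// Sketch of the two-level (32x32) block directory layout; the bit
// positions are an assumption based on the upstream DatanodeUtil source.
static File idToBlockDir(File root, long blockId) {
  int d1 = (int) ((blockId >> 16) & 0x1F);
  int d2 = (int) ((blockId >> 8) & 0x1F);
  return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
}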
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
the class LazyPersistTestCase method verifyDeletedBlocks.
protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
  LOG.info("Verifying replica has no saved copy after deletion.");
  triggerBlockReport();
  while (cluster.getFsDatasetTestUtils(0).getPendingAsyncDeletions() > 0L) {
    Thread.sleep(1000);
  }
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  final FsDatasetSpi<?> dataset = cluster.getDataNodes().get(0).getFSDataset();
  try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi vol : volumes) {
      FsVolumeImpl volume = (FsVolumeImpl) vol;
      // Check the finalized dir of a transient volume, or the lazyPersist
      // dir of a non-transient volume.
      File targetDir = volume.isTransientStorage()
          ? volume.getBlockPoolSlice(bpid).getFinalizedDir()
          : volume.getBlockPoolSlice(bpid).getLazypersistDir();
      if (!verifyBlockDeletedFromDir(targetDir, locatedBlocks)) {
        return false;
      }
    }
  }
  return true;
}
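verifyBlockDeletedFromDir is not shown on this page. A hypothetical sketch of what such a helper could check, reusing the block-file naming seen in ensureLazyPersistBlocksAreSaved (the real helper may also check the corresponding .meta file):

// Hypothetical helper sketch: returns true only if no located block still
// has a block file under the given directory.
private boolean verifyBlockDeletedFromDir(File dir, LocatedBlocks locatedBlocks) {
  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir = DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());
    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      return false;
    }
  }
  return true;
}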
use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
the class TestFsDatasetImpl method testRemoveVolumes.
@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
  // Feed FsDataset with block metadata, distributing blocks across the
  // configured block pools.
  final int NUM_BLOCKS = 100;
  for (int i = 0; i < NUM_BLOCKS; i++) {
    String bpid = BLOCK_POOL_IDS[i % BLOCK_POOL_IDS.length];
    ExtendedBlock eb = new ExtendedBlock(bpid, i);
    try (ReplicaHandler replica = dataset.createRbw(StorageType.DEFAULT, eb, false)) {
      // The handler is closed immediately; only the replica creation matters.
    }
  }
  final String[] dataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
  final String volumePathToRemove = dataDirs[0];
  Set<StorageLocation> volumesToRemove = new HashSet<>();
  volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
  FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
  FsVolumeImpl volumeToRemove = null;
  for (FsVolumeSpi vol : volReferences) {
    if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
      volumeToRemove = (FsVolumeImpl) vol;
    }
  }
  assertTrue(volumeToRemove != null);
  // Release the volume references before removing the volume.
  volReferences.close();
  dataset.removeVolumes(volumesToRemove, true);
  int expectedNumVolumes = dataDirs.length - 1;
  assertEquals("The volume has been removed from the volumeList.", expectedNumVolumes, getNumVolumes());
  assertEquals("The volume has been removed from the storageMap.", expectedNumVolumes, dataset.storageMap.size());
  try {
    dataset.asyncDiskService.execute(volumeToRemove, new Runnable() {
      @Override
      public void run() {
      }
    });
    fail("Expect RuntimeException: the volume has been removed from the AsyncDiskService.");
  } catch (RuntimeException e) {
    GenericTestUtils.assertExceptionContains("Cannot find volume", e);
  }
  int totalNumReplicas = 0;
  for (String bpid : dataset.volumeMap.getBlockPoolList()) {
    totalNumReplicas += dataset.volumeMap.size(bpid);
  }
  assertEquals("The replica infos on this volume have been removed from the volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, totalNumReplicas);
}
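Note that volReferences is closed before removeVolumes is called: FsVolumeReferences pins the volumes it iterates, and on the assumption that removal waits for outstanding references to drain, an unclosed reference set could stall the hot swap. A try-with-resources form makes the ordering explicit (locationToRemove is a hypothetical StorageLocation):

// Sketch: scope the references with try-with-resources so they are
// released before the removal call.
FsVolumeImpl volumeToRemove = null;
try (FsDatasetSpi.FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
  for (FsVolumeSpi vol : refs) {
    if (vol.getStorageLocation().equals(locationToRemove)) {
      volumeToRemove = (FsVolumeImpl) vol;
    }
  }
} // volume references released here
dataset.removeVolumes(Collections.singleton(locationToRemove), true);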