Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class TestDiskError, method testLocalDirs.
/**
 * Check that the permissions of the local DN directories are as expected.
 */
@Test
public void testLocalDirs() throws Exception {
  Configuration conf = new Configuration();
  final String permStr = conf.get(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
  FsPermission expected = new FsPermission(permStr);

  // Check permissions on directories in 'dfs.datanode.data.dir'
  FileSystem localFS = FileSystem.getLocal(conf);
  for (DataNode dn : cluster.getDataNodes()) {
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dn.getFSDataset().getFsVolumeReferences()) {
      for (FsVolumeSpi vol : volumes) {
        Path dataDir = new Path(vol.getStorageLocation().getNormalizedUri());
        FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
        assertEquals("Permission for dir: " + dataDir + ", is " + actual
            + ", while expected is " + expected, expected, actual);
      }
    }
  }
}
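For context, the test relies on a shared MiniDFSCluster field named cluster. A minimal sketch of the setup and teardown the test class assumes (an illustrative reconstruction, not the exact TestDiskError source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;

private MiniDFSCluster cluster;

@Before
public void setUp() throws Exception {
  // Start a single-DataNode cluster so getDataNodes() has one entry.
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
}

@After
public void tearDown() throws Exception {
  if (cluster != null) {
    cluster.shutdown();
  }
}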
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class FsDatasetImplTestUtils, method getStoredReplicas.
@Override
public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
  // Reload replicas from the disk.
  ReplicaMap replicaMap = new ReplicaMap(dataset.datasetLock);
  try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi vol : refs) {
      FsVolumeImpl volume = (FsVolumeImpl) vol;
      volume.getVolumeMap(bpid, replicaMap, dataset.ramDiskReplicaTracker);
    }
  }
  // Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
  // FsVolumeSpi implementation.
  List<Replica> ret = new ArrayList<>();
  if (replicaMap.replicas(bpid) != null) {
    ret.addAll(replicaMap.replicas(bpid));
  }
  return ret.iterator();
}
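A hedged usage sketch: reload and count the on-disk replicas of the first DataNode. The getFsDatasetTestUtils accessor and the block-pool lookup are assumptions based on MiniDFSCluster's test API, not part of the snippet above.

// Obtain the test-utils wrapper for DataNode 0 and the block pool id.
FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
String bpid = cluster.getNamesystem().getBlockPoolId();

// Walk the iterator returned by getStoredReplicas().
Iterator<Replica> it = utils.getStoredReplicas(bpid);
int count = 0;
while (it.hasNext()) {
  it.next();
  count++;
}
System.out.println("Stored replicas in " + bpid + ": " + count);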
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class TestDatasetVolumeCheckerTimeout, method makeSlowVolume.
static FsVolumeSpi makeSlowVolume() throws Exception {
  final FsVolumeSpi volume = mock(FsVolumeSpi.class);
  final FsVolumeReference reference = mock(FsVolumeReference.class);
  final StorageLocation location = mock(StorageLocation.class);

  when(reference.getVolume()).thenReturn(volume);
  when(volume.obtainReference()).thenReturn(reference);
  when(volume.getStorageLocation()).thenReturn(location);
  when(volume.check(anyObject())).thenAnswer(new Answer<VolumeCheckResult>() {
    @Override
    public VolumeCheckResult answer(InvocationOnMock invocationOnMock)
        throws Throwable {
      // Wait for the disk check to timeout and then release lock.
      lock.lock();
      lock.unlock();
      return VolumeCheckResult.HEALTHY;
    }
  });
  return volume;
}
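A hedged sketch of how the timeout test can drive this slow volume: hold the shared lock so the mocked check() blocks past the checker's timeout, then assert the volume lands in the failed set. The FakeTimer, timeout configuration, and callback wiring here are assumptions of the sketch, not the exact test body.

lock.lock();
try {
  FsVolumeSpi volume = makeSlowVolume();
  DatasetVolumeChecker checker =
      new DatasetVolumeChecker(conf, new FakeTimer());
  checker.checkVolume(volume, (healthyVolumes, failedVolumes) -> {
    // Because check() is stuck on the lock, the checker should time
    // out and report the volume as failed rather than healthy.
    assertTrue(failedVolumes.contains(volume));
    assertTrue(healthyVolumes.isEmpty());
  });
  // ... wait for the callback to fire before releasing the lock.
} finally {
  lock.unlock();  // let the mocked check() finally return
}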
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class DataNode, method reportBadBlocks.
/**
 * Report a bad block which is hosted on the local DN.
 */
public void reportBadBlocks(ExtendedBlock block) throws IOException {
  FsVolumeSpi volume = getFSDataset().getVolume(block);
  if (volume == null) {
    LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
    return;
  }
  reportBadBlocks(block, volume);
}
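A hypothetical caller, for illustration only: after detecting a checksum failure while reading a replica, a DataNode-side component could flag the block so the NameNode schedules re-replication ('datanode' and 'block' are assumed to be in scope).

try {
  datanode.reportBadBlocks(block);
} catch (IOException e) {
  LOG.warn("Failed to report bad block " + block, e);
}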
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class DataNode, method checkDiskError.
/**
 * Check the disk error synchronously.
 */
@VisibleForTesting
public void checkDiskError() throws IOException {
  Set<FsVolumeSpi> unhealthyVolumes;
  try {
    unhealthyVolumes = volumeChecker.checkAllVolumes(data);
    lastDiskErrorCheck = Time.monotonicNow();
  } catch (InterruptedException e) {
    LOG.error("Interrupted while running disk check", e);
    throw new IOException("Interrupted while running disk check", e);
  }

  if (unhealthyVolumes.size() > 0) {
    LOG.warn("checkDiskError got {} failed volumes - {}",
        unhealthyVolumes.size(), unhealthyVolumes);
    handleVolumeFailures(unhealthyVolumes);
  } else {
    LOG.debug("checkDiskError encountered no failures");
  }
}
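A minimal test sketch, assuming a running MiniDFSCluster named cluster: trigger the synchronous check and confirm a healthy DataNode survives it (isDatanodeUp() is assumed available as a liveness probe).

DataNode dn = cluster.getDataNodes().get(0);
dn.checkDiskError();  // runs volumeChecker.checkAllVolumes() inline
assertTrue("DataNode should stay up after a clean disk check",
    dn.isDatanodeUp());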