Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class DataNodeDiskMetrics, method startDiskOutlierDetectionThread.
private void startDiskOutlierDetectionThread() {
  slowDiskDetectionDaemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      while (shouldRun) {
        Map<String, Double> metadataOpStats = Maps.newHashMap();
        Map<String, Double> readIoStats = Maps.newHashMap();
        Map<String, Double> writeIoStats = Maps.newHashMap();
        FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
        try {
          fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
          Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences.iterator();
          // Sample the mean metadata, read and write I/O times per volume.
          while (volumeIterator.hasNext()) {
            FsVolumeSpi volume = volumeIterator.next();
            DataNodeVolumeMetrics metrics = volume.getMetrics();
            String volumeName = volume.getBaseURI().getPath();
            metadataOpStats.put(volumeName, metrics.getMetadataOperationMean());
            readIoStats.put(volumeName, metrics.getReadIoMean());
            writeIoStats.put(volumeName, metrics.getWriteIoMean());
          }
        } finally {
          if (fsVolumeReferences != null) {
            try {
              fsVolumeReferences.close();
            } catch (IOException e) {
              LOG.error("Error in releasing FS Volume references", e);
            }
          }
        }
        if (metadataOpStats.isEmpty() && readIoStats.isEmpty()
            && writeIoStats.isEmpty()) {
          LOG.debug("No disk stats available for detecting outliers.");
          return;
        }
        detectAndUpdateDiskOutliers(metadataOpStats, readIoStats, writeIoStats);
        try {
          Thread.sleep(detectionInterval);
        } catch (InterruptedException e) {
          LOG.error("Disk Outlier Detection thread interrupted", e);
          Thread.currentThread().interrupt();
        }
      }
    }
  });
  slowDiskDetectionDaemon.start();
}
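Since FsVolumeReferences implements Closeable and is iterable over FsVolumeSpi (the TestWriteToReplica snippets below rely on exactly that), the sampling loop above can also be written with try-with-resources instead of the explicit finally block. A minimal sketch, not the actual Hadoop implementation; sampleMetadataOpMeans is a hypothetical helper name, and dn and the metric getters are assumed to be the same as above:

private Map<String, Double> sampleMetadataOpMeans() throws IOException {
  Map<String, Double> metadataOpStats = Maps.newHashMap();
  // try-with-resources releases the volume references even on failure.
  try (FsDatasetSpi.FsVolumeReferences refs =
      dn.getFSDataset().getFsVolumeReferences()) {
    for (FsVolumeSpi volume : refs) {
      DataNodeVolumeMetrics metrics = volume.getMetrics();
      metadataOpStats.put(volume.getBaseURI().getPath(),
          metrics.getMetadataOperationMean());
    }
  }
  return metadataOpStats;
}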
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class TestWriteToReplica, method createReplicas.
private void createReplicas(List<String> bpList, List<FsVolumeSpi> volumes,
    FsDatasetTestUtils testUtils) throws IOException {
  // Create one replica of each type (finalized, RBW, waiting-to-be-recovered
  // and in-pipeline) on every volume of every block pool and add them to the
  // volume map. The counter is used as both blockId and genStamp so each
  // replica gets a unique id.
  long id = 1;
  for (String bpId : bpList) {
    for (FsVolumeSpi volume : volumes) {
      ExtendedBlock eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createFinalizedReplica(volume, eb);
      id++;
      eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createRBW(volume, eb);
      id++;
      eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createReplicaWaitingToBeRecovered(volume, eb);
      id++;
      eb = new ExtendedBlock(bpId, id, 1, id);
      testUtils.createReplicaInPipeline(volume, eb);
      id++;
    }
  }
}
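For context, this helper is exercised by testReplicaMapAfterDatanodeRestart below; a trimmed sketch of that call pattern, assuming cluster, dn and dataSet are already set up as in that test:

// Collect the block pool ids of the two federated namespaces and the
// datanode's volumes, then create one replica of each type on every volume.
List<String> bpList = Arrays.asList(
    cluster.getNamesystem(0).getBlockPoolId(),
    cluster.getNamesystem(1).getBlockPoolId());
List<FsVolumeSpi> volumes = new ArrayList<>();
try (FsDatasetSpi.FsVolumeReferences refs = dataSet.getFsVolumeReferences()) {
  for (FsVolumeSpi vol : refs) {
    volumes.add(vol);
  }
}
createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));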
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class TestWriteToReplica, method testReplicaMapAfterDatanodeRestart.
/**
 * Verifies that the replica map is preserved across a quick datanode
 * restart (less than 5 minutes).
 * @throws Exception
 */
@Test
public void testReplicaMapAfterDatanodeRestart() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .build();
  try {
    cluster.waitActive();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    assertNotNull("cannot create nn1", nn1);
    assertNotNull("cannot create nn2", nn2);
    // Check the number of volumes in the fsdataset.
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
    List<FsVolumeSpi> volumes = null;
    try (FsDatasetSpi.FsVolumeReferences referredVols =
        dataSet.getFsVolumeReferences()) {
      // The number of volumes should be 2 - [data1, data2].
      assertEquals("number of volumes is wrong", 2, referredVols.size());
      volumes = new ArrayList<>(referredVols.size());
      for (FsVolumeSpi vol : referredVols) {
        volumes.add(vol);
      }
    }
    ArrayList<String> bpList = new ArrayList<>(Arrays.asList(
        cluster.getNamesystem(0).getBlockPoolId(),
        cluster.getNamesystem(1).getBlockPoolId()));
    Assert.assertTrue("Cluster should have 2 block pools", bpList.size() == 2);
    createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
    ReplicaMap oldReplicaMap = new ReplicaMap(new AutoCloseableLock());
    oldReplicaMap.addAll(dataSet.volumeMap);
    cluster.restartDataNode(0);
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);
    dataSet = (FsDatasetImpl) dn.getFSDataset();
    testEqualityOfReplicaMap(oldReplicaMap, dataSet.volumeMap, bpList);
  } finally {
    cluster.shutdown();
  }
}
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class DataNode, method handleVolumeFailures.
private void handleVolumeFailures(Set<FsVolumeSpi> unhealthyVolumes) {
  if (unhealthyVolumes.isEmpty()) {
    LOG.debug("handleVolumeFailures done with empty unhealthyVolumes");
    return;
  }
  data.handleVolumeFailures(unhealthyVolumes);
  Set<StorageLocation> unhealthyLocations =
      new HashSet<>(unhealthyVolumes.size());
  StringBuilder sb = new StringBuilder("DataNode failed volumes:");
  for (FsVolumeSpi vol : unhealthyVolumes) {
    unhealthyLocations.add(vol.getStorageLocation());
    sb.append(vol.getStorageLocation()).append(";");
  }
  try {
    // Remove all unhealthy volumes from the DataNode.
    removeVolumes(unhealthyLocations, false);
  } catch (IOException e) {
    LOG.warn("Error occurred when removing unhealthy storage dirs: "
        + e.getMessage(), e);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(sb.toString());
  }
  // Send a block report regarding the volume failure.
  handleDiskError(sb.toString());
}
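For illustration only, a minimal sketch of how a periodic disk check might feed this handler; checkVolumesForFailures is a hypothetical stand-in for the DataNode's real volume checker, not an actual API:

private void checkDiskErrorOnce() {
  // Hypothetical: determine which volumes are no longer healthy.
  Set<FsVolumeSpi> unhealthyVolumes = checkVolumesForFailures();
  // Remove the failed volumes and report the disk error, as shown above.
  handleVolumeFailures(unhealthyVolumes);
}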
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.
The class DiskBalancer, method getStorageIDToVolumeBasePathMap.
/**
 * Returns a storage ID (volume UUID) to volume base path map.
 *
 * @return Map
 * @throws DiskBalancerException
 */
private Map<String, String> getStorageIDToVolumeBasePathMap()
    throws DiskBalancerException {
  Map<String, String> storageIDToVolBasePathMap = new HashMap<>();
  FsDatasetSpi.FsVolumeReferences references;
  try {
    try (AutoCloseableLock lock = this.dataset.acquireDatasetLock()) {
      references = this.dataset.getFsVolumeReferences();
      for (int ndx = 0; ndx < references.size(); ndx++) {
        FsVolumeSpi vol = references.get(ndx);
        storageIDToVolBasePathMap.put(vol.getStorageID(),
            vol.getBaseURI().getPath());
      }
      references.close();
    }
  } catch (IOException ex) {
    LOG.error("Disk Balancer - Internal Error.", ex);
    throw new DiskBalancerException("Internal error", ex,
        DiskBalancerException.Result.INTERNAL_ERROR);
  }
  return storageIDToVolBasePathMap;
}
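Because FsVolumeReferences is itself Closeable, the explicit references.close() above can be folded into the same try-with-resources statement as the dataset lock. A minimal sketch of the equivalent loop, under the same assumptions as the method above, not the actual DiskBalancer code:

Map<String, String> storageIDToVolBasePathMap = new HashMap<>();
try (AutoCloseableLock lock = this.dataset.acquireDatasetLock();
    FsDatasetSpi.FsVolumeReferences refs =
        this.dataset.getFsVolumeReferences()) {
  for (int ndx = 0; ndx < refs.size(); ndx++) {
    FsVolumeSpi vol = refs.get(ndx);
    // Key: the volume's storage ID (a UUID); value: the volume's base path.
    storageIDToVolBasePathMap.put(vol.getStorageID(),
        vol.getBaseURI().getPath());
  }
} catch (IOException ex) {
  LOG.error("Disk Balancer - Internal Error.", ex);
  throw new DiskBalancerException("Internal error", ex,
      DiskBalancerException.Result.INTERNAL_ERROR);
}
return storageIDToVolBasePathMap;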