Usage example of org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary in the Apache Hadoop project:
the getLiveNodes method of the FSNamesystem class.
/**
 * Builds a JSON representation of all live DataNodes. The outer map is keyed
 * by "hostname:xferPort"; each value is a map from live-node attribute name
 * to its current value.
 */
// NameNodeMXBean
@Override
public String getLiveNodes() {
  final Map<String, Map<String, Object>> info = new HashMap<>();
  final List<DatanodeDescriptor> live = new ArrayList<>();
  // Collect only live nodes; dead-node list is not needed here.
  blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
  for (DatanodeDescriptor node : live) {
    // Insertion order is preserved by ImmutableMap, so the JSON attribute
    // order matches the order of the puts below.
    ImmutableMap.Builder<String, Object> attrs = ImmutableMap.<String, Object>builder();
    attrs.put("infoAddr", node.getInfoAddr());
    attrs.put("infoSecureAddr", node.getInfoSecureAddr());
    attrs.put("xferaddr", node.getXferAddr());
    attrs.put("lastContact", getLastContact(node));
    attrs.put("usedSpace", getDfsUsed(node));
    attrs.put("adminState", node.getAdminState().toString());
    attrs.put("nonDfsUsedSpace", node.getNonDfsUsed());
    attrs.put("capacity", node.getCapacity());
    attrs.put("numBlocks", node.numBlocks());
    attrs.put("version", node.getSoftwareVersion());
    attrs.put("used", node.getDfsUsed());
    attrs.put("remaining", node.getRemaining());
    attrs.put("blockScheduled", node.getBlocksScheduled());
    attrs.put("blockPoolUsed", node.getBlockPoolUsed());
    attrs.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent());
    attrs.put("volfails", node.getVolumeFailures());
    attrs.put("lastBlockReport", getLastBlockReport(node));
    // Volume-failure details are only present when the node reported them.
    VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
    if (volumeFailureSummary != null) {
      attrs.put("failedStorageIDs", volumeFailureSummary.getFailedStorageLocations());
      attrs.put("lastVolumeFailureDate", volumeFailureSummary.getLastVolumeFailureDate());
      attrs.put("estimatedCapacityLostTotal", volumeFailureSummary.getEstimatedCapacityLostTotal());
    }
    if (node.getUpgradeDomain() != null) {
      attrs.put("upgradeDomain", node.getUpgradeDomain());
    }
    info.put(node.getHostName() + ":" + node.getXferPort(), attrs.build());
  }
  return JSON.toString(info);
}
Usage example of org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary in the Apache Hadoop project:
the checkFailuresAtNameNode method of the TestDataNodeVolumeFailureReporting class.
/**
 * Checks NameNode tracking of a particular DataNode for correct reporting of
 * failed volumes.
 *
 * @param dm DatanodeManager to check
 * @param dn DataNode to check
 * @param expectCapacityKnown if true, then expect that the capacities of the
 * volumes were known before the failures, and therefore the lost capacity
 * can be reported
 * @param expectedFailedVolumes expected locations of failed volumes
 * @throws Exception if there is any failure
 */
private void checkFailuresAtNameNode(DatanodeManager dm, DataNode dn, boolean expectCapacityKnown, String... expectedFailedVolumes) throws Exception {
  // Fix: look up the descriptor through the DatanodeManager the caller passed
  // in. Previously the dm parameter was silently ignored and the manager was
  // re-derived from the cluster, so the method never actually checked the
  // manager it was documented to check.
  DatanodeDescriptor dd = dm.getDatanode(dn.getDatanodeId());
  assertEquals(expectedFailedVolumes.length, dd.getVolumeFailures());
  VolumeFailureSummary volumeFailureSummary = dd.getVolumeFailureSummary();
  if (expectedFailedVolumes.length > 0) {
    // Reported storage locations may be relative; normalize before comparing.
    assertArrayEquals(expectedFailedVolumes, convertToAbsolutePaths(volumeFailureSummary.getFailedStorageLocations()));
    // A positive timestamp proves the failure time was actually recorded.
    assertTrue(volumeFailureSummary.getLastVolumeFailureDate() > 0);
    long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown, expectedFailedVolumes.length);
    assertEquals(expectedCapacityLost, volumeFailureSummary.getEstimatedCapacityLostTotal());
  } else {
    // With no failed volumes, the DataNode must not report a summary at all.
    assertNull(volumeFailureSummary);
  }
}
Aggregated usages of VolumeFailureSummary across the project.