Search in sources :

Example 6 with VolumeFailureSummary

Use of org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary in the Apache Hadoop project.

From the class FSNamesystem, method getLiveNodes.

/**
 * Builds a JSON report of all live datanodes, keyed by
 * {@code hostname:xferPort}, where each value is a map of node attribute
 * names to their current values.
 */
// NameNodeMXBean
@Override
public String getLiveNodes() {
    final Map<String, Map<String, Object>> info = new HashMap<String, Map<String, Object>>();
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    // Fetch only the live nodes (dead list omitted, decommissioning not filtered).
    blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
    for (DatanodeDescriptor node : live) {
        // Accumulate this node's attributes; builder preserves insertion order.
        ImmutableMap.Builder<String, Object> attrs = ImmutableMap.<String, Object>builder();
        attrs.put("infoAddr", node.getInfoAddr());
        attrs.put("infoSecureAddr", node.getInfoSecureAddr());
        attrs.put("xferaddr", node.getXferAddr());
        attrs.put("lastContact", getLastContact(node));
        attrs.put("usedSpace", getDfsUsed(node));
        attrs.put("adminState", node.getAdminState().toString());
        attrs.put("nonDfsUsedSpace", node.getNonDfsUsed());
        attrs.put("capacity", node.getCapacity());
        attrs.put("numBlocks", node.numBlocks());
        attrs.put("version", node.getSoftwareVersion());
        attrs.put("used", node.getDfsUsed());
        attrs.put("remaining", node.getRemaining());
        attrs.put("blockScheduled", node.getBlocksScheduled());
        attrs.put("blockPoolUsed", node.getBlockPoolUsed());
        attrs.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent());
        attrs.put("volfails", node.getVolumeFailures());
        attrs.put("lastBlockReport", getLastBlockReport(node));
        // Volume-failure details are only present when failures have occurred.
        VolumeFailureSummary failures = node.getVolumeFailureSummary();
        if (failures != null) {
            attrs.put("failedStorageIDs", failures.getFailedStorageLocations());
            attrs.put("lastVolumeFailureDate", failures.getLastVolumeFailureDate());
            attrs.put("estimatedCapacityLostTotal", failures.getEstimatedCapacityLostTotal());
        }
        // Upgrade domain is optional cluster metadata.
        if (node.getUpgradeDomain() != null) {
            attrs.put("upgradeDomain", node.getUpgradeDomain());
        }
        info.put(node.getHostName() + ":" + node.getXferPort(), attrs.build());
    }
    return JSON.toString(info);
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Map(java.util.Map) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) VolumeFailureSummary(org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary) ImmutableMap(com.google.common.collect.ImmutableMap)

Example 7 with VolumeFailureSummary

Use of org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary in the Apache Hadoop project.

From the class TestDataNodeVolumeFailureReporting, method checkFailuresAtNameNode.

/**
 * Verifies that the NameNode's view of a DataNode correctly reflects its
 * failed volumes: the failure count, the failed storage locations, the
 * last failure timestamp, and the estimated lost capacity.
 *
 * @param dm DatanodeManager to check
 * @param dn DataNode to check
 * @param expectCapacityKnown if true, then expect that the capacities of the
 *     volumes were known before the failures, and therefore the lost capacity
 *     can be reported
 * @param expectedFailedVolumes expected locations of failed volumes
 * @throws Exception if there is any failure
 */
private void checkFailuresAtNameNode(DatanodeManager dm, DataNode dn, boolean expectCapacityKnown, String... expectedFailedVolumes) throws Exception {
    final DatanodeDescriptor descriptor = cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId());
    final int expectedFailures = expectedFailedVolumes.length;
    assertEquals(expectedFailures, descriptor.getVolumeFailures());
    final VolumeFailureSummary summary = descriptor.getVolumeFailureSummary();
    if (expectedFailures == 0) {
        // With no failures, the NameNode should carry no summary at all.
        assertNull(summary);
        return;
    }
    assertArrayEquals(expectedFailedVolumes, convertToAbsolutePaths(summary.getFailedStorageLocations()));
    // A recorded failure must carry a positive timestamp.
    assertTrue(summary.getLastVolumeFailureDate() > 0);
    final long expectedCapacityLost = getExpectedCapacityLost(expectCapacityKnown, expectedFailures);
    assertEquals(expectedCapacityLost, summary.getEstimatedCapacityLostTotal());
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) VolumeFailureSummary(org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary)

Aggregations

VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary)7 IOException (java.io.IOException)3 DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor)3 StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport)3 ServiceException (com.google.protobuf.ServiceException)2 ArrayList (java.util.ArrayList)2 HeartbeatResponse (org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse)2 ImmutableMap (com.google.common.collect.ImmutableMap)1 File (java.io.File)1 HashMap (java.util.HashMap)1 Map (java.util.Map)1 TreeMap (java.util.TreeMap)1 Path (org.apache.hadoop.fs.Path)1 RollingUpgradeStatus (org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus)1 HeartbeatResponseProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto)1 RollingUpgradeStatusProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)1 BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager)1 DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand)1 SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports)1 Test (org.junit.Test)1