Example 66 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class FSNamesystem method getLiveNodes.

/**
   * Returned information is a JSON representation of a map whose key is the
   * host name and whose value is a map of live-node attribute names to values.
   */
// NameNodeMXBean
@Override
public String getLiveNodes() {
    final Map<String, Map<String, Object>> info = new HashMap<String, Map<String, Object>>();
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
    for (DatanodeDescriptor node : live) {
        ImmutableMap.Builder<String, Object> innerinfo = ImmutableMap.<String, Object>builder();
        // One entry per datanode attribute surfaced in the NameNode web UI and JMX.
        innerinfo.put("infoAddr", node.getInfoAddr())
            .put("infoSecureAddr", node.getInfoSecureAddr())
            .put("xferaddr", node.getXferAddr())
            .put("lastContact", getLastContact(node))
            .put("usedSpace", getDfsUsed(node))
            .put("adminState", node.getAdminState().toString())
            .put("nonDfsUsedSpace", node.getNonDfsUsed())
            .put("capacity", node.getCapacity())
            .put("numBlocks", node.numBlocks())
            .put("version", node.getSoftwareVersion())
            .put("used", node.getDfsUsed())
            .put("remaining", node.getRemaining())
            .put("blockScheduled", node.getBlocksScheduled())
            .put("blockPoolUsed", node.getBlockPoolUsed())
            .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
            .put("volfails", node.getVolumeFailures())
            .put("lastBlockReport", getLastBlockReport(node));
        VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
        if (volumeFailureSummary != null) {
            innerinfo.put("failedStorageIDs", volumeFailureSummary.getFailedStorageLocations())
                .put("lastVolumeFailureDate", volumeFailureSummary.getLastVolumeFailureDate())
                .put("estimatedCapacityLostTotal", volumeFailureSummary.getEstimatedCapacityLostTotal());
        }
        if (node.getUpgradeDomain() != null) {
            innerinfo.put("upgradeDomain", node.getUpgradeDomain());
        }
        info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo.build());
    }
    return JSON.toString(info);
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Map(java.util.Map) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap) VolumeFailureSummary(org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary)
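Because FSNamesystem implements NameNodeMXBean, the string built here is what the NameNode serves as the LiveNodes attribute of its NameNodeInfo JMX bean. A minimal client-side sketch for fetching that payload over the /jmx servlet, assuming the default Hadoop 3 NameNode HTTP port 9870 (host and port are placeholders; adjust for your cluster):

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class LiveNodesFetch {
    public static void main(String[] args) throws Exception {
        // Hypothetical address: 9870 is the default NameNode HTTP port in Hadoop 3.
        URL url = new URL(
            "http://localhost:9870/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (InputStream in = conn.getInputStream()) {
            // The response embeds the LiveNodes JSON string built by getLiveNodes().
            // readAllBytes() requires Java 9 or later.
            System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
        } finally {
            conn.disconnect();
        }
    }
}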

Example 67 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class FSNamesystem method getDatanodeStorageReport.

DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type) throws AccessControlException, StandbyException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.UNCHECKED);
    readLock();
    try {
        checkOperation(OperationCategory.UNCHECKED);
        final DatanodeManager dm = getBlockManager().getDatanodeManager();
        final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
        DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
        for (int i = 0; i < reports.length; i++) {
            final DatanodeDescriptor d = datanodes.get(i);
            reports[i] = new DatanodeStorageReport(new DatanodeInfoBuilder().setFrom(d).build(), d.getStorageReports());
        }
        return reports;
    } finally {
        readUnlock("getDatanodeStorageReport");
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport)
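From a client or test, the same report is reachable without touching FSNamesystem directly: DFSClient.getDatanodeStorageReport() forwards to this method over the ClientProtocol. A minimal sketch against a MiniDFSCluster; note that DistributedFileSystem.getClient() is exposed for testing:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;

public class StorageReportExample {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).build();
        try {
            cluster.waitActive();
            DistributedFileSystem fs = cluster.getFileSystem();
            // The call travels over ClientProtocol to
            // FSNamesystem.getDatanodeStorageReport() shown above.
            DatanodeStorageReport[] reports =
                fs.getClient().getDatanodeStorageReport(DatanodeReportType.LIVE);
            for (DatanodeStorageReport r : reports) {
                System.out.println(r.getDatanodeInfo().getXferAddr()
                    + " storages=" + r.getStorageReports().length);
            }
        } finally {
            cluster.shutdown();
        }
    }
}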

Example 68 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class DFSTestUtil method createDatanodeStorageInfo.

public static DatanodeStorageInfo createDatanodeStorageInfo(String storageID, String ip, String rack, String hostname, StorageType type, String upgradeDomain) {
    final DatanodeStorage storage = new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, type);
    final DatanodeDescriptor dn = BlockManagerTestUtil.getDatanodeDescriptor(ip, rack, storage, hostname);
    if (upgradeDomain != null) {
        dn.setUpgradeDomain(upgradeDomain);
    }
    return BlockManagerTestUtil.newDatanodeStorageInfo(dn, storage);
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)
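In a placement or topology test this helper is typically called a few times to build a synthetic set of storages. A short sketch; every identifier below (storage IDs, IPs, racks, host names, the upgrade domain) is a made-up test value:

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

// Two synthetic storages on different racks; the second also carries an upgrade domain.
DatanodeStorageInfo disk = DFSTestUtil.createDatanodeStorageInfo(
    "storage-1", "10.0.0.1", "/rack1", "host1.example.com", StorageType.DISK, null);
DatanodeStorageInfo ssd = DFSTestUtil.createDatanodeStorageInfo(
    "storage-2", "10.0.0.2", "/rack2", "host2.example.com", StorageType.SSD, "ud-1");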

Example 69 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class DFSTestUtil method getLiveDatanodeCapacity.

/*
   * Return the total capacity of all live DNs.
   */
public static long getLiveDatanodeCapacity(DatanodeManager dm) {
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    dm.fetchDatanodes(live, null, false);
    long capacity = 0;
    for (final DatanodeDescriptor dn : live) {
        capacity += dn.getCapacity();
    }
    return capacity;
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) ArrayList(java.util.ArrayList)
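A typical call site inside a test with a running MiniDFSCluster, reaching the DatanodeManager through the namesystem's BlockManager (variable names are illustrative):

// cluster is a started MiniDFSCluster.
DatanodeManager dm =
    cluster.getNamesystem().getBlockManager().getDatanodeManager();
long liveCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);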

Example 70 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class DFSTestUtil method waitForDatanodeStatus.

/*
   * Wait for the given # live/dead DNs, total capacity, and # vol failures. 
   */
public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive, int expectedDead, long expectedVolFails, long expectedTotalCapacity, long timeout) throws InterruptedException, TimeoutException {
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
    final int ATTEMPTS = 10;
    int count = 0;
    long currTotalCapacity = 0;
    int volFails = 0;
    do {
        Thread.sleep(timeout);
        live.clear();
        dead.clear();
        dm.fetchDatanodes(live, dead, false);
        currTotalCapacity = 0;
        volFails = 0;
        for (final DatanodeDescriptor dd : live) {
            currTotalCapacity += dd.getCapacity();
            volFails += dd.getVolumeFailures();
        }
        count++;
    } while ((expectedLive != live.size() || expectedDead != dead.size() || expectedTotalCapacity != currTotalCapacity || expectedVolFails != volFails) && count < ATTEMPTS);
    if (count == ATTEMPTS) {
        throw new TimeoutException("Timed out waiting for capacity." + " Live = " + live.size() + " Expected = " + expectedLive + " Dead = " + dead.size() + " Expected = " + expectedDead + " Total capacity = " + currTotalCapacity + " Expected = " + expectedTotalCapacity + " Vol Fails = " + volFails + " Expected = " + expectedVolFails);
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) ArrayList(java.util.ArrayList) TimeoutException(java.util.concurrent.TimeoutException)
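Note that the timeout argument is the sleep between attempts rather than a total deadline, so the worst case is ATTEMPTS * timeout milliseconds. A sketch of how a dead-node test might use it, assuming a three-node MiniDFSCluster with equally sized DataNodes (dead-node detection only settles after the heartbeat expiry interval, so tests usually shorten it):

// All three DataNodes are assumed to have equal capacity.
long perNodeCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm) / 3;
cluster.stopDataNode(0);
// Wait for 2 live, 1 dead, 0 volume failures, and the reduced capacity,
// sleeping 1000 ms between each of the (up to 10) checks.
DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0, 2 * perNodeCapacity, 1000);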

Aggregations

DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 74 uses
Test (org.junit.Test): 37 uses
ArrayList (java.util.ArrayList): 23 uses
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 21 uses
Path (org.apache.hadoop.fs.Path): 19 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 13 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 11 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 uses
HashMap (java.util.HashMap): 9 uses
Configuration (org.apache.hadoop.conf.Configuration): 9 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9 uses
Node (org.apache.hadoop.net.Node): 9 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 8 uses
IOException (java.io.IOException): 7 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 uses
Map (java.util.Map): 6 uses