Usage of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in the Apache Hadoop project: class FSNamesystem, method getLiveNodes.
/**
 * Returned information is a JSON representation of map with host name as the
 * key and value is a map of live node attribute keys to its values
 */
// NameNodeMXBean
@Override
public String getLiveNodes() {
  final Map<String, Map<String, Object>> info =
      new HashMap<String, Map<String, Object>>();
  final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  // Only the live list is wanted; pass null for the dead list.
  blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
  for (DatanodeDescriptor node : live) {
    // Assemble the per-datanode attribute map exposed through the MXBean.
    ImmutableMap.Builder<String, Object> attrs =
        ImmutableMap.<String, Object>builder();
    attrs.put("infoAddr", node.getInfoAddr());
    attrs.put("infoSecureAddr", node.getInfoSecureAddr());
    attrs.put("xferaddr", node.getXferAddr());
    attrs.put("lastContact", getLastContact(node));
    attrs.put("usedSpace", getDfsUsed(node));
    attrs.put("adminState", node.getAdminState().toString());
    attrs.put("nonDfsUsedSpace", node.getNonDfsUsed());
    attrs.put("capacity", node.getCapacity());
    attrs.put("numBlocks", node.numBlocks());
    attrs.put("version", node.getSoftwareVersion());
    attrs.put("used", node.getDfsUsed());
    attrs.put("remaining", node.getRemaining());
    attrs.put("blockScheduled", node.getBlocksScheduled());
    attrs.put("blockPoolUsed", node.getBlockPoolUsed());
    attrs.put("blockPoolUsedPercent", node.getBlockPoolUsedPercent());
    attrs.put("volfails", node.getVolumeFailures());
    attrs.put("lastBlockReport", getLastBlockReport(node));
    VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
    // Volume-failure details are only included when a summary is present.
    if (volumeFailureSummary != null) {
      attrs.put("failedStorageIDs",
          volumeFailureSummary.getFailedStorageLocations());
      attrs.put("lastVolumeFailureDate",
          volumeFailureSummary.getLastVolumeFailureDate());
      attrs.put("estimatedCapacityLostTotal",
          volumeFailureSummary.getEstimatedCapacityLostTotal());
    }
    if (node.getUpgradeDomain() != null) {
      attrs.put("upgradeDomain", node.getUpgradeDomain());
    }
    // Key the outer map by "hostname:xferPort".
    info.put(node.getHostName() + ":" + node.getXferPort(), attrs.build());
  }
  return JSON.toString(info);
}
Usage of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in the Apache Hadoop project: class FSNamesystem, method getDatanodeStorageReport.
/**
 * Builds a storage report for every datanode matching the given report type.
 *
 * @param type which datanodes to include in the report
 * @return one {@link DatanodeStorageReport} per matching datanode
 * @throws AccessControlException if the caller is not the superuser
 * @throws StandbyException if the operation is not allowed in this NN state
 */
DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type)
    throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    // Re-check under the lock, mirroring the pre-lock check above.
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> datanodes =
        dm.getDatanodeListForReport(type);
    final DatanodeStorageReport[] reports =
        new DatanodeStorageReport[datanodes.size()];
    int idx = 0;
    for (final DatanodeDescriptor d : datanodes) {
      reports[idx++] = new DatanodeStorageReport(
          new DatanodeInfoBuilder().setFrom(d).build(), d.getStorageReports());
    }
    return reports;
  } finally {
    readUnlock("getDatanodeStorageReport");
  }
}
Usage of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in the Apache Hadoop project: class DFSTestUtil, method createDatanodeStorageInfo.
/**
 * Creates a {@link DatanodeStorageInfo} backed by a fresh
 * {@link DatanodeDescriptor} that owns a single NORMAL-state storage.
 *
 * @param upgradeDomain optional upgrade domain; skipped when null
 */
public static DatanodeStorageInfo createDatanodeStorageInfo(String storageID,
    String ip, String rack, String hostname, StorageType type,
    String upgradeDomain) {
  final DatanodeStorage storage =
      new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, type);
  final DatanodeDescriptor descriptor =
      BlockManagerTestUtil.getDatanodeDescriptor(ip, rack, storage, hostname);
  if (upgradeDomain != null) {
    descriptor.setUpgradeDomain(upgradeDomain);
  }
  return BlockManagerTestUtil.newDatanodeStorageInfo(descriptor, storage);
}
Usage of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in the Apache Hadoop project: class DFSTestUtil, method getLiveDatanodeCapacity.
/*
 * Return the total capacity of all live DNs.
 */
public static long getLiveDatanodeCapacity(DatanodeManager dm) {
  final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  // Only live nodes are requested; the dead-node list is not needed.
  dm.fetchDatanodes(live, null, false);
  long total = 0;
  for (int i = 0; i < live.size(); i++) {
    total += live.get(i).getCapacity();
  }
  return total;
}
Usage of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in the Apache Hadoop project: class DFSTestUtil, method waitForDatanodeStatus.
/*
 * Wait for the given # live/dead DNs, total capacity, and # vol failures.
 *
 * Polls the DatanodeManager up to ATTEMPTS times, sleeping {@code timeout}
 * milliseconds before each poll, and throws TimeoutException if the expected
 * state is never observed.
 */
public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
    int expectedDead, long expectedVolFails, long expectedTotalCapacity,
    long timeout) throws InterruptedException, TimeoutException {
  final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
  final int ATTEMPTS = 10;
  int count = 0;
  long currTotalCapacity = 0;
  int volFails = 0;
  boolean satisfied = false;
  do {
    Thread.sleep(timeout);
    live.clear();
    dead.clear();
    dm.fetchDatanodes(live, dead, false);
    currTotalCapacity = 0;
    volFails = 0;
    for (final DatanodeDescriptor dd : live) {
      currTotalCapacity += dd.getCapacity();
      volFails += dd.getVolumeFailures();
    }
    count++;
    satisfied = expectedLive == live.size() && expectedDead == dead.size()
        && expectedTotalCapacity == currTotalCapacity
        && expectedVolFails == volFails;
  } while (!satisfied && count < ATTEMPTS);
  // BUGFIX: test the actual condition, not the attempt counter. The old
  // "count == ATTEMPTS" check threw a spurious TimeoutException when the
  // expected state was first reached on the final attempt.
  if (!satisfied) {
    throw new TimeoutException("Timed out waiting for capacity." + " Live = "
        + live.size() + " Expected = " + expectedLive + " Dead = "
        + dead.size() + " Expected = " + expectedDead + " Total capacity = "
        + currTotalCapacity + " Expected = " + expectedTotalCapacity
        + " Vol Fails = " + volFails + " Expected = " + expectedVolFails);
  }
}
Aggregations