Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
The class NamenodeFsck, method getReplicaInfo.
/**
 * Display info of each replica for a replicated block.
 * For a striped block group, display info of each internal block.
 */
private String getReplicaInfo(BlockInfo storedBlock) {
  if (!(showLocations || showRacks || showReplicaDetails ||
      showUpgradeDomains)) {
    return "";
  }
  final boolean isComplete = storedBlock.isComplete();
  Iterator<DatanodeStorageInfo> storagesItr;
  StringBuilder sb = new StringBuilder(" [");
  final boolean isStriped = storedBlock.isStriped();
  Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
  if (isComplete) {
    if (isStriped) {
      long blockId = storedBlock.getBlockId();
      Iterable<StorageAndBlockIndex> sis =
          ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
      for (StorageAndBlockIndex si : sis) {
        storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
      }
    }
    storagesItr = storedBlock.getStorageInfos();
  } else {
    storagesItr = storedBlock.getUnderConstructionFeature()
        .getExpectedStorageLocationsIterator();
  }
  while (storagesItr.hasNext()) {
    DatanodeStorageInfo storage = storagesItr.next();
    if (isStriped && isComplete) {
      long index = storage2Id.get(storage);
      sb.append("blk_" + index + ":");
    }
    DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
    if (showRacks) {
      sb.append(NodeBase.getPath(dnDesc));
    } else {
      sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(),
          storage.getStorageType()));
    }
    if (showUpgradeDomains) {
      String upgradeDomain = (dnDesc.getUpgradeDomain() != null)
          ? dnDesc.getUpgradeDomain() : UNDEFINED;
      sb.append("(ud=" + upgradeDomain + ")");
    }
    if (showReplicaDetails) {
      Collection<DatanodeDescriptor> corruptReplicas =
          blockManager.getCorruptReplicas(storedBlock);
      sb.append("(");
      if (dnDesc.isDecommissioned()) {
        sb.append("DECOMMISSIONED)");
      } else if (dnDesc.isDecommissionInProgress()) {
        sb.append("DECOMMISSIONING)");
      } else if (this.showMaintenanceState && dnDesc.isEnteringMaintenance()) {
        sb.append("ENTERING MAINTENANCE)");
      } else if (this.showMaintenanceState && dnDesc.isInMaintenance()) {
        sb.append("IN MAINTENANCE)");
      } else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
        sb.append("CORRUPT)");
      } else if (blockManager.isExcess(dnDesc, storedBlock)) {
        sb.append("EXCESS)");
      } else if (dnDesc.isStale(this.staleInterval)) {
        sb.append("STALE_NODE)");
      } else if (storage.areBlockContentsStale()) {
        sb.append("STALE_BLOCK_CONTENT)");
      } else {
        sb.append("LIVE)");
      }
    }
    if (storagesItr.hasNext()) {
      sb.append(", ");
    }
  }
  sb.append(']');
  return sb.toString();
}
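This method backs the per-replica annotations in fsck's block listing; the show* booleans map to fsck command-line options. A minimal sketch of driving it from the standard fsck client, assuming a reachable NameNode (the path /user/example is a placeholder):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckReplicaInfoExample {
  public static void main(String[] args) throws Exception {
    // -locations/-racks/-replicaDetails/-upgradedomains toggle the
    // showLocations/showRacks/showReplicaDetails/showUpgradeDomains
    // flags consulted by getReplicaInfo above.
    int ret = ToolRunner.run(new DFSck(new Configuration()), new String[] {
        "/user/example", "-files", "-blocks", "-locations", "-replicaDetails" });
    System.exit(ret);
  }
}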
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
The class NamenodeFsck, method blockIdCK.
/**
 * Check block information given a blockId.
 */
public void blockIdCK(String blockId) {
  if (blockId == null) {
    out.println("Please provide valid blockId!");
    return;
  }
  try {
    // Get the stored block for this block ID.
    Block block = new Block(Block.getBlockId(blockId));
    // Find which file this block belongs to.
    BlockInfo blockInfo = blockManager.getStoredBlock(block);
    if (blockInfo == null) {
      out.println("Block " + blockId + " " + NONEXISTENT_STATUS);
      LOG.warn("Block " + blockId + " " + NONEXISTENT_STATUS);
      return;
    }
    final INodeFile iNode =
        namenode.getNamesystem().getBlockCollection(blockInfo);
    NumberReplicas numberReplicas = blockManager.countNodes(blockInfo);
    out.println("Block Id: " + blockId);
    out.println("Block belongs to: " + iNode.getFullPathName());
    out.println("No. of Expected Replica: " +
        blockManager.getExpectedRedundancyNum(blockInfo));
    out.println("No. of live Replica: " + numberReplicas.liveReplicas());
    out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
    out.println("No. of stale Replica: " +
        numberReplicas.replicasOnStaleNodes());
    out.println("No. of decommissioned Replica: " +
        numberReplicas.decommissioned());
    out.println("No. of decommissioning Replica: " +
        numberReplicas.decommissioning());
    if (this.showMaintenanceState) {
      out.println("No. of entering maintenance Replica: " +
          numberReplicas.liveEnteringMaintenanceReplicas());
      out.println("No. of in maintenance Replica: " +
          numberReplicas.maintenanceNotForReadReplicas());
    }
    out.println("No. of corrupted Replica: " +
        numberReplicas.corruptReplicas());
    // Record datanodes that hold a corrupt replica of this block.
    Collection<DatanodeDescriptor> corruptionRecord =
        blockManager.getCorruptReplicas(block);
    // Report the replica status on each datanode.
    for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
      DatanodeDescriptor dn = blockInfo.getDatanode(idx);
      out.print("Block replica on datanode/rack: " + dn.getHostName() +
          dn.getNetworkLocation() + " ");
      if (corruptionRecord != null && corruptionRecord.contains(dn)) {
        out.print(CORRUPT_STATUS + "\t ReasonCode: " +
            blockManager.getCorruptReason(block, dn));
      } else if (dn.isDecommissioned()) {
        out.print(DECOMMISSIONED_STATUS);
      } else if (dn.isDecommissionInProgress()) {
        out.print(DECOMMISSIONING_STATUS);
      } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
        out.print(ENTERING_MAINTENANCE_STATUS);
      } else if (this.showMaintenanceState && dn.isInMaintenance()) {
        out.print(IN_MAINTENANCE_STATUS);
      } else {
        out.print(HEALTHY_STATUS);
      }
      out.print("\n");
    }
  } catch (Exception e) {
    String errMsg = "Fsck on blockId '" + blockId + "'";
    LOG.warn(errMsg, e);
    out.println(e.getMessage());
    out.print("\n\n" + errMsg);
  }
}
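blockIdCK is what serves fsck's -blockId option. A hedged sketch of invoking it through the command-line tool, assuming a running NameNode; blk_1073741825 is a hypothetical block ID:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class BlockIdCheckExample {
  public static void main(String[] args) throws Exception {
    // fsck routes the -blockId argument to NamenodeFsck.blockIdCK, which
    // prints the owning file plus the per-replica states shown above.
    int ret = ToolRunner.run(new DFSck(new Configuration()),
        new String[] { "/", "-blockId", "blk_1073741825" });
    System.exit(ret);
  }
}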
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
The class TestDecommission, method testRecommission.
/**
* Test that over-replicated blocks are deleted on recommission.
*/
@Test(timeout = 120000)
public void testRecommission() throws Exception {
  final int numDatanodes = 6;
  try {
    LOG.info("Starting test testRecommission");
    startCluster(1, numDatanodes);
    final Path file1 = new Path("testDecommission.dat");
    final int replicas = numDatanodes - 1;
    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
    final FileSystem fileSys = getCluster().getFileSystem();
    // Write a file to n-1 datanodes.
    writeFile(fileSys, file1, replicas);
    // Decommission one of the datanodes with a replica.
    BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
    assertEquals("Unexpected number of replicas from getFileBlockLocations",
        replicas, loc.getHosts().length);
    final String toDecomHost = loc.getNames()[0];
    String toDecomUuid = null;
    for (DataNode d : getCluster().getDataNodes()) {
      if (d.getDatanodeId().getXferAddr().equals(toDecomHost)) {
        toDecomUuid = d.getDatanodeId().getDatanodeUuid();
        break;
      }
    }
    assertNotNull("Could not find a dn with the block!", toDecomUuid);
    final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid, 0,
        decommissionedNodes, AdminStates.DECOMMISSIONED);
    decommissionedNodes.add(decomNode);
    final BlockManager blockManager =
        getCluster().getNamesystem().getBlockManager();
    final DatanodeManager datanodeManager = blockManager.getDatanodeManager();
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
    // Ensure the decommissioned datanode is not automatically shut down.
    DFSClient client = getDfsClient(0);
    assertEquals("All datanodes must be alive", numDatanodes,
        client.datanodeReport(DatanodeReportType.LIVE).length);
    // Wait for the block to be replicated onto the remaining nodes.
    final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
    final String uuid = toDecomUuid;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

      @Override
      public Boolean get() {
        BlockInfo info = blockManager.getStoredBlock(b.getLocalBlock());
        int count = 0;
        StringBuilder sb = new StringBuilder("Replica locations: ");
        for (int i = 0; i < info.numNodes(); i++) {
          DatanodeDescriptor dn = info.getDatanode(i);
          sb.append(dn + ", ");
          if (!dn.getDatanodeUuid().equals(uuid)) {
            count++;
          }
        }
        LOG.info(sb.toString());
        LOG.info("Count: " + count);
        return count == replicas;
      }
    }, 500, 30000);
    // Recommission the node and wait for the over-replication to be fixed.
    putNodeInService(0, decomNode);
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
    DFSTestUtil.waitForReplication(getCluster(), b, 1, replicas, 0);
    cleanupFile(fileSys, file1);
  } finally {
    shutdownCluster();
  }
}
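The startCluster, getCluster, takeNodeOutofService, and putNodeInService helpers come from the test's base class; a minimal sketch of the equivalent MiniDFSCluster bootstrap, assuming the hadoop-hdfs test artifact is on the classpath:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Roughly what startCluster(1, 6) sets up: one NameNode, six DataNodes.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(6).build();
    try {
      cluster.waitActive();
      // ... exercise decommission/recommission against cluster here ...
    } finally {
      cluster.shutdown();
    }
  }
}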
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
The class FSNamesystem, method getNodeUsage.
// NameNodeMXBean
@Override
public String getNodeUsage() {
  float median = 0;
  float max = 0;
  float min = 0;
  float dev = 0;
  final Map<String, Map<String, Object>> info =
      new HashMap<String, Map<String, Object>>();
  final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
  for (Iterator<DatanodeDescriptor> it = live.iterator(); it.hasNext();) {
    DatanodeDescriptor node = it.next();
    if (!node.isInService()) {
      it.remove();
    }
  }
  if (live.size() > 0) {
    float totalDfsUsed = 0;
    float[] usages = new float[live.size()];
    int i = 0;
    for (DatanodeDescriptor dn : live) {
      usages[i++] = dn.getDfsUsedPercent();
      totalDfsUsed += dn.getDfsUsedPercent();
    }
    // After this division, totalDfsUsed holds the mean usage across the
    // live, in-service nodes; it is used as the mean in the std-dev sum.
    totalDfsUsed /= live.size();
    Arrays.sort(usages);
    median = usages[usages.length / 2];
    max = usages[usages.length - 1];
    min = usages[0];
    for (i = 0; i < usages.length; i++) {
      dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
    }
    dev = (float) Math.sqrt(dev / usages.length);
  }
  final Map<String, Object> innerInfo = new HashMap<String, Object>();
  innerInfo.put("min", StringUtils.format("%.2f%%", min));
  innerInfo.put("median", StringUtils.format("%.2f%%", median));
  innerInfo.put("max", StringUtils.format("%.2f%%", max));
  innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev));
  info.put("nodeUsage", innerInfo);
  return JSON.toString(info);
}
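getNodeUsage is exposed through the NameNodeMXBean as the NodeUsage attribute (also visible via the NameNode's /jmx HTTP endpoint). A sketch of reading it in-process from the platform MBean server; remote callers would use a JMXConnector instead:
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NodeUsageReader {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // The NameNodeMXBean is registered under name=NameNodeInfo.
    ObjectName name =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    // Prints the JSON string built by getNodeUsage above, e.g.
    // {"nodeUsage":{"min":"0.00%","median":"0.01%","max":"0.02%","stdDev":"0.01%"}}
    System.out.println(mbs.getAttribute(name, "NodeUsage"));
  }
}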
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
The class FSNamesystem, method datanodeReport.
DatanodeInfo[] datanodeReport(final DatanodeReportType type)
    throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i = 0; i < arr.length; i++) {
      arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i)).build();
    }
    return arr;
  } finally {
    readUnlock("datanodeReport");
  }
}
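Clients reach this method through ClientProtocol.getDatanodeReport; the simplest public entry point is DistributedFileSystem.getDataNodeStats. A sketch, assuming a configured HDFS client with superuser credentials (the method checks superuser privilege on the NameNode):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class DatanodeReportExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // Ends up in FSNamesystem.datanodeReport(DatanodeReportType.LIVE).
    for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
      System.out.println(dn.getHostName() + " " + dn.getXferAddr());
    }
  }
}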