use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
In the class TestNetworkTopology, the method testContains:
@Test
public void testContains() throws Exception {
  DatanodeDescriptor nodeNotInMap =
      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
  for (int i = 0; i < dataNodes.length; i++) {
    assertTrue(cluster.contains(dataNodes[i]));
  }
  assertFalse(cluster.contains(nodeNotInMap));
}
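The assertions rely on the cluster topology and the dataNodes array built in the test's setup. A minimal sketch of such a fixture, with the hostnames and rack paths chosen purely for illustration rather than copied from TestNetworkTopology, could look like this:

@Before
public void setupDatanodes() {
  // Hypothetical setup: register a few descriptors so that
  // cluster.contains(...) returns true only for nodes added to the map.
  cluster = NetworkTopology.getInstance(new Configuration());
  dataNodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d2/r3")
  };
  for (DatanodeDescriptor dn : dataNodes) {
    cluster.add(dn);
  }
}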
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
In the class DFSNetworkTopology, the method getEligibleChildren:
/**
 * Given a root, an excluded root and a storage type, find all children of
 * the root that have the given storage type available. If the excluded root
 * falls under one of those children, that child's count must be reduced by
 * the storage count of the excluded subtree.
 * @param root the subtree root we check.
 * @param excludeRoot the root of the subtree that should be excluded.
 * @param type the storage type we look for.
 * @return a list of possible nodes, each of them eligible as the next
 * level root we search.
 */
private ArrayList<DFSTopologyNodeImpl> getEligibleChildren(
    DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
  ArrayList<DFSTopologyNodeImpl> candidates = new ArrayList<>();
  int excludeCount = 0;
  if (excludeRoot != null && root.isAncestor(excludeRoot)) {
    // find out the number of nodes to be excluded.
    if (excludeRoot instanceof DFSTopologyNodeImpl) {
      // if excludeRoot is an inner node, count all nodes of this storage
      // type in its subtree.
      excludeCount =
          ((DFSTopologyNodeImpl) excludeRoot).getSubtreeStorageCount(type);
    } else {
      // if excludeRoot is a datanode, simply ignore this one node.
      if (((DatanodeDescriptor) excludeRoot).hasStorageType(type)) {
        excludeCount = 1;
      }
    }
  }
  // walk through all children to check eligibility.
  for (Node node : root.getChildren()) {
    DFSTopologyNodeImpl dfsNode = (DFSTopologyNodeImpl) node;
    int storageCount = dfsNode.getSubtreeStorageCount(type);
    if (excludeRoot != null && excludeCount != 0
        && (dfsNode.isAncestor(excludeRoot) || dfsNode.equals(excludeRoot))) {
      storageCount -= excludeCount;
    }
    if (storageCount > 0) {
      candidates.add(dfsNode);
    }
  }
  return candidates;
}
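Inside DFSNetworkTopology this candidate list feeds the storage-type-aware random node selection. A simplified sketch of how a caller might pick one child with probability proportional to its available storage count follows; it is not the actual chooseRandomWithStorageType logic, and it ignores the exclusion adjustment for brevity:

// Illustrative only: weight each eligible child by its subtree storage count.
private DFSTopologyNodeImpl pickWeighted(
    ArrayList<DFSTopologyNodeImpl> candidates, StorageType type, Random r) {
  int total = 0;
  for (DFSTopologyNodeImpl c : candidates) {
    total += c.getSubtreeStorageCount(type);
  }
  if (total == 0) {
    return null;
  }
  int pick = r.nextInt(total);
  for (DFSTopologyNodeImpl c : candidates) {
    pick -= c.getSubtreeStorageCount(type);
    if (pick < 0) {
      return c;
    }
  }
  return candidates.get(candidates.size() - 1);
}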
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
In the class CacheManager, the method setCachedLocations:
private void setCachedLocations(LocatedBlock block) {
  CachedBlock cachedBlock =
      new CachedBlock(block.getBlock().getBlockId(), (short) 0, false);
  cachedBlock = cachedBlocks.get(cachedBlock);
  if (cachedBlock == null) {
    return;
  }
  List<DatanodeDescriptor> cachedDNs = cachedBlock.getDatanodes(Type.CACHED);
  for (DatanodeDescriptor datanode : cachedDNs) {
    // Filter out cached blocks that do not have a backing replica.
    //
    // This should not happen since it means the CacheManager thinks
    // something is cached that does not exist, but it's a safety
    // measure.
    boolean found = false;
    for (DatanodeInfo loc : block.getLocations()) {
      if (loc.equals(datanode)) {
        block.addCachedLoc(loc);
        found = true;
        break;
      }
    }
    if (!found) {
      LOG.warn("Datanode {} is not a valid cache location for block {} "
          + "because that node does not have a backing replica!",
          datanode, block.getBlock().getBlockName());
    }
  }
}
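On the read path, the cached locations added here surface to clients through LocatedBlock.getCachedLocations(). A hedged sketch of how client code could prefer a cached replica, where the helper name and the first-match policy are illustrative rather than Hadoop's actual reader logic:

// Illustrative helper, not part of Hadoop: prefer a replica cached in datanode memory.
static DatanodeInfo pickReadLocation(LocatedBlock block) {
  DatanodeInfo[] cached = block.getCachedLocations();
  if (cached.length > 0) {
    return cached[0];
  }
  DatanodeInfo[] locations = block.getLocations();
  return locations.length > 0 ? locations[0] : null;
}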
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
In the class CacheManager, the method processCacheReport:
public final void processCacheReport(final DatanodeID datanodeID,
    final List<Long> blockIds) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow();
  final long endTime;
  try {
    final DatanodeDescriptor datanode =
        blockManager.getDatanodeManager().getDatanode(datanodeID);
    if (datanode == null || !datanode.isRegistered()) {
      throw new IOException(
          "processCacheReport from dead or unregistered datanode: " + datanode);
    }
    processCacheReportImpl(datanode, blockIds);
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock("processCacheReport");
  }
  // Log the block report processing stats from Namenode perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addCacheBlockReport((int) (endTime - startTime));
  }
  LOG.debug("Processed cache report from {}, blocks: {}, "
      + "processing time: {} msecs", datanodeID, blockIds.size(),
      (endTime - startTime));
}
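The method follows the NameNode's usual pattern: take the namesystem write lock, time the work, release the lock in a finally block, and record the elapsed time in metrics. A generic sketch of that pattern using a plain ReentrantReadWriteLock in place of FSNamesystem's lock:

// Generic lock-then-time sketch; like processCacheReport, timing starts only
// after the write lock has been acquired, so lock wait is not included.
long runUnderWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock lock,
    Runnable work) {
  lock.writeLock().lock();
  final long start = System.nanoTime();
  final long end;
  try {
    work.run();
  } finally {
    end = System.nanoTime();
    lock.writeLock().unlock();
  }
  return (end - start) / 1_000_000; // elapsed milliseconds
}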
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
In the class FSNamesystem, the method getEstimatedCapacityLostTotal:
// FSNamesystemMBean
@Override
public long getEstimatedCapacityLostTotal() {
  List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
  getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
  long estimatedCapacityLostTotal = 0;
  for (DatanodeDescriptor node : live) {
    VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
    if (volumeFailureSummary != null) {
      estimatedCapacityLostTotal +=
          volumeFailureSummary.getEstimatedCapacityLostTotal();
    }
  }
  return estimatedCapacityLostTotal;
}
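Because the value is exposed through the FSNamesystemMBean interface, it can also be read off a running NameNode over JMX. A hedged sketch of such a query; the bean name Hadoop:service=NameNode,name=FSNamesystemState and the attribute EstimatedCapacityLostTotal follow from the MBean registration and the getter above, but the connector URL and port are purely illustrative and remote JMX must be enabled on the NameNode:

// JMX probe sketch; verify the bean and attribute names against the NameNode's /jmx page.
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class CapacityLostProbe {
  public static void main(String[] args) throws Exception {
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://localhost:8004/jmxrmi"); // port is an assumption
    try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection conn = jmxc.getMBeanServerConnection();
      ObjectName fsState =
          new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
      Object lost = conn.getAttribute(fsState, "EstimatedCapacityLostTotal");
      System.out.println("EstimatedCapacityLostTotal = " + lost + " bytes");
    }
  }
}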