Example 61 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestNetworkTopology method testContains.

@Test
public void testContains() throws Exception {
    DatanodeDescriptor nodeNotInMap = DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
    for (int i = 0; i < dataNodes.length; i++) {
        assertTrue(cluster.contains(dataNodes[i]));
    }
    assertFalse(cluster.contains(nodeNotInMap));
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Test(org.junit.Test)
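
The test leans on a cluster topology and a dataNodes array that are built in the test class's setup. A minimal, self-contained sketch of such a fixture (a hedged reconstruction, not the exact setup of TestNetworkTopology; the addresses and rack paths below are made up) could look like:

private NetworkTopology cluster;
private DatanodeDescriptor[] dataNodes;

@Before
public void setupDatanodes() {
    // build a small topology and register a few datanodes on it;
    // contains() returns true only for nodes that were added to the map
    cluster = NetworkTopology.getInstance(new Configuration());
    dataNodes = new DatanodeDescriptor[] {
        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r2"),
        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d2/r3")
    };
    for (DatanodeDescriptor dn : dataNodes) {
        cluster.add(dn);
    }
}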

Example 62 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class DFSNetworkTopology method getEligibleChildren.

/**
   * Given a root, an excluded root, and a storage type, find all children of
   * the root that have the storage type available. If the excluded root falls
   * under one of those children, that child's storage count must be reduced
   * by the storage count of the excluded root.
   * @param root the subtree root we check.
   * @param excludeRoot the root of the subtree that should be excluded.
   * @param type the storage type we look for.
   * @return a list of possible nodes, each of them eligible as the next-level
   * root we search.
   */
private ArrayList<DFSTopologyNodeImpl> getEligibleChildren(DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
    ArrayList<DFSTopologyNodeImpl> candidates = new ArrayList<>();
    int excludeCount = 0;
    if (excludeRoot != null && root.isAncestor(excludeRoot)) {
        // find out the number of nodes to be excluded.
        if (excludeRoot instanceof DFSTopologyNodeImpl) {
            // if excludeRoot is an inner node, exclude the count of all nodes
            // of this storage type in its subtree.
            excludeCount = ((DFSTopologyNodeImpl) excludeRoot).getSubtreeStorageCount(type);
        } else {
            // if excludedRoot is a datanode, simply ignore this one node
            if (((DatanodeDescriptor) excludeRoot).hasStorageType(type)) {
                excludeCount = 1;
            }
        }
    }
    // walk through all children to check eligibility.
    for (Node node : root.getChildren()) {
        DFSTopologyNodeImpl dfsNode = (DFSTopologyNodeImpl) node;
        int storageCount = dfsNode.getSubtreeStorageCount(type);
        if (excludeRoot != null && excludeCount != 0 && (dfsNode.isAncestor(excludeRoot) || dfsNode.equals(excludeRoot))) {
            storageCount -= excludeCount;
        }
        if (storageCount > 0) {
            candidates.add(dfsNode);
        }
    }
    return candidates;
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Node(org.apache.hadoop.net.Node) ArrayList(java.util.ArrayList)
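
getEligibleChildren is only useful as one step of a level-by-level descent through the topology. The sketch below shows one way such a descent could call it; it is a simplified, hypothetical reconstruction (the real logic lives in DFSNetworkTopology's chooseRandomWithStorageType and weights the choice by each child's subtree storage count, which the uniform pick here does not):

// Hypothetical, simplified caller of getEligibleChildren(). At rack level the
// children are DatanodeDescriptors and are filtered directly; above rack level
// the eligible inner nodes are computed and one is picked uniformly at random
// (the real code makes a weighted pick instead).
private Node chooseRandomSketch(DFSTopologyNodeImpl root, Node excludeRoot, StorageType type, Random random) {
    if (root.isRack()) {
        ArrayList<Node> leaves = new ArrayList<>();
        for (Node node : root.getChildren()) {
            if (node.equals(excludeRoot)) {
                continue;
            }
            if (((DatanodeDescriptor) node).hasStorageType(type)) {
                leaves.add(node);
            }
        }
        return leaves.isEmpty() ? null : leaves.get(random.nextInt(leaves.size()));
    }
    ArrayList<DFSTopologyNodeImpl> eligible = getEligibleChildren(root, excludeRoot, type);
    if (eligible.isEmpty()) {
        return null;
    }
    DFSTopologyNodeImpl next = eligible.get(random.nextInt(eligible.size()));
    return chooseRandomSketch(next, excludeRoot, type, random);
}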

Example 63 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class CacheManager method setCachedLocations.

private void setCachedLocations(LocatedBlock block) {
    CachedBlock cachedBlock = new CachedBlock(block.getBlock().getBlockId(), (short) 0, false);
    cachedBlock = cachedBlocks.get(cachedBlock);
    if (cachedBlock == null) {
        return;
    }
    List<DatanodeDescriptor> cachedDNs = cachedBlock.getDatanodes(Type.CACHED);
    for (DatanodeDescriptor datanode : cachedDNs) {
        // Filter out cached blocks that do not have a backing replica.
        //
        // This should not happen since it means the CacheManager thinks
        // something is cached that does not exist, but it's a safety
        // measure.
        boolean found = false;
        for (DatanodeInfo loc : block.getLocations()) {
            if (loc.equals(datanode)) {
                block.addCachedLoc(loc);
                found = true;
                break;
            }
        }
        if (!found) {
            LOG.warn("Datanode {} is not a valid cache location for block {} " + "because that node does not have a backing replica!", datanode, block.getBlock().getBlockName());
        }
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo)
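
setCachedLocations above handles a single block; in practice it is applied to every block handed back to a client. CacheManager exposes a public wrapper over LocatedBlocks for that purpose, and a minimal sketch of such a wrapper (the exact guards of the real method are not reproduced here) is:

// Hedged sketch: mark cached locations for every block in a file listing by
// delegating to the per-block helper shown above.
public void setCachedLocations(LocatedBlocks locations) {
    if (locations == null) {
        return;
    }
    for (LocatedBlock lb : locations.getLocatedBlocks()) {
        setCachedLocations(lb);
    }
}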

Example 64 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class CacheManager method processCacheReport.

public final void processCacheReport(final DatanodeID datanodeID, final List<Long> blockIds) throws IOException {
    namesystem.writeLock();
    final long startTime = Time.monotonicNow();
    final long endTime;
    try {
        final DatanodeDescriptor datanode = blockManager.getDatanodeManager().getDatanode(datanodeID);
        if (datanode == null || !datanode.isRegistered()) {
            throw new IOException("processCacheReport from dead or unregistered datanode: " + datanode);
        }
        processCacheReportImpl(datanode, blockIds);
    } finally {
        endTime = Time.monotonicNow();
        namesystem.writeUnlock("processCacheReport");
    }
    // Log the block report processing stats from Namenode perspective
    final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
    if (metrics != null) {
        metrics.addCacheBlockReport((int) (endTime - startTime));
    }
    LOG.debug("Processed cache report from {}, blocks: {}, " + "processing time: {} msecs", datanodeID, blockIds.size(), (endTime - startTime));
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) IOException(java.io.IOException) NameNodeMetrics(org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics)
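
A cache report reaches this method through the NameNode's RPC layer. The real entry point is NameNodeRpcServer's cacheReport, whose startup and registration checks are not reproduced here; a rough, hypothetical forwarding stub would be little more than:

// Hypothetical glue code: hand a datanode's list of cached block ids to the
// CacheManager; the verification done by the real RPC server is omitted.
public void onCacheReport(DatanodeID datanodeID, List<Long> cachedBlockIds) throws IOException {
    namesystem.getCacheManager().processCacheReport(datanodeID, cachedBlockIds);
}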

Example 65 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class FSNamesystem method getEstimatedCapacityLostTotal.

// FSNamesystemMBean
@Override
public long getEstimatedCapacityLostTotal() {
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
    long estimatedCapacityLostTotal = 0;
    for (DatanodeDescriptor node : live) {
        VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
        if (volumeFailureSummary != null) {
            estimatedCapacityLostTotal += volumeFailureSummary.getEstimatedCapacityLostTotal();
        }
    }
    return estimatedCapacityLostTotal;
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) ArrayList(java.util.ArrayList) VolumeFailureSummary(org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary)
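
The same fetch-live-datanodes-and-sum pattern serves the other volume-related MBean metrics; FSNamesystem has a sibling getVolumeFailuresTotal() getter built on it. The body below is an illustrative reconstruction of that pattern rather than a verbatim copy:

// FSNamesystemMBean
@Override
public int getVolumeFailuresTotal() {
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
    int volumeFailuresTotal = 0;
    for (DatanodeDescriptor node : live) {
        // number of failed storage volumes the datanode reported in its
        // last heartbeat
        volumeFailuresTotal += node.getVolumeFailures();
    }
    return volumeFailuresTotal;
}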

Aggregations

DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 74
Test (org.junit.Test): 37
ArrayList (java.util.ArrayList): 23
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 21
Path (org.apache.hadoop.fs.Path): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 13
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
HashMap (java.util.HashMap): 9
Configuration (org.apache.hadoop.conf.Configuration): 9
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9
Node (org.apache.hadoop.net.Node): 9
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 8
IOException (java.io.IOException): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7
Map (java.util.Map): 6