
Example 46 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

The class TestDFSNetworkTopology, method testGetSubtreeStorageCount.

/**
   * Tests getting subtree storage counts, and verifies that they remain
   * correct when the subtree is updated.
   * @throws Exception
   */
@Test
public void testGetSubtreeStorageCount() throws Exception {
    // add and then remove a node on rack /l2/d3/r1, so all the inner nodes
    // /l2, /l2/d3 and /l2/d3/r1 should be affected. /l2/d3/r3 should stay
    // the same; it is checked only as a reference
    Node l2 = CLUSTER.getNode("/l2");
    Node l2d3 = CLUSTER.getNode("/l2/d3");
    Node l2d3r1 = CLUSTER.getNode("/l2/d3/r1");
    Node l2d3r3 = CLUSTER.getNode("/l2/d3/r3");
    assertTrue(l2 instanceof DFSTopologyNodeImpl);
    assertTrue(l2d3 instanceof DFSTopologyNodeImpl);
    assertTrue(l2d3r1 instanceof DFSTopologyNodeImpl);
    assertTrue(l2d3r3 instanceof DFSTopologyNodeImpl);
    DFSTopologyNodeImpl innerl2 = (DFSTopologyNodeImpl) l2;
    DFSTopologyNodeImpl innerl2d3 = (DFSTopologyNodeImpl) l2d3;
    DFSTopologyNodeImpl innerl2d3r1 = (DFSTopologyNodeImpl) l2d3r1;
    DFSTopologyNodeImpl innerl2d3r3 = (DFSTopologyNodeImpl) l2d3r3;
    assertEquals(4, innerl2.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(2, innerl2d3.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(1, innerl2d3r1.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(1, innerl2d3r3.getSubtreeStorageCount(StorageType.DISK));
    DatanodeStorageInfo storageInfo = DFSTestUtil.createDatanodeStorageInfo("StorageID", "1.2.3.4", "/l2/d3/r1", "newhost");
    DatanodeDescriptor newNode = storageInfo.getDatanodeDescriptor();
    CLUSTER.add(newNode);
    // after adding a storage to /l2/d3/r1, each ancestor inner node should
    // have its DISK count incremented by 1.
    assertEquals(5, innerl2.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(3, innerl2d3.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(2, innerl2d3r1.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(1, innerl2d3r3.getSubtreeStorageCount(StorageType.DISK));
    CLUSTER.remove(newNode);
    assertEquals(4, innerl2.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(2, innerl2d3.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(1, innerl2d3r1.getSubtreeStorageCount(StorageType.DISK));
    assertEquals(1, innerl2d3r3.getSubtreeStorageCount(StorageType.DISK));
}
Also used: DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Node(org.apache.hadoop.net.Node) Test(org.junit.Test)
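The invariant these assertions exercise is that every ancestor's per-StorageType counter moves in lockstep with leaf additions and removals. A minimal standalone sketch of that bookkeeping follows; the class, field and method names are illustrative assumptions, not the actual DFSTopologyNodeImpl code.

import java.util.EnumMap;
import java.util.Map;

// Hypothetical inner-node type; DFSTopologyNodeImpl keeps equivalent
// per-StorageType counters, but these names are assumptions.
class CountingInnerNode {
    enum StorageType { DISK, ARCHIVE, SSD, RAM_DISK }

    // null at the topology root
    private final CountingInnerNode parent;
    private final Map<StorageType, Integer> subtreeStorageCount =
            new EnumMap<>(StorageType.class);

    CountingInnerNode(CountingInnerNode parent) {
        this.parent = parent;
    }

    int getSubtreeStorageCount(StorageType type) {
        return subtreeStorageCount.getOrDefault(type, 0);
    }

    // Called when a datanode storage is added to (delta = +1) or removed
    // from (delta = -1) a leaf under this node; walking up keeps every
    // ancestor consistent.
    void updateSubtreeStorageCount(StorageType type, int delta) {
        subtreeStorageCount.merge(type, delta, Integer::sum);
        if (parent != null) {
            parent.updateSubtreeStorageCount(type, delta);
        }
    }
}

With this shape, adding one DISK storage under /l2/d3/r1 bumps the counters on /l2/d3/r1, /l2/d3 and /l2 by one each, which is exactly the 1/2/4 to 2/3/5 transition the assertions check, and removing it restores the original counts.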

Example 47 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

The class TestDFSNetworkTopology, method testChooseRandomWithStorageTypeWithExcluded.

@Test
public void testChooseRandomWithStorageTypeWithExcluded() throws Exception {
    Node n;
    DatanodeDescriptor dd;
    // Test choosing a random node with excludes. Under /l2/d3 every rack has
    // exactly one host; of its five racks r1..r5, only r4 and r5 have
    // ARCHIVE storage. host12 is the host under /l2/d3/r4 and host13 is the
    // host under /l2/d3/r5.
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r4", null, null, StorageType.ARCHIVE);
    HashSet<Node> excluded = new HashSet<>();
    // exclude the host on r4 (since there is only one host, no randomness here)
    excluded.add(n);
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", null, null, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host12") || dd.getHostName().equals("host13"));
    }
    // test excluded nodes: host12 is excluded, so only host13 qualifies
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", null, excluded, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host13"));
    }
    // test exclude scope
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", "/l2/d3/r4", null, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host13"));
    }
    // test exclude scope + excluded node with expected null return node
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", "/l2/d3/r5", excluded, StorageType.ARCHIVE);
        assertNull(n);
    }
    // test exclude scope + excluded node with expected non-null return node:
    // host6 is the only DISK host under /l1/d2, so there is no randomness here
    n = CLUSTER.chooseRandomWithStorageType("/l1/d2", null, null, StorageType.DISK);
    dd = (DatanodeDescriptor) n;
    assertEquals("host6", dd.getHostName());
    // add host6 to the excluded set
    excluded.add(n);
    Set<String> expectedSet = Sets.newHashSet("host4", "host5");
    for (int i = 0; i < 10; i++) {
        // under /l1, there are four hosts with DISK:
        // /l1/d1/r1/host2, /l1/d1/r2/host4, /l1/d1/r2/host5 and /l1/d2/r3/host6
        // host6 is an excluded node and host2 falls under the excluded scope
        // /l1/d1/r1, so the result should always be host4 or host5
        n = CLUSTER.chooseRandomWithStorageType("/l1", "/l1/d1/r1", excluded, StorageType.DISK);
        dd = (DatanodeDescriptor) n;
        assertTrue(expectedSet.contains(dd.getHostName()));
    }
}
Also used: DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Node(org.apache.hadoop.net.Node) HashSet(java.util.HashSet) Test(org.junit.Test)
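Across these calls the parameter order is (scope, excludedScope, excludedNodes, storageType). A flat, standalone sketch of that filtering contract follows; the class, record and field names are illustrative assumptions, and the real DFSNetworkTopology performs a weighted random descent through the topology tree rather than a linear scan.

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.Set;

// Hypothetical stand-in for the topology; candidate discovery and the
// storage-type check are assumptions made for illustration only.
class ChooseRandomSketch {
    // A leaf host with its topology path and the storage types it offers.
    record Host(String path, String name, Set<String> storageTypes) {}

    private final List<Host> hosts;
    private final Random random = new Random();

    ChooseRandomSketch(List<Host> hosts) {
        this.hosts = hosts;
    }

    // Mirrors the argument order used in the test above.
    Host chooseRandomWithStorageType(String scope, String excludedScope,
            Set<Host> excludedNodes, String storageType) {
        List<Host> candidates = new ArrayList<>();
        for (Host h : hosts) {
            boolean inScope = h.path().startsWith(scope);
            boolean excludedByScope =
                    excludedScope != null && h.path().startsWith(excludedScope);
            boolean excludedByNode =
                    excludedNodes != null && excludedNodes.contains(h);
            if (inScope && !excludedByScope && !excludedByNode
                    && h.storageTypes().contains(storageType)) {
                candidates.add(h);
            }
        }
        // Returning null when nothing qualifies is what the
        // "expected null" loop in the test relies on.
        return candidates.isEmpty()
                ? null
                : candidates.get(random.nextInt(candidates.size()));
    }
}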

Example 48 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

The class TestLocatedBlock, method testAddCachedLocWhenEmpty.

@Test(timeout = 10000)
public void testAddCachedLocWhenEmpty() {
    DatanodeInfo[] ds = new DatanodeInfo[0];
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds);
    DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003));
    try {
        l1.addCachedLoc(dn);
        fail("Adding dn when block is empty should throw");
    } catch (IllegalArgumentException e) {
        LOG.info("Expected exception:", e);
    }
}
Also used: DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Test(org.junit.Test)
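The try/fail/catch idiom above is the classic JUnit 4 pattern for asserting an exception while still logging it. When the log line is not needed, the same check can be written with the expected attribute that @Test already supports; this is a sketch reusing the example's own names, not a replacement from the Hadoop tree.

@Test(timeout = 10000, expected = IllegalArgumentException.class)
public void testAddCachedLocWhenEmpty() {
    DatanodeInfo[] ds = new DatanodeInfo[0];
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds);
    DatanodeDescriptor dn = new DatanodeDescriptor(
        new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003));
    // addCachedLoc is expected to reject a location for an empty block
    l1.addCachedLoc(dn);
}

The trade-off: the expected attribute cannot inspect the exception message, and it passes if any statement in the method throws IllegalArgumentException, so the explicit catch remains the safer form when the setup itself could throw.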

Example 49 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

The class TestDataNodeLifeline, method testLifelineForDeadNode.

@Test
public void testLifelineForDeadNode() throws Exception {
    long initialCapacity = cluster.getNamesystem(0).getCapacityTotal();
    assertTrue(initialCapacity > 0);
    dn.setHeartbeatsDisabledForTests(true);
    cluster.setDataNodesDead();
    assertEquals("Capacity should be 0 after all DNs dead", 0, cluster.getNamesystem(0).getCapacityTotal());
    bpsa.sendLifelineForTests();
    assertEquals("Lifeline should be ignored for dead node", 0, cluster.getNamesystem(0).getCapacityTotal());
    // Wait for re-registration and heartbeat
    dn.setHeartbeatsDisabledForTests(false);
    final DatanodeDescriptor dnDesc = cluster.getNamesystem(0).getBlockManager().getDatanodeManager().getDatanodes().iterator().next();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            return dnDesc.isAlive() && dnDesc.isHeartbeatedSinceRegistration();
        }
    }, 100, 5000);
    assertEquals("Capacity should include only live capacity", initialCapacity, cluster.getNamesystem(0).getCapacityTotal());
}
Also used: DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Mockito.anyBoolean(org.mockito.Mockito.anyBoolean) Test(org.junit.Test)
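Since GenericTestUtils.waitFor takes a Supplier&lt;Boolean&gt;, on Java 8+ the anonymous class at the end can collapse into a lambda with identical behavior:

// Poll every 100 ms; fail the wait if the condition is not met within 5 s
GenericTestUtils.waitFor(
    () -> dnDesc.isAlive() && dnDesc.isHeartbeatedSinceRegistration(),
    100, 5000);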

Example 50 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

The class TestDataNodeErasureCodingMetrics, method setDataNodeDead.

private void setDataNodeDead(DatanodeID dnID) throws IOException {
    DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(cluster.getNamesystem(), dnID);
    DFSTestUtil.setDatanodeDead(dnd);
    BlockManagerTestUtil.checkHeartbeat(cluster.getNamesystem().getBlockManager());
}
Also used: DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor)
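A typical caller hands this helper the DatanodeID of the node it wants the NameNode to consider dead. The lines below are an illustrative usage sketch, not code from TestDataNodeErasureCodingMetrics; cluster.getDataNodes() and DataNode.getDatanodeId() are the standard MiniDFSCluster and DataNode accessors.

// Mark the first datanode dead from the NameNode's point of view,
// then let the block manager re-check heartbeats via the helper.
DataNode dnToKill = cluster.getDataNodes().get(0);
setDataNodeDead(dnToKill.getDatanodeId());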

Aggregations

DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) 74
Test (org.junit.Test) 37
ArrayList (java.util.ArrayList) 23
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) 21
Path (org.apache.hadoop.fs.Path) 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 13
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 12
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 12
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 11
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 10
HashMap (java.util.HashMap) 9
Configuration (org.apache.hadoop.conf.Configuration) 9
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 9
Node (org.apache.hadoop.net.Node) 9
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) 8
IOException (java.io.IOException) 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 7
Map (java.util.Map) 6