
Example 16 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestCachedBlocksList method testSingleList.

@Test(timeout = 60000)
public void testSingleList() {
    DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003));
    CachedBlock[] blocks = new CachedBlock[] {
        new CachedBlock(0L, (short) 1, true),
        new CachedBlock(1L, (short) 1, true),
        new CachedBlock(2L, (short) 1, true)
    };
    // check that lists are empty
    Assert.assertTrue("expected pending cached list to start off empty.", !dn.getPendingCached().iterator().hasNext());
    Assert.assertTrue("expected cached list to start off empty.", !dn.getCached().iterator().hasNext());
    Assert.assertTrue("expected pending uncached list to start off empty.", !dn.getPendingUncached().iterator().hasNext());
    // add a block to the back
    Assert.assertTrue(dn.getCached().add(blocks[0]));
    Assert.assertTrue("expected pending cached list to still be empty.", !dn.getPendingCached().iterator().hasNext());
    Assert.assertEquals("failed to insert blocks[0]", blocks[0], dn.getCached().iterator().next());
    Assert.assertTrue("expected pending uncached list to still be empty.", !dn.getPendingUncached().iterator().hasNext());
    // add another block to the back
    Assert.assertTrue(dn.getCached().add(blocks[1]));
    Iterator<CachedBlock> iter = dn.getCached().iterator();
    Assert.assertEquals(blocks[0], iter.next());
    Assert.assertEquals(blocks[1], iter.next());
    Assert.assertTrue(!iter.hasNext());
    // add a block to the front
    Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
    iter = dn.getCached().iterator();
    Assert.assertEquals(blocks[2], iter.next());
    Assert.assertEquals(blocks[0], iter.next());
    Assert.assertEquals(blocks[1], iter.next());
    Assert.assertTrue(!iter.hasNext());
    // remove a block from the middle
    Assert.assertTrue(dn.getCached().remove(blocks[0]));
    iter = dn.getCached().iterator();
    Assert.assertEquals(blocks[2], iter.next());
    Assert.assertEquals(blocks[1], iter.next());
    Assert.assertTrue(!iter.hasNext());
    // remove all blocks
    dn.getCached().clear();
    Assert.assertTrue("expected cached list to be empty after clear.", !dn.getPendingCached().iterator().hasNext());
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) CachedBlock(org.apache.hadoop.hdfs.server.namenode.CachedBlock) Test(org.junit.Test)
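
The walk-the-iterator-and-assert pattern above repeats after every mutation; a minimal sketch of a helper that could replace it follows. The name assertListContents is hypothetical, not part of the Hadoop test.

// Hypothetical helper (not in the original test): asserts that a
// CachedBlocksList yields exactly the given blocks, in order.
private static void assertListContents(
        DatanodeDescriptor.CachedBlocksList list, CachedBlock... expected) {
    Iterator<CachedBlock> iter = list.iterator();
    for (CachedBlock block : expected) {
        Assert.assertTrue("list ended early", iter.hasNext());
        Assert.assertEquals(block, iter.next());
    }
    Assert.assertFalse("list has unexpected extra elements", iter.hasNext());
}

With it, the checks after each mutation collapse to calls like assertListContents(dn.getCached(), blocks[2], blocks[0], blocks[1]).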

Example 17 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestCachedBlocksList method testMultipleLists.

@Test(timeout = 60000)
public void testMultipleLists() {
    DatanodeDescriptor[] datanodes = new DatanodeDescriptor[] {
        new DatanodeDescriptor(new DatanodeID(
            "127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003)),
        new DatanodeDescriptor(new DatanodeID(
            "127.0.1.1", "localhost", "efgh", 6000, 6001, 6002, 6003))
    };
    CachedBlocksList[] lists = new CachedBlocksList[] {
        datanodes[0].getPendingCached(),
        datanodes[0].getCached(),
        datanodes[1].getPendingCached(),
        datanodes[1].getCached(),
        datanodes[1].getPendingUncached()
    };
    final int NUM_BLOCKS = 8000;
    CachedBlock[] blocks = new CachedBlock[NUM_BLOCKS];
    for (int i = 0; i < NUM_BLOCKS; i++) {
        blocks[i] = new CachedBlock(i, (short) i, true);
    }
    Random r = new Random(654);
    for (CachedBlocksList list : lists) {
        testAddElementsToList(list, blocks);
    }
    for (CachedBlocksList list : lists) {
        testRemoveElementsFromList(r, list, blocks);
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) CachedBlocksList(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList) Random(java.util.Random) CachedBlock(org.apache.hadoop.hdfs.server.namenode.CachedBlock) Test(org.junit.Test)
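
The helpers testAddElementsToList and testRemoveElementsFromList are private methods of TestCachedBlocksList that the search result does not expand. Here is a simplified, hedged sketch of what they might look like; the real implementations may differ, for example in how the removal order is randomized. It assumes java.util.Arrays, java.util.Collections, and java.util.List are imported alongside the imports listed above.

// Sketch: add every block and verify each insertion is reported.
private void testAddElementsToList(CachedBlocksList list,
        CachedBlock[] blocks) {
    Assert.assertTrue("expected list to start off empty.",
        !list.iterator().hasNext());
    for (CachedBlock block : blocks) {
        Assert.assertTrue(list.add(block));
    }
}

// Sketch: remove in a shuffled order so unlinking is exercised at the
// head, middle, and tail of the list.
private void testRemoveElementsFromList(Random r,
        CachedBlocksList list, CachedBlock[] blocks) {
    List<CachedBlock> shuffled = new ArrayList<>(Arrays.asList(blocks));
    Collections.shuffle(shuffled, r);
    for (CachedBlock block : shuffled) {
        Assert.assertTrue(list.remove(block));
    }
    Assert.assertTrue("expected list to be empty after removals.",
        !list.iterator().hasNext());
}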

Example 18 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestDecommissioningStatus method testDecommissionStatusAfterDNRestart.

/**
   * Verify a DN remains in DECOMMISSION_INPROGRESS state if it is marked
   * as dead before decommission has completed. That will allow the DN to
   * resume the replication process after it rejoins the cluster.
   * the replication process after it rejoins the cluster.
   */
@Test(timeout = 120000)
public void testDecommissionStatusAfterDNRestart() throws Exception {
    DistributedFileSystem fileSys = (DistributedFileSystem) cluster.getFileSystem();
    // Create a file with one block. That block has one replica.
    Path f = new Path("decommission.dat");
    DFSTestUtil.createFile(fileSys, f, fileSize, fileSize, fileSize, (short) 1, seed);
    // Find the DN that owns the only replica.
    RemoteIterator<LocatedFileStatus> fileList = fileSys.listLocatedStatus(f);
    BlockLocation[] blockLocations = fileList.next().getBlockLocations();
    String dnName = blockLocations[0].getNames()[0];
    // Decommission the DN.
    FSNamesystem fsn = cluster.getNamesystem();
    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
    decommissionNode(dnName);
    dm.refreshNodes(conf);
    // Stop the DN when decommission is in progress.
    // Given DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY is set to 1 and the
    // size of the block, the decommission will take much longer than the
    // test timeout value to complete. So when stopDataNode is called,
    // decommission should still be in progress.
    DataNodeProperties dataNodeProperties = cluster.stopDataNode(dnName);
    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
    while (true) {
        dm.fetchDatanodes(null, dead, false);
        if (dead.size() == 1) {
            break;
        }
        Thread.sleep(1000);
    }
    // Force removal of the dead node's blocks.
    BlockManagerTestUtil.checkHeartbeat(fsn.getBlockManager());
    // Force DatanodeManager to check decommission state.
    BlockManagerTestUtil.recheckDecommissionState(dm);
    // Verify that the DN remains in DECOMMISSION_INPROGRESS state.
    assertTrue("the node should be DECOMMISSION_IN_PROGRESSS", dead.get(0).isDecommissionInProgress());
    // Check DatanodeManager#getDecommissioningNodes, make sure it returns
    // the node as decommissioning, even if it's dead.
    List<DatanodeDescriptor> decomlist = dm.getDecommissioningNodes();
    assertTrue("The node should be be decommissioning", decomlist.size() == 1);
    // Delete the under-replicated file, which should let the 
    // DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
    AdminStatesBaseTest.cleanupFile(fileSys, f);
    BlockManagerTestUtil.recheckDecommissionState(dm);
    assertTrue("the node should be decommissioned", dead.get(0).isDecommissioned());
    // Add the node back
    cluster.restartDataNode(dataNodeProperties, true);
    cluster.waitActive();
    // Call refreshNodes on FSNamesystem with empty exclude file.
    // This will remove the datanodes from decommissioning list and
    // make them available again.
    hostsFileWriter.initExcludeHost("");
    dm.refreshNodes(conf);
}
Also used : Path(org.apache.hadoop.fs.Path) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) ArrayList(java.util.ArrayList) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) BlockLocation(org.apache.hadoop.fs.BlockLocation) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) Test(org.junit.Test) AdminStatesBaseTest(org.apache.hadoop.hdfs.AdminStatesBaseTest)
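
The while (true) loop above hand-rolls a poll for the dead-node count. A hedged alternative using Hadoop's own test utility org.apache.hadoop.test.GenericTestUtils, assuming the Supplier-based waitFor overload available in this Hadoop version (Guava's Supplier in older releases):

// Sketch only: probe once per second, fail after 60 seconds. The list
// is cleared on each probe because fetchDatanodes appends to it.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        dead.clear();
        dm.fetchDatanodes(null, dead, false);
        return dead.size() == 1;
    }
}, 1000, 60000);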

Example 19 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestDecommissioningStatus method testDecommissionDeadDN.

/**
   * Verify the support for decommissioning a datanode that is already dead.
   * Under this scenario the datanode should immediately be marked as
   * DECOMMISSIONED
   */
@Test(timeout = 120000)
public void testDecommissionDeadDN() throws Exception {
    Logger log = Logger.getLogger(DecommissionManager.class);
    log.setLevel(Level.DEBUG);
    DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
    String dnName = dnID.getXferAddr();
    DataNodeProperties stoppedDN = cluster.stopDataNode(0);
    DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(), false, 30000);
    FSNamesystem fsn = cluster.getNamesystem();
    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
    DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
    decommissionNode(dnName);
    dm.refreshNodes(conf);
    BlockManagerTestUtil.recheckDecommissionState(dm);
    assertTrue(dnDescriptor.isDecommissioned());
    // Add the node back
    cluster.restartDataNode(stoppedDN, true);
    cluster.waitActive();
    // Call refreshNodes on FSNamesystem with empty exclude file to remove the
    // datanode from decommissioning list and make it available again.
    hostsFileWriter.initExcludeHost("");
    dm.refreshNodes(conf);
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) Logger(org.apache.log4j.Logger) Test(org.junit.Test) AdminStatesBaseTest(org.apache.hadoop.hdfs.AdminStatesBaseTest)
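
decommissionNode is a private helper of TestDecommissioningStatus that the search result does not expand. A hedged sketch of its essential step, assuming it only needs to handle one node at a time (the real helper may also carry over previously excluded nodes):

// Sketch: decommissioning is driven by the exclude file; list the node
// there, then dm.refreshNodes(conf) re-reads the file and starts the
// decommission.
private void decommissionNode(String dnName) throws IOException {
    hostsFileWriter.initExcludeHost(dnName);
}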

Example 20 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestCacheDirectives method checkPendingCachedEmpty.

/**
   * Check that the NameNode is not attempting to cache anything.
   */
private void checkPendingCachedEmpty(MiniDFSCluster cluster) throws Exception {
    Thread.sleep(1000);
    cluster.getNamesystem().readLock();
    try {
        final DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager().getDatanodeManager();
        for (DataNode dn : cluster.getDataNodes()) {
            DatanodeDescriptor descriptor = datanodeManager.getDatanode(dn.getDatanodeId());
            Assert.assertTrue("Pending cached list of " + descriptor + " is not empty, " + Arrays.toString(descriptor.getPendingCached().toArray()), descriptor.getPendingCached().isEmpty());
        }
    } finally {
        cluster.getNamesystem().readUnlock();
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode)
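
A hedged usage sketch for this helper; the names dfs and directiveId are assumptions, not taken from the test. After a cache directive is removed, the NameNode should stop scheduling caching work, and the fixed Thread.sleep(1000) gives its cache rescan a chance to settle before the check:

// Assumed names: `dfs` is the cluster's DistributedFileSystem and
// `directiveId` was returned by an earlier addCacheDirective call.
dfs.removeCacheDirective(directiveId);
checkPendingCachedEmpty(cluster);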

Aggregations

DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 75 uses
Test (org.junit.Test): 37 uses
ArrayList (java.util.ArrayList): 23 uses
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 21 uses
Path (org.apache.hadoop.fs.Path): 19 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 13 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 11 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 uses
HashMap (java.util.HashMap): 9 uses
Configuration (org.apache.hadoop.conf.Configuration): 9 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9 uses
Node (org.apache.hadoop.net.Node): 9 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 8 uses
IOException (java.io.IOException): 7 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 uses
Map (java.util.Map): 6 uses