Example 71 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method testDecommissionWithOpenFileAndBlockRecovery.

@Test(timeout = 360000)
public void testDecommissionWithOpenFileAndBlockRecovery() throws IOException, InterruptedException {
    startCluster(1, 6);
    getCluster().waitActive();
    Path file = new Path("/testRecoveryDecommission");
    // Create a file and never close the output stream to trigger recovery
    DistributedFileSystem dfs = getCluster().getFileSystem();
    FSDataOutputStream out = dfs.create(file, true, getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 3, blockSize);
    // Write data to the file
    long writtenBytes = 0;
    while (writtenBytes < fileSize) {
        out.writeLong(writtenBytes);
        writtenBytes += 8;
    }
    out.hsync();
    DatanodeInfo[] lastBlockLocations = NameNodeAdapter.getBlockLocations(getCluster().getNameNode(), "/testRecoveryDecommission", 0, fileSize).getLastLocatedBlock().getLocations();
    // Decommission all nodes of the last block
    ArrayList<String> toDecom = new ArrayList<>();
    for (DatanodeInfo dnDecom : lastBlockLocations) {
        toDecom.add(dnDecom.getXferAddr());
    }
    initExcludeHosts(toDecom);
    refreshNodes(0);
    // Make sure hard lease expires to trigger replica recovery
    getCluster().setLeasePeriod(300L, 300L);
    Thread.sleep(2 * BLOCKREPORT_INTERVAL_MSEC);
    for (DatanodeInfo dnDecom : lastBlockLocations) {
        DatanodeInfo datanode = NameNodeAdapter.getDatanode(getCluster().getNamesystem(), dnDecom);
        waitNodeState(datanode, AdminStates.DECOMMISSIONED);
    }
    // The file length visible to clients should match what was written
    assertEquals(writtenBytes, dfs.getFileStatus(file).getLen());
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
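For comparison, the last block's replica locations can also be resolved without the NameNodeAdapter test hook, going through the public client API instead. A minimal sketch, reusing dfs, fileSize, and the path from the test above:

// Sketch: fetching the last block's locations via DFSClient rather than NameNodeAdapter.
// Assumes the open DistributedFileSystem `dfs` and `fileSize` from the test above.
DFSClient client = dfs.getClient();
LocatedBlocks blocks = client.getLocatedBlocks("/testRecoveryDecommission", 0, fileSize);
LocatedBlock lastBlock = blocks.getLastLocatedBlock();
for (DatanodeInfo dn : lastBlock.getLocations()) {
    System.out.println("Last-block replica on " + dn.getXferAddr());
}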

Example 72 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method testDecommissionWithNamenodeRestart.

/**
 * Tests restart of the namenode while datanode hosts are added to the exclude file.
 */
@Test(timeout = 360000)
public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
    LOG.info("Starting test testDecommissionWithNamenodeRestart");
    int numNamenodes = 1;
    int numDatanodes = 1;
    int replicas = 1;
    getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
    getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
    startCluster(numNamenodes, numDatanodes);
    Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
    FileSystem fileSys = getCluster().getFileSystem();
    writeFile(fileSys, file1, replicas);
    DFSClient client = getDfsClient(0);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    DatanodeID excludedDatanodeID = info[0];
    String excludedDatanodeName = info[0].getXferAddr();
    initExcludeHost(excludedDatanodeName);
    // Add a new datanode to the cluster
    getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
    numDatanodes += 1;
    assertEquals("Number of datanodes should be 2", 2, getCluster().getDataNodes().size());
    // Restart the namenode
    getCluster().restartNameNode();
    DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(getCluster().getNamesystem(), excludedDatanodeID);
    waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
    // Ensure decommissioned datanode is not automatically shutdown
    assertEquals("All datanodes must be alive", numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length);
    assertTrue("Checked if block was replicated after decommission.", checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(), numDatanodes) == null);
    cleanupFile(fileSys, file1);
    // Restart the cluster and ensure recommissioned datanodes
    // are allowed to register with the namenode
    shutdownCluster();
    startCluster(numNamenodes, numDatanodes);
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
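The waitNodeState call above is a helper defined in the test class. A hedged sketch of what such a wait loop can look like, polling getAdminState() on the DatanodeInfo (waitForAdminState is a hypothetical name and the poll interval is an arbitrary choice):

// Sketch of a wait-for-admin-state loop in the spirit of the test's waitNodeState helper.
// The DatanodeInfo returned by NameNodeAdapter.getDatanode is the namenode's live
// descriptor, so its admin state is updated in place as decommissioning progresses.
private static void waitForAdminState(DatanodeInfo node, DatanodeInfo.AdminStates expected)
        throws InterruptedException {
    while (node.getAdminState() != expected) {
        // Hypothetical poll interval
        Thread.sleep(500);
    }
}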

Example 73 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method doDecomCheck.

private void doDecomCheck(DatanodeManager datanodeManager, DecommissionManager decomManager, int expectedNumCheckedNodes) throws IOException, ExecutionException, InterruptedException {
    // Decom all nodes
    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
    for (DataNode d : getCluster().getDataNodes()) {
        DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0, decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
        decommissionedNodes.add(dn);
    }
    // Run decom scan and check
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
    assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes, decomManager.getNumNodesChecked());
    // Recommission all nodes
    for (DatanodeInfo dn : decommissionedNodes) {
        putNodeInService(0, dn);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode)
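DatanodeInfo also carries convenience accessors for the admin state, which makes it easy to report progress between the recheck and the recommission step. An illustrative sketch over the decommissionedNodes list from above:

// Sketch: inspecting the admin state of each DatanodeInfo after the recheck.
for (DatanodeInfo dn : decommissionedNodes) {
    if (dn.isDecommissionInProgress()) {
        System.out.println(dn.getXferAddr() + " is still draining replicas");
    } else if (dn.isDecommissioned()) {
        System.out.println(dn.getXferAddr() + " has finished decommissioning");
    }
}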

Example 74 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method testClusterStats.

public void testClusterStats(int numNameNodes) throws IOException, InterruptedException {
    LOG.info("Starting test testClusterStats");
    int numDatanodes = 1;
    startCluster(numNameNodes, numDatanodes);
    for (int i = 0; i < numNameNodes; i++) {
        FileSystem fileSys = getCluster().getFileSystem(i);
        Path file = new Path("testClusterStats.dat");
        writeFile(fileSys, file, 1);
        FSNamesystem fsn = getCluster().getNamesystem(i);
        NameNode namenode = getCluster().getNameNode(i);
        DatanodeInfo decomInfo = takeNodeOutofService(i, null, 0, null, AdminStates.DECOMMISSION_INPROGRESS);
        DataNode decomNode = getDataNode(decomInfo);
        // Check namenode stats for multiple datanode heartbeats
        verifyStats(namenode, fsn, decomInfo, decomNode, true);
        // Stop decommissioning and verify stats
        DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
        putNodeInService(i, retInfo);
        DataNode retNode = getDataNode(decomInfo);
        verifyStats(namenode, fsn, retInfo, retNode, false);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
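The per-state counts that verifyStats checks on the namenode side are also visible to clients through datanode reports. A hedged sketch using the same getDfsClient helper as the tests; note that the DECOMMISSIONING report type may not exist in older Hadoop releases:

// Sketch: querying datanode reports by admin state from the client side.
DFSClient client = getDfsClient(0);
DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
DatanodeInfo[] draining = client.datanodeReport(DatanodeReportType.DECOMMISSIONING);
System.out.println(live.length + " live, " + draining.length + " decommissioning");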

Example 75 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method testUsedCapacity.

@Test
public void testUsedCapacity() throws Exception {
    int numNamenodes = 1;
    int numDatanodes = 2;
    startCluster(numNamenodes, numDatanodes);
    FSNamesystem ns = getCluster().getNamesystem(0);
    BlockManager blockManager = ns.getBlockManager();
    DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
    long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
    long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
    long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
    namenodeDecomList.add(0, new ArrayList<>(numDatanodes));
    ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
    // Decommission one node
    DatanodeInfo decomNode = takeNodeOutofService(0, null, 0, decommissionedNodes, AdminStates.DECOMMISSIONED);
    decommissionedNodes.add(decomNode);
    long newUsedCapacity = datanodeStatistics.getCapacityUsed();
    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
    assertTrue("DfsUsedCapacity should not be the same after a node has " + "been decommissioned!", initialUsedCapacity != newUsedCapacity);
    assertTrue("TotalCapacity should not be the same after a node has " + "been decommissioned!", initialTotalCapacity != newTotalCapacity);
    assertTrue("BlockPoolUsed should not be the same after a node has " + "been decommissioned!", initialBlockPoolUsed != newBlockPoolUsed);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) ArrayList(java.util.ArrayList) DatanodeStatistics(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
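Besides the aggregate DatanodeStatistics used above, the same capacity figures are exposed per node on DatanodeInfo itself. A minimal sketch, assuming a DFSClient obtained via the test's getDfsClient helper:

// Sketch: per-node capacity counters carried by each DatanodeInfo.
DFSClient client = getDfsClient(0);
for (DatanodeInfo dn : client.datanodeReport(DatanodeReportType.LIVE)) {
    System.out.printf("%s used=%d remaining=%d capacity=%d%n",
            dn.getXferAddr(), dn.getDfsUsed(), dn.getRemaining(), dn.getCapacity());
}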

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214
Test (org.junit.Test): 103
Path (org.apache.hadoop.fs.Path): 91
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73
IOException (java.io.IOException): 47
FileSystem (org.apache.hadoop.fs.FileSystem): 44
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43
ArrayList (java.util.ArrayList): 39
Configuration (org.apache.hadoop.conf.Configuration): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25
InetSocketAddress (java.net.InetSocketAddress): 20
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20
StorageType (org.apache.hadoop.fs.StorageType): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14