
Example 21 with NameNode

Uses org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestSnapshotDeletion, the method testHANNRestartAfterSnapshotDeletion:

/*
   * Regression test for HDFS-5504: the OP_DELETE_SNAPSHOT edit log op was not
   * decrementing the safe-mode block threshold on restart in HA mode.
   */
@Test(timeout = 60000)
public void testHANNRestartAfterSnapshotDeletion() throws Exception {
    hdfs.close();
    cluster.shutdown();
    conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    cluster.transitionToActive(0);
    // stop the standby namenode
    NameNode snn = cluster.getNameNode(1);
    snn.stop();
    hdfs = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
    Path dir = new Path("/dir");
    Path subDir = new Path(dir, "sub");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);
    for (int i = 0; i < 5; i++) {
        DFSTestUtil.createFile(hdfs, new Path(subDir, "" + i), 100, (short) 1, 1024L);
    }
    // take snapshot
    hdfs.createSnapshot(dir, "s0");
    // delete the subdir
    hdfs.delete(subDir, true);
    // roll the edit log
    NameNode ann = cluster.getNameNode(0);
    ann.getRpcServer().rollEditLog();
    hdfs.deleteSnapshot(dir, "s0");
    // wait for block deletion to complete at the namenode
    Thread.sleep(2000);
    NameNodeAdapter.abortEditLogs(ann);
    // restart the active namenode; replaying OP_DELETE_SNAPSHOT from the edit
    // log must decrement the safe-mode block threshold (HDFS-5504)
    cluster.restartNameNode(0, false);
    cluster.transitionToActive(0);
    // wait until the cluster is up and out of safe mode
    cluster.waitClusterUp();
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), Configuration (org.apache.hadoop.conf.Configuration), Test (org.junit.Test)
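
The test finishes with cluster.waitClusterUp(), which blocks until the restarted namenode has left safe mode. Where an explicit bound is wanted instead of a fixed Thread.sleep, safe-mode state can be polled through the public client API; a minimal sketch (the helper name awaitSafeModeExit is illustrative, not from the test above):

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Poll until the namenode reports it has left safe mode, rather than
// sleeping for a fixed interval. SAFEMODE_GET only queries the state.
private static void awaitSafeModeExit(DistributedFileSystem fs, long timeoutMs)
        throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (fs.setSafeMode(SafeModeAction.SAFEMODE_GET)) {
        if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("namenode still in safe mode after " + timeoutMs + " ms");
        }
        Thread.sleep(200);
    }
}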

Example 22 with NameNode

Uses org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestWebHdfsDataLocality, the method testExcludeDataNodes:

@Test
public void testExcludeDataNodes() throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();
    final String[] racks = { RACK0, RACK0, RACK1, RACK1, RACK2, RACK2 };
    final String[] hosts = { "DataNode1", "DataNode2", "DataNode3", "DataNode4", "DataNode5", "DataNode6" };
    final int nDataNodes = hosts.length;
    LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks) + ", hosts=" + Arrays.asList(hosts));
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final NameNode namenode = cluster.getNameNode();
        final DatanodeManager dm = namenode.getNamesystem().getBlockManager().getDatanodeManager();
        LOG.info("dm=" + dm);
        final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
        final String f = "/foo";
        // create a file with three replicas
        final Path p = new Path(f);
        final FSDataOutputStream out = dfs.create(p, (short) 3);
        out.write(1);
        out.close();
        // get the replica locations
        final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(namenode, f, 0, 1);
        final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
        Assert.assertEquals(1, lb.size());
        final DatanodeInfo[] locations = lb.get(0).getLocations();
        Assert.assertEquals(3, locations.length);
        // For GETFILECHECKSUM, OPEN, and APPEND, the chosen datanode
        // must be different from the excluded nodes.
        StringBuffer sb = new StringBuffer();
        for (int i = 0; i < 2; i++) {
            sb.append(locations[i].getXferAddr());
            {
                // test GETFILECHECKSUM
                final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, sb.toString(), LOCALHOST);
                for (int j = 0; j <= i; j++) {
                    Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
                }
            }
            {
                // test OPEN
                final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString(), LOCALHOST);
                for (int j = 0; j <= i; j++) {
                    Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
                }
            }
            {
                // test APPEND
                final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, sb.toString(), LOCALHOST);
                for (int j = 0; j <= i; j++) {
                    Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
                }
            }
            sb.append(",");
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
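
The exclude string handed to chooseDatanode is built up incrementally in the loop above: a comma-separated list of datanode transfer addresses, the same value an HTTP client would send in the WebHDFS excludedatanodes query parameter. A sketch of building it in one place (the helper name is illustrative):

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

// Build a comma-separated exclude list from datanode transfer addresses.
static String buildExcludeList(DatanodeInfo[] excluded) {
    StringBuilder sb = new StringBuilder();
    for (DatanodeInfo dn : excluded) {
        if (sb.length() > 0) {
            sb.append(',');
        }
        sb.append(dn.getXferAddr());  // "host:port" of the data-transfer endpoint
    }
    return sb.toString();
}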

Example 23 with NameNode

Uses org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestDecommission, the method testClusterStats:

public void testClusterStats(int numNameNodes) throws IOException, InterruptedException {
    LOG.info("Starting test testClusterStats");
    int numDatanodes = 1;
    startCluster(numNameNodes, numDatanodes);
    for (int i = 0; i < numNameNodes; i++) {
        FileSystem fileSys = getCluster().getFileSystem(i);
        Path file = new Path("testClusterStats.dat");
        writeFile(fileSys, file, 1);
        FSNamesystem fsn = getCluster().getNamesystem(i);
        NameNode namenode = getCluster().getNameNode(i);
        DatanodeInfo decomInfo = takeNodeOutofService(i, null, 0, null, AdminStates.DECOMMISSION_INPROGRESS);
        DataNode decomNode = getDataNode(decomInfo);
        // Check namenode stats for multiple datanode heartbeats
        verifyStats(namenode, fsn, decomInfo, decomNode, true);
        // Stop decommissioning and verify stats
        DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
        putNodeInService(i, retInfo);
        DataNode retNode = getDataNode(decomInfo);
        verifyStats(namenode, fsn, retInfo, retNode, false);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
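
The helper takes the namenode count as a parameter, so the surrounding test class can presumably run it for both a single namenode and a federated cluster. A hypothetical pair of @Test wrappers (method names and timeouts are illustrative, not from the snippet):

@Test(timeout = 360000)
public void testClusterStatsSingleNN() throws Exception {
    // single-namenode cluster
    testClusterStats(1);
}

@Test(timeout = 360000)
public void testClusterStatsFederation() throws Exception {
    // federated cluster with two independent namespaces
    testClusterStats(2);
}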

Example 24 with NameNode

Uses org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestOpenFilesWithSnapshot, the method doTestMultipleSnapshots:

private void doTestMultipleSnapshots(boolean saveNamespace) throws IOException {
    Path path = new Path("/test");
    doWriteAndAbort(fs, path);
    fs.createSnapshot(path, "s2");
    fs.delete(new Path("/test/test"), true);
    fs.deleteSnapshot(path, "s2");
    cluster.triggerBlockReports();
    if (saveNamespace) {
        // checkpoint the namespace so the restart loads a fresh fsimage
        NameNode nameNode = cluster.getNameNode();
        NameNodeAdapter.enterSafeMode(nameNode, false);
        NameNodeAdapter.saveNamespace(nameNode);
        NameNodeAdapter.leaveSafeMode(nameNode);
    }
    cluster.restartNameNode(true);
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode)
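
The saveNamespace flag decides what the restarted namenode reads back: a freshly saved fsimage, or the edit log replayed from the last checkpoint. A hypothetical pair of callers exercising both paths (method names are illustrative):

@Test
public void testMultipleSnapshotsWithCheckpoint() throws Exception {
    // checkpoint first; the restart loads the new fsimage
    doTestMultipleSnapshots(true);
}

@Test
public void testMultipleSnapshotsWithoutCheckpoint() throws Exception {
    // no checkpoint; the restart replays the edit log
    doTestMultipleSnapshots(false);
}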

Example 25 with NameNode

Uses org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestOpenFilesWithSnapshot, the method restartNameNode:

private void restartNameNode() throws Exception {
    cluster.triggerBlockReports();
    NameNode nameNode = cluster.getNameNode();
    // checkpoint: enter safe mode, save the namespace, then leave safe mode
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
    NameNodeAdapter.leaveSafeMode(nameNode);
    cluster.restartNameNode(true);
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode)
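
The enter-safe-mode / saveNamespace / leave-safe-mode sequence is a manual checkpoint. Outside of tests the same thing can be done through the public client API rather than the test-only NameNodeAdapter; a minimal sketch (the helper name is ours), roughly equivalent to running hdfs dfsadmin -safemode enter, -saveNamespace, and -safemode leave:

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Checkpoint the namespace via the client API.
static void checkpointNamespace(DistributedFileSystem dfs) throws IOException {
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);  // block namespace mutations
    dfs.saveNamespace();                             // write a new fsimage
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);  // resume normal operation
}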

Aggregations

Types used together with NameNode across all 65 examples, with occurrence counts:

NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 65
Test (org.junit.Test): 44
Configuration (org.apache.hadoop.conf.Configuration): 28
Path (org.apache.hadoop.fs.Path): 22
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 17
FileSystem (org.apache.hadoop.fs.FileSystem): 15
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
File (java.io.File): 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 6
IOException (java.io.IOException): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 4
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4