
Example 11 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestWebHDFS, method testRaceWhileNNStartup.

/**
   * Make sure a RetriableException is thrown when rpcServer is null in
   * NamenodeWebHdfsMethods.
   */
@Test
public void testRaceWhileNNStartup() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final NameNode namenode = cluster.getNameNode();
        final NamenodeProtocols rpcServer = namenode.getRpcServer();
        // Simulate a NameNode still starting up by nulling out its RPC server.
        Whitebox.setInternalState(namenode, "rpcServer", null);
        final Path foo = new Path("/foo");
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        try {
            webHdfs.mkdirs(foo);
            fail("Expected RetriableException");
        } catch (RetriableException e) {
            GenericTestUtils.assertExceptionContains("Namenode is in startup mode", e);
        }
        // Restore the RPC server so the cluster can shut down cleanly.
        Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RetriableException (org.apache.hadoop.ipc.RetriableException), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
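
The RetriableException asserted above comes from a startup guard in NamenodeWebHdfsMethods: each WebHDFS handler resolves the NameNode's RPC server before doing any work, and rejects the request with a retriable error while that server is still null. A minimal sketch of the guard, assuming the helper shape used inside NamenodeWebHdfsMethods (the exact signature varies across Hadoop releases):

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.RetriableException;

// Sketch of the startup guard: while the NameNode is still initializing,
// getRpcServer() returns null, so the request fails with a RetriableException
// that retry-aware clients can act on, rather than a NullPointerException.
final class RpcServerGuard {

    static NamenodeProtocols getRPCServer(NameNode namenode) throws IOException {
        final NamenodeProtocols np = namenode.getRpcServer();
        if (np == null) {
            throw new RetriableException("Namenode is in startup mode");
        }
        return np;
    }
}

Since RetriableException extends IOException, a client without a retry policy, like the bare mkdirs call in this test, simply sees the failure and can assert on its message.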

Example 12 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestWebHDFSForHA, method testRetryWhileNNStartup.

/**
   * Make sure the WebHdfsFileSystem will retry based on RetriableException when
   * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
   */
@Test(timeout = 120000)
public void testRetryWhileNNStartup() throws Exception {
    final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
    MiniDFSCluster cluster = null;
    final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
    try {
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(0).build();
        HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
        cluster.waitActive();
        cluster.transitionToActive(0);
        final NameNode namenode = cluster.getNameNode(0);
        final NamenodeProtocols rpcServer = namenode.getRpcServer();
        // Simulate a NameNode still starting up by nulling out its RPC server.
        Whitebox.setInternalState(namenode, "rpcServer", null);
        new Thread() {

            @Override
            public void run() {
                boolean result = false;
                FileSystem fs = null;
                try {
                    fs = FileSystem.get(WEBHDFS_URI, conf);
                    final Path dir = new Path("/test");
                    result = fs.mkdirs(dir);
                } catch (IOException e) {
                    result = false;
                } finally {
                    IOUtils.cleanup(null, fs);
                }
                synchronized (TestWebHDFSForHA.this) {
                    resultMap.put("mkdirs", result);
                    TestWebHDFSForHA.this.notifyAll();
                }
            }
        }.start();
        // Let the client thread hit the "starting" NameNode at least once,
        // then restore the RPC server so its retry can succeed.
        Thread.sleep(1000);
        Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
        synchronized (this) {
            while (!resultMap.containsKey("mkdirs")) {
                this.wait();
            }
            Assert.assertTrue(resultMap.get("mkdirs"));
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), IOException (java.io.IOException), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
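
Example 11 asserted that the exception is thrown; this test checks that an HA-configured WebHdfsFileSystem retries past it. The real behavior is driven by the client's configured RetryPolicy, so the loop below is only a hand-rolled illustration of the retry contract, not WebHdfsFileSystem's implementation:

import java.io.IOException;

import org.apache.hadoop.ipc.RetriableException;

// Illustrative retry loop: keep re-running an operation while the server
// reports that it is still starting up, backing off between attempts.
final class RetryOnStartup {

    interface Op<T> {
        T run() throws IOException;
    }

    static <T> T callWithRetry(Op<T> op, int maxAttempts, long sleepMs)
            throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
            try {
                return op.run();
            } catch (RetriableException e) {
                if (attempt >= maxAttempts) {
                    throw e;              // out of attempts: surface the failure
                }
                Thread.sleep(sleepMs);    // back off, then try again
            }
        }
    }
}

In the test itself, the one-second sleep gives the worker thread time to issue mkdirs against the "starting" NameNode; restoring rpcServer then lets the client's next retry succeed, which is why the mkdirs result is asserted true.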

Example 13 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestHDFSServerPorts, method testSecondaryNodePorts.

/**
   * Verify secondary namenode port usage.
   */
@Test(timeout = 300000)
public void testSecondaryNodePorts() throws Exception {
    NameNode nn = null;
    try {
        nn = startNameNode();
        // bind http server to the same port as name-node
        Configuration conf2 = new HdfsConfiguration(config);
        conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
        LOG.info("= Starting 1 on: " + conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
        boolean started = canStartSecondaryNode(conf2);
        // should fail
        assertFalse(started);
        // bind http server to a different port
        conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
        LOG.info("= Starting 2 on: " + conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
        started = canStartSecondaryNode(conf2);
        // should start now
        assertTrue(started);
    } finally {
        stopNameNode(nn);
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), Configuration (org.apache.hadoop.conf.Configuration), Test (org.junit.Test)
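
The failure mode exercised here is plain socket binding: two servers cannot bind the same host and port. A standalone, JDK-only demonstration of the clash the secondary NameNode's HTTP server runs into:

import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

// Binding a second server socket to an already-taken port fails with a
// BindException, which is exactly why the secondary NameNode's HTTP server
// cannot come up on the NameNode's HTTP port.
public class PortClashDemo {

    public static void main(String[] args) throws IOException {
        try (ServerSocket first = new ServerSocket()) {
            first.bind(new InetSocketAddress("127.0.0.1", 0)); // pick any free port
            int port = first.getLocalPort();
            try (ServerSocket second = new ServerSocket()) {
                second.bind(new InetSocketAddress("127.0.0.1", port));
            } catch (BindException e) {
                System.out.println("Expected clash on port " + port + ": " + e.getMessage());
            }
        }
    }
}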

Example 14 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestHDFSServerPorts, method testDataNodePorts.

/**
   * Verify datanode port usage.
   */
@Test(timeout = 300000)
public void testDataNodePorts() throws Exception {
    NameNode nn = null;
    try {
        nn = startNameNode();
        // start data-node on the same port as name-node
        Configuration conf2 = new HdfsConfiguration(config);
        conf2.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
        conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, FileSystem.getDefaultUri(config).getAuthority());
        conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
        boolean started = canStartDataNode(conf2);
        // should fail
        assertFalse(started);
        // bind http server to the same port as name-node
        conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
        conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
        started = canStartDataNode(conf2);
        // should fail
        assertFalse(started);
        // both ports are different from the name-node ones
        conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
        conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
        conf2.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, THIS_HOST);
        started = canStartDataNode(conf2);
        // should start now
        assertTrue(started);
    } finally {
        stopNameNode(nn);
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), Configuration (org.apache.hadoop.conf.Configuration), File (java.io.File), Test (org.junit.Test)
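
canStartDataNode (like canStartSecondaryNode above) is a private helper of TestHDFSServerPorts whose body is not shown here. A plausible sketch of such a helper, assuming DataNode.createDataNode boots the node and that a BindException signals the expected port clash; treat the details as an assumption, since only the true/false contract matters to the test:

import java.io.IOException;
import java.net.BindException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Assumed helper shape: try to start a DataNode with the given conf and
// report whether it came up. A BindException means a port was already taken,
// the condition the test asserts on; any other IOException is a real error.
final class DataNodeStarter {

    static boolean canStartDataNode(Configuration conf) throws IOException {
        DataNode dn = null;
        try {
            dn = DataNode.createDataNode(new String[] {}, conf);
        } catch (IOException e) {
            if (e instanceof BindException) {
                return false;  // port in use: the expected failure mode
            }
            throw e;
        } finally {
            if (dn != null) {
                dn.shutdown();
            }
        }
        return true;
    }
}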

Example 15 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestDelegationToken, method testDTManagerInSafeMode.

/**
   * Test that the delegation token secret manager only runs when the
   * NN is out of safe mode. This is because the secret manager
   * has to log to the edit log, which should not be written in
   * safe mode. Regression test for HDFS-2579.
   */
@Test
public void testDTManagerInSafeMode() throws Exception {
    cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null);
    FileSystem fs = cluster.getFileSystem();
    for (int i = 0; i < 5; i++) {
        DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short) 1, 1L);
    }
    // Shorten the delegation key roll interval and stretch the safe mode
    // extension so the NameNode is still in safe mode right after restart.
    cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500);
    cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);
    cluster.setWaitSafeMode(false);
    cluster.restartNameNode();
    NameNode nn = cluster.getNameNode();
    assertTrue(nn.isInSafeMode());
    DelegationTokenSecretManager sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
    assertFalse("Secret manager should not run in safe mode", sm.isRunning());
    NameNodeAdapter.leaveSafeMode(nn);
    assertTrue("Secret manager should start when safe mode is exited", sm.isRunning());
    LOG.info("========= entering safemode again");
    NameNodeAdapter.enterSafeMode(nn, false);
    assertFalse("Secret manager should stop again when safe mode " + "is manually entered", sm.isRunning());
    // Set the cluster to leave safemode quickly on its own.
    cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
    cluster.setWaitSafeMode(true);
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
    assertFalse(nn.isInSafeMode());
    assertTrue(sm.isRunning());
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DelegationTokenSecretManager (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem), Test (org.junit.Test)
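
NameNodeAdapter.enterSafeMode and leaveSafeMode are test-only hooks. Outside of tests, safe mode can be entered, queried, and left through the public client API, mirroring hdfs dfsadmin -safemode. A minimal sketch, assuming fs.defaultFS points at a running HDFS cluster so the cast to DistributedFileSystem succeeds:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// Sketch: toggle and query safe mode through the client API. While safe mode
// is on, the NameNode rejects namespace mutations, which is why the
// delegation token secret manager must stay stopped (it writes to the edit log).
public class SafeModeToggle {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
        System.out.println("In safe mode: " + inSafeMode);  // expect true
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
}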

Aggregations

NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 65
Test (org.junit.Test): 44
Configuration (org.apache.hadoop.conf.Configuration): 28
Path (org.apache.hadoop.fs.Path): 22
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 17
FileSystem (org.apache.hadoop.fs.FileSystem): 15
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
File (java.io.File): 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 6
IOException (java.io.IOException): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 4
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4