Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
The class TestWebHDFS, method testRaceWhileNNStartup.
/**
 * Make sure a RetriableException is thrown when rpcServer is null in
 * NamenodeWebHdfsMethods.
 */
@Test
public void testRaceWhileNNStartup() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final NameNode namenode = cluster.getNameNode();
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);
    final Path foo = new Path("/foo");
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    try {
      webHdfs.mkdirs(foo);
      fail("Expected RetriableException");
    } catch (RetriableException e) {
      GenericTestUtils.assertExceptionContains("Namenode is in startup mode", e);
    }
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
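The RetriableException here comes from a guard inside NamenodeWebHdfsMethods: every WebHDFS operation first resolves the NameNode's RPC server, and a null reference is treated as "still starting up" rather than a hard failure. A minimal sketch of that guard (simplified; the actual source may differ in detail):

private static NamenodeProtocols getRPCServer(NameNode namenode)
    throws IOException {
  final NamenodeProtocols np = namenode.getRpcServer();
  if (np == null) {
    // This is the message the test asserts on.
    throw new RetriableException("Namenode is in startup mode");
  }
  return np;
}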
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
The class TestWebHDFSForHA, method testRetryWhileNNStartup.
/**
 * Make sure the WebHdfsFileSystem will retry based on RetriableException when
 * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
 */
@Test(timeout = 120000)
public void testRetryWhileNNStartup() throws Exception {
  final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    cluster.transitionToActive(0);
    final NameNode namenode = cluster.getNameNode(0);
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);
    new Thread() {
      @Override
      public void run() {
        boolean result = false;
        FileSystem fs = null;
        try {
          fs = FileSystem.get(WEBHDFS_URI, conf);
          final Path dir = new Path("/test");
          result = fs.mkdirs(dir);
        } catch (IOException e) {
          result = false;
        } finally {
          IOUtils.cleanup(null, fs);
        }
        synchronized (TestWebHDFSForHA.this) {
          resultMap.put("mkdirs", result);
          TestWebHDFSForHA.this.notifyAll();
        }
      }
    }.start();
    Thread.sleep(1000);
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    synchronized (this) {
      while (!resultMap.containsKey("mkdirs")) {
        this.wait();
      }
      Assert.assertTrue(resultMap.get("mkdirs"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
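From the client's point of view, a RetriableException signals a transient condition, so WebHdfsFileSystem retries the request instead of failing the mkdirs call. The real client delegates this to a configured RetryPolicy; the loop below is only a minimal sketch of the equivalent behavior, and the method name, attempt limit, and backoff value are illustrative, not taken from the Hadoop source:

// Sketch only: the real WebHdfsFileSystem retry is driven by a RetryPolicy,
// not an explicit loop like this one.
static boolean mkdirsWithRetry(FileSystem fs, Path dir, int maxAttempts)
    throws IOException, InterruptedException {
  for (int attempt = 1; ; attempt++) {
    try {
      return fs.mkdirs(dir);
    } catch (RetriableException e) {
      if (attempt >= maxAttempts) {
        throw e; // give up after the configured number of attempts
      }
      Thread.sleep(1000L); // back off while the NN finishes starting up
    }
  }
}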
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
The class TestHDFSServerPorts, method testSecondaryNodePorts.
/**
 * Verify secondary namenode port usage.
 */
@Test(timeout = 300000)
public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    // bind http server to the same port as name-node
    Configuration conf2 = new HdfsConfiguration(config);
    conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: " +
        conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    boolean started = canStartSecondaryNode(conf2);
    // should fail
    assertFalse(started);
    // bind http server to a different port
    conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: " +
        conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    started = canStartSecondaryNode(conf2);
    // should start now
    assertTrue(started);
  } finally {
    stopNameNode(nn);
  }
}
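canStartSecondaryNode is a helper defined elsewhere in TestHDFSServerPorts. Roughly, it attempts to construct a SecondaryNameNode with the given configuration and reports whether the port bind succeeded. A sketch of the assumed shape (simplified, not the verbatim helper):

private boolean canStartSecondaryNode(Configuration conf) throws IOException {
  SecondaryNameNode sn;
  try {
    sn = new SecondaryNameNode(conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException) {
      return false; // the port is already taken, e.g. by the name-node
    }
    throw e;
  }
  sn.shutdown();
  return true;
}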
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
The class TestHDFSServerPorts, method testDataNodePorts.
/**
 * Verify datanode port usage.
 */
@Test(timeout = 300000)
public void testDataNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    // start data-node on the same port as name-node
    Configuration conf2 = new HdfsConfiguration(config);
    conf2.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
        new File(hdfsDir, "data").getPath());
    conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
        FileSystem.getDefaultUri(config).getAuthority());
    conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    boolean started = canStartDataNode(conf2);
    // should fail
    assertFalse(started);
    // bind http server to the same port as name-node
    conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
        config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    started = canStartDataNode(conf2);
    // should fail
    assertFalse(started);
    // both ports are different from the name-node ones
    conf2.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    conf2.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    conf2.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, THIS_HOST);
    started = canStartDataNode(conf2);
    // should start now
    assertTrue(started);
  } finally {
    stopNameNode(nn);
  }
}
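canStartDataNode follows the same pattern for a DataNode: try to create one, translate a BindException on any of its three ports (data transfer, HTTP, IPC) into false, and propagate every other failure. A sketch under the same assumptions as above:

private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[] {}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException) {
      return false; // one of the requested ports is already in use
    }
    throw e;
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  return true;
}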
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
The class TestDelegationToken, method testDTManagerInSafeMode.
/**
 * Test that the delegation token secret manager only runs when the
 * NN is out of safe mode. This is because the secret manager
 * has to log to the edit log, which should not be written in
 * safe mode. Regression test for HDFS-2579.
 */
@Test
public void testDTManagerInSafeMode() throws Exception {
  cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null);
  FileSystem fs = cluster.getFileSystem();
  for (int i = 0; i < 5; i++) {
    DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short) 1, 1L);
  }
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500);
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);
  cluster.setWaitSafeMode(false);
  cluster.restartNameNode();
  NameNode nn = cluster.getNameNode();
  assertTrue(nn.isInSafeMode());
  DelegationTokenSecretManager sm =
      NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
  assertFalse("Secret manager should not run in safe mode", sm.isRunning());
  NameNodeAdapter.leaveSafeMode(nn);
  assertTrue("Secret manager should start when safe mode is exited",
      sm.isRunning());
  LOG.info("========= entering safemode again");
  NameNodeAdapter.enterSafeMode(nn, false);
  assertFalse("Secret manager should stop again when safe mode is manually entered",
      sm.isRunning());
  // Set the cluster to leave safemode quickly on its own.
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
  cluster.setWaitSafeMode(true);
  cluster.restartNameNode();
  nn = cluster.getNameNode();
  sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
  assertFalse(nn.isInSafeMode());
  assertTrue(sm.isRunning());
}
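The start/stop behavior asserted above is driven by gating logic in FSNamesystem: the secret manager may only run once the namesystem can write to the edit log, i.e. outside safe mode, because issuing a new master key is itself an edit-log operation. A simplified sketch of that check (assumed shape, not verbatim source):

private synchronized void startSecretManagerIfNecessary() {
  boolean shouldRun = shouldUseDelegationTokens()
      && !isInSafeMode() && getEditLog().isOpenForWrite();
  if (shouldRun && !dtSecretManager.isRunning()) {
    startSecretManager(); // logging the master key requires the edit log
  }
}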