Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From class MiniDFSCluster, method isNameNodeUp.
/**
* Returns true if the NameNode is running and is out of Safe Mode
* or if waiting for safe mode is disabled.
*/
public boolean isNameNodeUp(int nnIndex) {
  NameNode nameNode = getNN(nnIndex).nameNode;
  if (nameNode == null) {
    return false;
  }
  long[] sizes = NameNodeAdapter.getStats(nameNode.getNamesystem());
  boolean isUp = false;
  synchronized (this) {
    isUp = ((!nameNode.isInSafeMode() || !waitSafeMode)
        && sizes[ClientProtocol.GET_STATS_CAPACITY_IDX] != 0);
  }
  return isUp;
}
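A minimal usage sketch (not from the Hadoop source): a test could poll isNameNodeUp until the NameNode leaves safe mode. The helper name waitForNameNodeUp and the 30-second budget are assumptions for illustration; MiniDFSCluster's own waitClusterUp() covers the common case.

// Hypothetical helper, assuming a running MiniDFSCluster with one NameNode.
private static void waitForNameNodeUp(MiniDFSCluster cluster)
    throws InterruptedException {
  long deadline = System.currentTimeMillis() + 30_000; // assumed 30s budget
  while (!cluster.isNameNodeUp(0)) {
    if (System.currentTimeMillis() > deadline) {
      throw new AssertionError("NameNode 0 did not leave safe mode in time");
    }
    Thread.sleep(100); // brief back-off between polls
  }
}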
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From class MiniDFSCluster, method createNameNode.
private void createNameNode(Configuration hdfsConf, boolean format,
    StartupOption operation, String clusterId, String nameserviceId,
    String nnId) throws IOException {
  // Format the NameNode's storage directories before starting it
  if (format) {
    DFSTestUtil.formatNameNode(hdfsConf);
  }
  if (operation == StartupOption.UPGRADE) {
    operation.setClusterId(clusterId);
  }
  String[] args = createArgs(operation);
  NameNode nn = NameNode.createNameNode(args, hdfsConf);
  // In RECOVER mode the NameNode runs recovery and exits, so there is
  // nothing to register with the cluster
  if (operation == StartupOption.RECOVER) {
    return;
  }
  // After the NN has started, write the actual bound ports back into the conf
  hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      nameserviceId, nnId), nn.getNameNodeAddressHostPortString());
  if (nn.getHttpAddress() != null) {
    hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpAddress()));
  }
  if (nn.getHttpsAddress() != null) {
    hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY,
        nameserviceId, nnId), NetUtils.getHostPortString(nn.getHttpsAddress()));
  }
  if (hdfsConf.get(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY) != null) {
    hdfsConf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY,
        nameserviceId, nnId),
        hdfsConf.get(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY));
  }
  copyKeys(hdfsConf, conf, nameserviceId, nnId);
  DFSUtil.setGenericConf(hdfsConf, nameserviceId, nnId,
      DFS_NAMENODE_HTTP_ADDRESS_KEY);
  NameNodeInfo info = new NameNodeInfo(nn, nameserviceId, nnId,
      operation, hdfsConf);
  namenodes.put(nameserviceId, info);
}
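Because the method writes the actually bound (possibly ephemeral) addresses back into the per-NameNode configuration, tests can discover the real ports after startup. A hedged sketch, assuming a started cluster and the example ids "ns1"/"nn1":

// Illustrative only; the nameservice and NameNode ids are assumed values.
Configuration started = cluster.getConfiguration(0);
String rpcAddr = started.get(DFSUtil.addKeySuffixes(
    DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"));
System.out.println("NameNode RPC bound at " + rpcAddr);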
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From class TestBlockStoragePolicy, method testChooseSsdOverDisk.
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE },
      new StorageType[] {}, new StorageType[] {});
  final String[] racks = { "/d1/r1", "/d1/r1", "/d1/r1" };
  final String[] hosts = { "host1", "host2", "host3" };
  final StorageType[] disks = { StorageType.DISK, StorageType.DISK, StorageType.DISK };
  final DatanodeStorageInfo[] diskStorages =
      DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes =
      DFSTestUtil.toDatanodeDescriptor(diskStorages);
  // Give every DataNode an additional SSD storage alongside its DISK
  for (int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }
  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy, null);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
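The test builds a one-off policy by hand so that SSD is preferred over DISK. Outside of tests, a caller would more likely pull a built-in policy from the server-side default suite; a hedged sketch, assuming the standard ONE_SSD policy name from HdfsConstants:

// Sketch only: uses the default policy suite rather than a hand-built
// BlockStoragePolicy.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy oneSsd =
    suite.getPolicy(HdfsConstants.ONESSD_STORAGE_POLICY_NAME);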
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From class TestFileCreation, method testFileCreationWithOverwrite.
/**
 * 1. Check that the blocks of the old file are cleaned up after creating with overwrite
 * 2. Restart the NN and check the file
 * 3. Save a new checkpoint, restart the NN, and check the file
*/
@Test(timeout = 120000)
public void testFileCreationWithOverwrite() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt("dfs.blocksize", blockSize);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    dfs.mkdirs(new Path("/foo/dir"));
    String file = "/foo/dir/file";
    Path filePath = new Path(file);
    // Case 1: Create file with overwrite, check the blocks of old file
    // are cleaned after creating with overwrite
    NameNode nn = cluster.getNameNode();
    FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
    BlockManager bm = fsn.getBlockManager();
    FSDataOutputStream out = dfs.create(filePath);
    byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(oldData);
    } finally {
      out.close();
    }
    LocatedBlocks oldBlocks =
        NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
    assertBlocks(bm, oldBlocks, true);
    out = dfs.create(filePath, true);
    byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(newData);
    } finally {
      out.close();
    }
    dfs.deleteOnExit(filePath);
    LocatedBlocks newBlocks =
        NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
    assertBlocks(bm, newBlocks, true);
    assertBlocks(bm, oldBlocks, false);
    FSDataInputStream in = dfs.open(filePath);
    byte[] result = null;
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    // Case 2: Restart NN, check the file
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    // Case 3: Save new checkpoint and restart NN, check the file
    NameNodeAdapter.enterSafeMode(nn, false);
    NameNodeAdapter.saveNamespace(nn);
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
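The readAll helper is not shown in the snippet. Below is a minimal sketch of what such a helper plausibly does (drain the stream into a byte array), assuming the test's existing imports plus java.io.ByteArrayOutputStream; treat it as an assumption, not the test's actual code:

// Hypothetical stand-in for the test's readAll(...) helper.
private static byte[] readAll(FSDataInputStream in) throws IOException {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  byte[] buf = new byte[4096];
  int n;
  while ((n = in.read(buf)) != -1) {
    out.write(buf, 0, n);
  }
  return out.toByteArray();
}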
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From class TestHDFSServerPorts, method runTestNameNodePorts.
/**
* Verify namenode port usage.
*/
public void runTestNameNodePorts(boolean withService) throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode(withService);
    // start another namenode on the same port
    Configuration conf2 = new HdfsConfiguration(config);
    conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        fileAsURI(new File(hdfsDir, "name2")).toString());
    DFSTestUtil.formatNameNode(conf2);
    boolean started = canStartNameNode(conf2);
    // should fail
    assertFalse(started);
    // start on a different main port
    FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
    started = canStartNameNode(conf2);
    // should fail again
    assertFalse(started);
    // reset conf2 since NameNode modifies it
    FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
    // different http port
    conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
    started = canStartNameNode(conf2);
    if (withService) {
      assertFalse("Should've failed on service port", started);
      // reset conf2 since NameNode modifies it
      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
      // Set Service address
      conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, THIS_HOST);
      started = canStartNameNode(conf2);
    }
    assertTrue(started);
  } finally {
    stopNameNode(nn);
  }
}
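The canStartNameNode helper is also part of TestHDFSServerPorts. A plausible minimal sketch of its contract, offered as an illustration rather than the exact source: try to start a second NameNode and report false when the port is already bound.

// Sketch: returns false on a BindException (port collision), true otherwise.
private boolean canStartNameNode(Configuration conf) throws IOException {
  NameNode nn2 = null;
  try {
    nn2 = NameNode.createNameNode(new String[] {}, conf);
  } catch (IOException e) {
    if (e instanceof java.net.BindException) {
      return false; // expected failure mode in this test
    }
    throw e;
  } finally {
    stopNameNode(nn2);
  }
  return true;
}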