Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by Apache.
The class TestHostsFiles, method testHostsExcludeInUI.
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/decommission");
  // Two blocks and four racks
  String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Decommission one of the hosts with the block; this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    LOG.info("adding '" + name + "' to decommission");
    hostsFileWriter.initExcludeHost(name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);
    // Check that the block still has sufficient replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    hostsFileWriter.cleanup();
  }
}
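The assertion at the end reads the NameNode's NameNodeInfo MXBean, which MiniDFSCluster registers in the test JVM just as a standalone NameNode does. The helper below is a minimal sketch of that lookup for an arbitrary attribute; it uses only standard java.lang.management and javax.management calls, the MBean name and the LiveNodes attribute are the ones already used in the test, and the class name is ours.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

class NameNodeMXBeanProbe {
  // Reads a string attribute (e.g. "LiveNodes" or "DeadNodes") from the
  // NameNodeInfo MXBean of a NameNode running in this JVM.
  static String readNameNodeInfo(String attribute) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbean = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    return (String) mbs.getAttribute(mxbean, attribute);
  }
}

The returned value is a JSON map keyed by datanode name, which is why the test can detect the decommissioned node with a plain substring match on "Decommissioned".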
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by Apache.
The class TestINodeFile, method testLocationLimitInListingOps.
@Test
public void testLocationLimitInListingOps() throws Exception {
  final Configuration conf = new Configuration();
  // 3 blocks * 3 replicas
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 9);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    ArrayList<String> source = new ArrayList<String>();
    // tmp1 holds files with 3 blocks, 3 replicas
    // tmp2 holds files with 3 blocks, 1 replica
    hdfs.mkdirs(new Path("/tmp1"));
    hdfs.mkdirs(new Path("/tmp2"));
    source.add("f1");
    source.add("f2");
    int numEntries = source.size();
    for (int j = 0; j < numEntries; j++) {
      DFSTestUtil.createFile(hdfs, new Path("/tmp1/" + source.get(j)), 4096,
          3 * 1024 - 100, 1024, (short) 3, 0);
    }
    byte[] start = HdfsFileStatus.EMPTY_NAME;
    for (int j = 0; j < numEntries; j++) {
      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp1", start, true);
      assertTrue(dl.getPartialListing().length == 1);
      for (int i = 0; i < dl.getPartialListing().length; i++) {
        source.remove(dl.getPartialListing()[i].getLocalName());
      }
      start = dl.getLastName();
    }
    // Verify we have listed all entries in the directory.
    assertTrue(source.size() == 0);
    // Now create 6 files, each with 3 locations. Should take 2 iterations of 3
    source.add("f1");
    source.add("f2");
    source.add("f3");
    source.add("f4");
    source.add("f5");
    source.add("f6");
    numEntries = source.size();
    for (int j = 0; j < numEntries; j++) {
      DFSTestUtil.createFile(hdfs, new Path("/tmp2/" + source.get(j)), 4096,
          3 * 1024 - 100, 1024, (short) 1, 0);
    }
    start = HdfsFileStatus.EMPTY_NAME;
    for (int j = 0; j < numEntries / 3; j++) {
      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp2", start, true);
      assertTrue(dl.getPartialListing().length == 3);
      for (int i = 0; i < dl.getPartialListing().length; i++) {
        source.remove(dl.getPartialListing()[i].getLocalName());
      }
      start = dl.getLastName();
    }
    // Verify we have listed all entries in tmp2.
    assertTrue(source.size() == 0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
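The test drives paging by hand through the NameNode RPC: getListing is called repeatedly with the last returned name as the continuation cookie, and dfs.ls.limit caps the number of block locations per partial listing. Application code normally gets the same paging for free through the iterator-based listing API, which fetches the next page lazily. A minimal sketch, assuming an already-open FileSystem handle (for example cluster.getFileSystem() from the test above); the class and method names are ours.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

class PagedListingExample {
  // Lists a directory with block locations. For large directories the
  // NameNode caps each response according to dfs.ls.limit, and the iterator
  // fetches follow-up pages on demand, so callers never handle the
  // start-name cookie used in the RPC-level loop above.
  static void listWithLocations(FileSystem fs, Path dir) throws Exception {
    RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(dir);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      System.out.println(status.getPath() + " blockLocations="
          + status.getBlockLocations().length);
    }
  }
}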
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by Apache.
The class TestINodeFile, method testInodeId.
/**
 * This test verifies inode ID counter and inode map functionality.
 */
@Test
public void testInodeId() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    long lastId = fsn.dir.getLastInodeId();
    // Ensure root has the correct inode ID.
    // Last inode ID should be the root inode ID and inode map size should be 1.
    int inodeCount = 1;
    long expectedLastInodeId = INodeId.ROOT_INODE_ID;
    assertEquals(fsn.dir.rootDir.getId(), INodeId.ROOT_INODE_ID);
    assertEquals(expectedLastInodeId, lastId);
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create a directory.
    // Last inode ID and inode map size should increase by 1.
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    assertTrue(fs.mkdirs(path));
    assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
    // Create a file.
    // Last inode ID and inode map size should increase by 1.
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
    assertEquals(++expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
    // Ensure the right inode ID is returned in the file status.
    HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
    assertEquals(expectedLastInodeId, fileStatus.getFileId());
    // Rename a directory.
    // Last inode ID and inode map size should not change.
    Path renamedPath = new Path("/test2");
    assertTrue(fs.rename(path, renamedPath));
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Delete test2/file and test2 and ensure the inode map size decreases.
    assertTrue(fs.delete(renamedPath, true));
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create and concat /test1/file1 and /test1/file2.
    // Create /test1/file1 and /test1/file2.
    String file1 = "/test1/file1";
    String file2 = "/test1/file2";
    DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
    DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
    // test1, file1 and file2 are created.
    inodeCount += 3;
    expectedLastInodeId += 3;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    // Concat /test1/file1 and /test1/file2 into /test1/file2.
    nnrpc.concat(file2, new String[] { file1 });
    // file1 and file2 are concatenated into file2.
    inodeCount--;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertTrue(fs.delete(new Path("/test1"), true));
    // test1 and file2 are deleted.
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Make sure the editlog is loaded correctly.
    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create two inodes: test2 and test2/file2.
    DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
    expectedLastInodeId += 2;
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Create /test3 and /test3/file.
    // /test3/file is a file under construction.
    FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
    assertTrue(outStream != null);
    expectedLastInodeId += 2;
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    // Apply editlogs to the fsimage, ensure inodeUnderConstruction is handled.
    fsn.enterSafeMode(false);
    fsn.saveNamespace(0, 0);
    fsn.leaveSafeMode(false);
    outStream.close();
    // The lastInodeId in the fsimage should remain the same after restart.
    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.dir.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
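The fileId checked above is the file's inode ID, and HDFS also lets clients address a file directly by that ID through the reserved path /.reserved/.inodes/&lt;id&gt;. The sketch below ties the two together; it is a minimal sketch assuming a running MiniDFSCluster like the one in the test, the class name is ours, and everything else uses APIs already shown above plus the reserved-inodes path convention.

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

class InodeIdLookup {
  // Looks up the inode ID of a path through the NameNode RPC (as the test
  // above does) and reads the same file back through the reserved inode path.
  static void openByInodeId(MiniDFSCluster cluster, String file) throws Exception {
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    long inodeId = nnrpc.getFileInfo(file).getFileId();
    FileSystem fs = cluster.getFileSystem();
    // /.reserved/.inodes/<id> resolves to the same file even after renames.
    Path byId = new Path("/.reserved/.inodes/" + inodeId);
    try (FSDataInputStream in = fs.open(byId)) {
      System.out.println("first byte: " + in.read());
    }
  }
}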
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by Apache.
The class TestNameNodeResourceChecker, method testCheckThatNameNodeResourceMonitorIsRunning.
/**
 * Tests that NameNode resource monitor causes the NN to enter safe mode when
 * resources are low.
 */
@Test
public void testCheckThatNameNodeResourceMonitorIsRunning() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    MockNameNodeResourceChecker mockResourceChecker = new MockNameNodeResourceChecker(conf);
    cluster.getNameNode().getNamesystem().nnResourceChecker = mockResourceChecker;
    cluster.waitActive();
    String name = NameNodeResourceMonitor.class.getName();
    boolean isNameNodeMonitorRunning = false;
    Set<Thread> runningThreads = Thread.getAllStackTraces().keySet();
    for (Thread runningThread : runningThreads) {
      if (runningThread.toString().startsWith("Thread[" + name)) {
        isNameNodeMonitorRunning = true;
        break;
      }
    }
    assertTrue("NN resource monitor should be running", isNameNodeMonitorRunning);
    assertFalse("NN should not presently be in safe mode", cluster.getNameNode().isInSafeMode());
    mockResourceChecker.setResourcesAvailable(false);
    // Make sure the NNRM thread has a chance to run.
    long startMillis = Time.now();
    while (!cluster.getNameNode().isInSafeMode() && Time.now() < startMillis + (60 * 1000)) {
      Thread.sleep(1000);
    }
    assertTrue("NN should be in safe mode after resources crossed threshold",
        cluster.getNameNode().isInSafeMode());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
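The thread scan above keys off Thread.toString(), which formats as "Thread[name,priority,group]", so prefixing with "Thread[" plus the class name matches the monitor's thread name. A generic version of the same check, matching on Thread.getName() instead, is sketched below; it is plain JDK code and the class name is just for illustration.

class ThreadProbe {
  // Returns true if any live thread's name starts with the given prefix.
  // This mirrors the scan in the test above, except that it matches on
  // Thread.getName() directly instead of the "Thread[...]" toString form.
  static boolean isThreadRunning(String namePrefix) {
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      if (t.getName().startsWith(namePrefix)) {
        return true;
      }
    }
    return false;
  }
}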
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by Apache.
The class TestNameNodeRespectsBindHostKeys, method testRpcBindHostKey.
@Test(timeout = 300000)
public void testRpcBindHostKey() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
  // NN should not bind the wildcard address by default.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    String address = getRpcServerAddress(cluster);
    assertThat("Bind address not expected to be wildcard by default.",
        address, not("/" + WILDCARD_ADDRESS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }
  LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
  // Tell NN to bind the wildcard address.
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
  // Verify that NN binds wildcard address now.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    String address = getRpcServerAddress(cluster);
    assertThat("Bind address " + address + " is not wildcard.",
        address, is("/" + WILDCARD_ADDRESS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
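The same builder pattern can exercise dfs.namenode.rpc-bind-host outside the test as well. Below is a minimal sketch, assuming only the DFSConfigKeys constant shown above, with "0.0.0.0" standing in for the test's WILDCARD_ADDRESS constant; getRpcServerAddress() appears to be a helper of the test class rather than a public API, so the sketch prints the NameNode's RPC address via NameNode#getNameNodeAddress() instead, and the class and method names are ours.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;

class WildcardBindExample {
  // Brings up a NameNode-only mini cluster with the RPC server bound to the
  // wildcard address, prints the RPC address it started with, then shuts down.
  static void runWithWildcardBind() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    try {
      cluster.waitActive();
      System.out.println("NameNode RPC address: "
          + cluster.getNameNode().getNameNodeAddress());
    } finally {
      cluster.shutdown();
    }
  }
}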