Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
Class TestDataNodeVolumeFailure, method verify:
/**
 * Verifies two things:
 *  1. the number of locations of each block in the NameNode matches
 *     the number of actual block files
 *  2. block files + pending blocks add up to the total number of blocks
 *     the file has, including replication (an HDFS file with 30 blocks
 *     at repl=2 gives 60 in total)
 * @param fn - file name
 * @param fs - file size
 * @throws IOException
 */
private void verify(String fn, int fs) throws IOException {
  // now count how many physical blocks are there
  int totalReal = countRealBlocks(block_map);
  System.out.println("countRealBlocks counted " + totalReal + " blocks");
  // count how many blocks are stored in NN structures.
  int totalNN = countNNBlocks(block_map, fn, fs);
  System.out.println("countNNBlocks counted " + totalNN + " blocks");
  for (String bid : block_map.keySet()) {
    BlockLocs bl = block_map.get(bid);
    // System.out.println(bid + "->" + bl.num_files + "vs." + bl.num_locs);
    // the number of physical files (1 or 2) should equal the number of
    // datanodes in the list of the block's locations
    assertEquals("Num files should match num locations",
        bl.num_files, bl.num_locs);
  }
  assertEquals("Num physical blocks should match num stored in the NN",
      totalReal, totalNN);
  // now check the number of under-replicated blocks
  FSNamesystem fsn = cluster.getNamesystem();
  // force update of all the metric counts by calling computeDatanodeWork
  BlockManagerTestUtil.getComputedDatanodeWork(fsn.getBlockManager());
  // get all the counts
  long underRepl = fsn.getUnderReplicatedBlocks();
  long pendRepl = fsn.getPendingReplicationBlocks();
  long totalRepl = underRepl + pendRepl;
  System.out.println("underreplicated after = " + underRepl
      + " and pending repl =" + pendRepl + "; total underRepl = " + totalRepl);
  System.out.println("total blocks (real and replicating):"
      + (totalReal + totalRepl) + " vs. all files blocks " + blocks_num * 2);
  // together, all real + all replicating blocks should equal the expected total
  assertEquals("Incorrect total block count",
      totalReal + totalRepl, blocks_num * repl);
}
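The counting helpers are not shown on this page. Below is a minimal sketch of what countNNBlocks could look like, assuming the block_map and BlockLocs fields used above and the standard getBlockLocations RPC; the real helper in TestDataNodeVolumeFailure may well differ.

// Hypothetical sketch of countNNBlocks; it mirrors the signature used above,
// tallying the locations the NameNode reports for each block of the file.
// import org.apache.hadoop.hdfs.protocol.LocatedBlock;
// import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
private int countNNBlocksSketch(Map<String, BlockLocs> map, String fn, long fs)
    throws IOException {
  int total = 0;
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> blocks =
      nn.getBlockLocations(fn, 0, fs).getLocatedBlocks();
  for (LocatedBlock lb : blocks) {
    String bid = lb.getBlock().getBlockName();
    BlockLocs bl = map.get(bid);
    if (bl == null) {
      bl = new BlockLocs();
    }
    // one entry per datanode the NameNode believes holds a replica
    bl.num_locs = lb.getLocations().length;
    map.put(bid, bl);
    total += bl.num_locs;
  }
  return total;
}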
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
Class TestHASafeMode, method testEnterSafeModeInSBNShouldNotThrowNPE:
/**
 * Test case for entering safe mode on the standby NameNode when it is
 * already in startup safe mode. Regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInSBNShouldNotThrowNPE() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil.createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  // Roll edit log so that, when the SBN restarts, it will load
  // the namespace during startup and enter safemode.
  nn0.getRpcServer().rollEditLog();
  banner("Creating some blocks that won't be in the edit log");
  DFSTestUtil.createFile(fs, new Path("/test2"), 5 * BLOCK_SIZE, (short) 3, 1L);
  banner("Deleting the original blocks");
  fs.delete(new Path("/test"), true);
  banner("Restarting standby");
  restartStandby();
  FSNamesystem namesystem = nn1.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'",
      status.startsWith("Safe mode is ON."));
  // entering safe mode again while already in startup safe mode must not NPE
  NameNodeAdapter.enterSafeMode(nn1, false);
  assertTrue("Failed to enter into safemode in standby", namesystem.isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn1, false);
  assertTrue("Failed to enter into safemode in standby", namesystem.isInSafeMode());
}
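The banner() helper is not shown on this page. A plausible minimal version, an assumption rather than the project's exact code, simply formats the message so each test phase stands out in the log:

// Hypothetical sketch of the banner() helper used above; it only pads the
// message with separator lines so phases are easy to spot in test output.
static void banner(String string) {
  LOG.info("\n\n================================================\n"
      + string
      + "\n================================================\n\n");
}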
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
Class TestHASafeMode, method testEnterSafeModeInANNShouldNotThrowNPE:
/**
 * Test case for entering safe mode on the active NameNode when it is
 * already in startup safe mode. Regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil.createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'",
      status.startsWith("Safe mode is ON."));
  // same idempotence check as the standby variant, on the active NN
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem.isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem.isInSafeMode());
}
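A natural follow-up check, not part of the original test: leaving safe mode should be equally NPE-free and idempotent. This sketch assumes a single-argument NameNodeAdapter.leaveSafeMode(NameNode) test helper is available in this version.

// Hypothetical follow-up (not in the original test): leaving safe mode
// twice should also be a harmless no-op.
NameNodeAdapter.leaveSafeMode(nn0);
assertFalse("Should have left safemode", namesystem.isInSafeMode());
NameNodeAdapter.leaveSafeMode(nn0);
assertFalse("Leaving safemode twice should be a no-op",
    namesystem.isInSafeMode());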
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
Class MiniDFSCluster, method waitFirstBRCompleted:
/** Wait until the given namenode gets its first block reports from all the datanodes. */
public void waitFirstBRCompleted(int nnIndex, int timeout)
    throws IOException, TimeoutException, InterruptedException {
  if (namenodes.size() == 0 || getNN(nnIndex) == null
      || getNN(nnIndex).nameNode == null) {
    return;
  }
  final FSNamesystem ns = getNamesystem(nnIndex);
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      List<DatanodeDescriptor> nodes =
          dm.getDatanodeListForReport(DatanodeReportType.LIVE);
      for (DatanodeDescriptor node : nodes) {
        if (!node.checkBlockReportReceived()) {
          return false;
        }
      }
      return true;
    }
  }, 100, timeout);
}
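Typical usage from a test might look like the sketch below, given a Configuration conf; the builder, waitActive(), and shutdown() are the standard MiniDFSCluster API, while the timeout value is an arbitrary choice.

// Sketch: block a test until namenode 0 has an initial block report from
// every datanode, so later assertions see a fully reported namespace.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(3)
    .build();
try {
  cluster.waitActive();
  cluster.waitFirstBRCompleted(0, 10000); // nnIndex 0, 10-second timeout
  // ... assertions that depend on complete block reports ...
} finally {
  cluster.shutdown();
}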
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
Class TestDecommission, method testMultipleNodesDecommission:
/**
 * Verify that multiple DataNodes can be decommissioned at the same time.
 */
@Test(timeout = 360000)
public void testMultipleNodesDecommission() throws Exception {
  startCluster(1, 5);
  final Path file = new Path("/testMultipleNodesDecommission.dat");
  final FileSystem fileSys = getCluster().getFileSystem(0);
  final FSNamesystem ns = getCluster().getNamesystem(0);
  int repl = 3;
  writeFile(fileSys, file, repl, 1);
  // Request decommission for DataNodes 1 and 2.
  List<DatanodeInfo> decomDataNodes = takeNodeOutofService(0,
      Lists.newArrayList(
          getCluster().getDataNodes().get(0).getDatanodeUuid(),
          getCluster().getDataNodes().get(1).getDatanodeUuid()),
      Long.MAX_VALUE, null, null, AdminStates.DECOMMISSIONED);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        String errMsg = checkFile(fileSys, file, repl,
            decomDataNodes.get(0).getXferAddr(), 5);
        if (errMsg != null) {
          LOG.warn("Check file: " + errMsg);
        }
        return true;
      } catch (IOException e) {
        LOG.warn("Check file: " + e);
        return false;
      }
    }
  }, 500, 30000);
  // Put the decommissioned nodes back in service.
  for (DatanodeInfo datanodeInfo : decomDataNodes) {
    putNodeInService(0, datanodeInfo);
  }
  cleanupFile(fileSys, file);
}
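The ns handle is why this snippet is indexed on this page, though the test never reads it. One way it could be used, right after the waitFor block and before the nodes are put back in service, is sketched below; it assumes the FSNamesystemMBean decommission metrics getNumDecomLiveDataNodes() and getNumDecommissioningDataNodes() are available, and it is not part of the original test.

// Hypothetical extra assertions via the FSNamesystem handle obtained above.
assertEquals("Both nodes should be decommissioned and live",
    2, ns.getNumDecomLiveDataNodes());
assertEquals("No node should still be decommissioning",
    0, ns.getNumDecommissioningDataNodes());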