Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache, from the class TestDecommission, method testCountOnDecommissionedNodeList.
/**
 * Fetching live DataNodes while passing the removeDecommissionedNode flag:
 * false - returns the live node list including nodes in DECOMMISSIONED state;
 * true  - returns the live node list excluding nodes in DECOMMISSIONED state.
 * @throws IOException if the cluster cannot be started or the node cannot be
 *         taken out of service
 */
@Test
public void testCountOnDecommissionedNodeList() throws IOException {
  // Speed up heartbeat processing so decommissioning completes quickly.
  getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  try {
    startCluster(1, 1);
    // Move the single datanode to DECOMMISSIONED state; the helper records
    // the decommissioned node into this list.
    ArrayList<DatanodeInfo> decommissionedNode = new ArrayList<DatanodeInfo>(1);
    takeNodeOutofService(0, null, 0, decommissionedNode, AdminStates.DECOMMISSIONED);
    FSNamesystem ns = getCluster().getNamesystem(0);
    DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    // fetchDatanodes with removeDecommissionedNode == false should return the
    // live, decommissioned node.
    datanodeManager.fetchDatanodes(live, null, false);
    assertTrue("Live list should include the decommissioned node",
        1 == live.size());
    // fetchDatanodes with removeDecommissionedNode == true should filter the
    // decommissioned node out.
    datanodeManager.fetchDatanodes(live, null, true);
    assertTrue("Live list should exclude the decommissioned node",
        0 == live.size());
  } finally {
    shutdownCluster();
  }
}
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache, from the class TestDecommission, method testDecommissionWithOpenfile.
/**
 * Verifies that datanodes can be decommissioned while a file is still open
 * for write: decommissions the replicas of the first block that do not also
 * hold the last (under-construction) block, plus one holder of the last
 * block, and waits for all of them to reach DECOMMISSIONED state.
 * @throws IOException if cluster or filesystem operations fail
 * @throws InterruptedException if waiting for the admin state is interrupted
 */
@Test(timeout = 120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithOpenfile");
  //At most 4 nodes will be decommissioned
  startCluster(1, 7);
  FileSystem fileSys = getCluster().getFileSystem(0);
  FSNamesystem ns = getCluster().getNamesystem(0);
  String openFile = "/testDecommissionWithOpenfile.dat";
  writeFile(fileSys, new Path(openFile), (short) 3);
  // Re-open the file for append so its last block stays open for write.
  FSDataOutputStream fdos = fileSys.append(new Path(openFile));
  try {
    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(getCluster().getNameNode(0), openFile, 0, fileSize);
    DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
    DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
    ArrayList<String> nodes = new ArrayList<String>();
    ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();
    DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
    // Pick the first-block replicas that do NOT also host the last block, so
    // the open block keeps enough live replicas during decommissioning.
    for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
      boolean holdsLastBlock = false;
      for (DatanodeInfo dif : dnInfos4LastBlock) {
        if (datanodeInfo.equals(dif)) {
          holdsLastBlock = true;
          break;
        }
      }
      if (!holdsLastBlock) {
        nodes.add(datanodeInfo.getXferAddr());
        dnInfos.add(dm.getDatanode(datanodeInfo));
      }
    }
    //decommission one of the 3 nodes which have last block
    nodes.add(dnInfos4LastBlock[0].getXferAddr());
    dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
    initExcludeHosts(nodes);
    refreshNodes(0);
    for (DatanodeInfo dn : dnInfos) {
      waitNodeState(dn, AdminStates.DECOMMISSIONED);
    }
  } finally {
    // Close the append stream even if decommissioning fails, so the
    // mini-cluster can shut down cleanly (original leaked it on failure).
    fdos.close();
  }
}
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache, from the class TestDecommission, method testClusterStats.
/**
 * For each of the given namenodes: writes a file, decommissions a datanode,
 * verifies namenode statistics across multiple heartbeats, then returns the
 * node to service and verifies the statistics again.
 * @param numNameNodes number of namenodes in the mini-cluster
 * @throws IOException if cluster or filesystem operations fail
 * @throws InterruptedException if waiting on heartbeats is interrupted
 */
public void testClusterStats(int numNameNodes) throws IOException, InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes);
  for (int nnIndex = 0; nnIndex < numNameNodes; nnIndex++) {
    FileSystem fs = getCluster().getFileSystem(nnIndex);
    writeFile(fs, new Path("testClusterStats.dat"), 1);
    FSNamesystem fsn = getCluster().getNamesystem(nnIndex);
    NameNode nn = getCluster().getNameNode(nnIndex);
    // Begin decommissioning one datanode and verify the namenode stats
    // across several datanode heartbeats.
    DatanodeInfo decomInfo = takeNodeOutofService(nnIndex, null, 0, null, AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    verifyStats(nn, fsn, decomInfo, decomNode, true);
    // Stop decommissioning and confirm the stats reflect the recommission.
    DatanodeInfo recommissionedInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    putNodeInService(nnIndex, recommissionedInfo);
    DataNode recommissionedNode = getDataNode(decomInfo);
    verifyStats(nn, fsn, recommissionedInfo, recommissionedNode, false);
  }
}
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache, from the class TestDecommission, method testUsedCapacity.
/**
 * Verifies that the aggregate datanode statistics — used capacity, total
 * capacity, and block-pool used — all change after one of the datanodes is
 * decommissioned.
 * @throws Exception if the cluster cannot be started or the node cannot be
 *         decommissioned
 */
@Test
public void testUsedCapacity() throws Exception {
  int numNamenodes = 1;
  int numDatanodes = 2;
  startCluster(numNamenodes, numDatanodes);
  DatanodeStatistics stats = getCluster().getNamesystem(0).getBlockManager()
      .getDatanodeManager().getDatanodeStatistics();
  // Snapshot the aggregate statistics before any node is decommissioned.
  long usedBefore = stats.getCapacityUsed();
  long totalBefore = stats.getCapacityTotal();
  long blockPoolUsedBefore = stats.getBlockPoolUsed();
  // Decommission one of the two datanodes; the helper records it into the
  // supplied list, and the returned node is tracked as well.
  ArrayList<DatanodeInfo> decommissionedNodes = new ArrayList<>(numDatanodes);
  DatanodeInfo decomNode = takeNodeOutofService(0, null, 0, decommissionedNodes, AdminStates.DECOMMISSIONED);
  decommissionedNodes.add(decomNode);
  // All three aggregates must have moved once the node left service.
  long usedAfter = stats.getCapacityUsed();
  long totalAfter = stats.getCapacityTotal();
  long blockPoolUsedAfter = stats.getBlockPoolUsed();
  assertTrue("DfsUsedCapacity should not be the same after a node has " + "been decommissioned!", usedBefore != usedAfter);
  assertTrue("TotalCapacity should not be the same after a node has " + "been decommissioned!", totalBefore != totalAfter);
  assertTrue("BlockPoolUsed should not be the same after a node has " + "been decommissioned!", blockPoolUsedBefore != blockPoolUsedAfter);
}
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache, from the class TestSortLocatedStripedBlock, method mockDatanodeManager.
/**
 * Builds a DatanodeManager wired to mocked FSNamesystem and BlockManager
 * instances, configured to avoid stale datanodes for reads with the test's
 * STALE_INTERVAL threshold.
 * @return the freshly constructed DatanodeManager
 * @throws IOException if DatanodeManager construction fails
 */
private static DatanodeManager mockDatanodeManager() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, STALE_INTERVAL);
  // The block manager mock only needs to hand out a real lease manager.
  BlockManager blockManager = Mockito.mock(BlockManager.class);
  Mockito.when(blockManager.getBlockReportLeaseManager())
      .thenReturn(new BlockReportLeaseManager(conf));
  FSNamesystem namesystem = Mockito.mock(FSNamesystem.class);
  return new DatanodeManager(blockManager, namesystem, conf);
}
Aggregations