Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
From the class TestWebHdfsDataLocality, method testExcludeDataNodes.
@Test
public void testExcludeDataNodes() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  final String[] racks = { RACK0, RACK0, RACK1, RACK1, RACK2, RACK2 };
  final String[] hosts = { "DataNode1", "DataNode2", "DataNode3", "DataNode4", "DataNode5", "DataNode6" };
  final int nDataNodes = hosts.length;
  LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks)
      + ", hosts=" + Arrays.asList(hosts));
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final NameNode namenode = cluster.getNameNode();
    final DatanodeManager dm = namenode.getNamesystem().getBlockManager().getDatanodeManager();
    LOG.info("dm=" + dm);
    final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
    final String f = "/foo";

    // create a file with three replicas
    final Path p = new Path(f);
    final FSDataOutputStream out = dfs.create(p, (short) 3);
    out.write(1);
    out.close();

    // get the replica locations
    final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(namenode, f, 0, 1);
    final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
    Assert.assertEquals(1, lb.size());
    final DatanodeInfo[] locations = lb.get(0).getLocations();
    Assert.assertEquals(3, locations.length);

    // For GETFILECHECKSUM, OPEN and APPEND,
    // the chosen datanode must be different from the excluded nodes.
    StringBuffer sb = new StringBuffer();
    for (int i = 0; i < 2; i++) {
      sb.append(locations[i].getXferAddr());
      {
        // test GETFILECHECKSUM
        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f,
            GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, sb.toString(), LOCALHOST);
        for (int j = 0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
        }
      }
      {
        // test OPEN
        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f,
            GetOpParam.Op.OPEN, 0, blocksize, sb.toString(), LOCALHOST);
        for (int j = 0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
        }
      }
      {
        // test APPEND
        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(namenode, f,
            PostOpParam.Op.APPEND, -1L, blocksize, sb.toString(), LOCALHOST);
        for (int j = 0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
        }
      }
      sb.append(",");
    }
  } finally {
    cluster.shutdown();
  }
}
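The exclude list the loop builds above is just a comma-separated string of transfer addresses, and chooseDatanode is expected to skip every address it contains. The sketch below distills that exclusion check into a self-contained illustration; the class name, helper name, and addresses are made up for the example, and Hadoop's actual selection logic (rack awareness, load, and so on) is not reproduced here.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ExcludeFilterSketch {

  /** Returns the first candidate whose transfer address is not in the exclude string, or null. */
  static String chooseFirstNotExcluded(String[] candidateXferAddrs, String excludeDatanodes) {
    Set<String> excluded = new HashSet<>(Arrays.asList(excludeDatanodes.split(",")));
    for (String addr : candidateXferAddrs) {
      if (!excluded.contains(addr)) {
        return addr;
      }
    }
    // Every candidate was excluded; the real chooseDatanode handles this case itself.
    return null;
  }

  public static void main(String[] args) {
    String[] candidates = { "10.0.0.1:9866", "10.0.0.2:9866", "10.0.0.3:9866" };
    // Mirrors sb.toString() in the test: excluded transfer addresses joined by commas.
    System.out.println(chooseFirstNotExcluded(candidates, "10.0.0.1:9866,10.0.0.2:9866"));
    // prints 10.0.0.3:9866
  }
}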
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
From the class TestDecommission, method testRecommission.
/**
 * Test that over-replicated blocks are deleted on recommission.
 */
@Test(timeout = 120000)
public void testRecommission() throws Exception {
  final int numDatanodes = 6;
  try {
    LOG.info("Starting test testRecommission");
    startCluster(1, numDatanodes);
    final Path file1 = new Path("testDecommission.dat");
    final int replicas = numDatanodes - 1;
    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
    final FileSystem fileSys = getCluster().getFileSystem();

    // Write a file to n-1 datanodes
    writeFile(fileSys, file1, replicas);

    // Decommission one of the datanodes with a replica
    BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
    assertEquals("Unexpected number of replicas from getFileBlockLocations",
        replicas, loc.getHosts().length);
    final String toDecomHost = loc.getNames()[0];
    String toDecomUuid = null;
    for (DataNode d : getCluster().getDataNodes()) {
      if (d.getDatanodeId().getXferAddr().equals(toDecomHost)) {
        toDecomUuid = d.getDatanodeId().getDatanodeUuid();
        break;
      }
    }
    assertNotNull("Could not find a dn with the block!", toDecomUuid);
    final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid, 0,
        decommissionedNodes, AdminStates.DECOMMISSIONED);
    decommissionedNodes.add(decomNode);
    final BlockManager blockManager = getCluster().getNamesystem().getBlockManager();
    final DatanodeManager datanodeManager = blockManager.getDatanodeManager();
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);

    // Ensure the decommissioned datanode is not automatically shut down
    DFSClient client = getDfsClient(0);
    assertEquals("All datanodes must be alive", numDatanodes,
        client.datanodeReport(DatanodeReportType.LIVE).length);

    // Wait for the block to be replicated onto the remaining nodes
    final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
    final String uuid = toDecomUuid;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        BlockInfo info = blockManager.getStoredBlock(b.getLocalBlock());
        int count = 0;
        StringBuilder sb = new StringBuilder("Replica locations: ");
        for (int i = 0; i < info.numNodes(); i++) {
          DatanodeDescriptor dn = info.getDatanode(i);
          sb.append(dn + ", ");
          if (!dn.getDatanodeUuid().equals(uuid)) {
            count++;
          }
        }
        LOG.info(sb.toString());
        LOG.info("Count: " + count);
        return count == replicas;
      }
    }, 500, 30000);

    // Recommission and wait for the over-replication to be fixed
    putNodeInService(0, decomNode);
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
    DFSTestUtil.waitForReplication(getCluster(), b, 1, replicas, 0);
    cleanupFile(fileSys, file1);
  } finally {
    shutdownCluster();
  }
}
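The replication wait above relies on GenericTestUtils.waitFor, which polls the supplied condition at a fixed interval until it returns true or the overall timeout expires. A minimal, self-contained sketch of that polling pattern follows; it is illustrative only (Hadoop's utility lives in its test module and may take a Guava Supplier rather than the JDK type used here).

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public final class WaitForSketch {

  /** Polls the condition every checkEveryMillis until it returns true or waitForMillis elapses. */
  public static void waitFor(Supplier<Boolean> condition, long checkEveryMillis, long waitForMillis)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + waitForMillis;
    while (!condition.get()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Condition not met within " + waitForMillis + " ms");
      }
      Thread.sleep(checkEveryMillis);
    }
  }
}

In the test, the condition counts replicas stored on nodes other than the decommissioned one, so the wait completes once the block has been fully re-replicated elsewhere.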
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
From the class TestDecommission, method testCountOnDecommissionedNodeList.
/**
 * Fetching live DataNodes by passing removeDecommissionedNode as:
 *   false - returns the live node list including the node in Decommissioned state
 *   true  - returns the live node list excluding the node in Decommissioned state
 * @throws IOException
 */
@Test
public void testCountOnDecommissionedNodeList() throws IOException {
  getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  try {
    startCluster(1, 1);
    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
        new ArrayList<ArrayList<DatanodeInfo>>(1);
    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));

    // Move the single datanode to the Decommissioned state
    ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
    takeNodeOutofService(0, null, 0, decommissionedNode, AdminStates.DECOMMISSIONED);
    FSNamesystem ns = getCluster().getNamesystem(0);
    DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();

    // fetchDatanodes with false should return the live, decommissioned node
    datanodeManager.fetchDatanodes(live, null, false);
    assertTrue(1 == live.size());

    // fetchDatanodes with true should not return the live, decommissioned node
    datanodeManager.fetchDatanodes(live, null, true);
    assertTrue(0 == live.size());
  } finally {
    shutdownCluster();
  }
}
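Both branches of this test pass null for the dead list, but fetchDatanodes can fill live and dead lists in one call. The sketch below shows that call pattern under the same assumptions as the tests on this page (a running MiniDFSCluster); the class and helper names are illustrative.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

public class FetchDatanodesSketch {

  /** Prints live/dead datanode counts as seen by the NameNode's DatanodeManager. */
  static void reportDatanodes(MiniDFSCluster cluster, boolean removeDecommissioned) {
    DatanodeManager dm =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    List<DatanodeDescriptor> live = new ArrayList<>();
    List<DatanodeDescriptor> dead = new ArrayList<>();
    // Either list may be passed as null when only one of them is needed,
    // as the test above does for the dead list.
    dm.fetchDatanodes(live, dead, removeDecommissioned);
    System.out.println("live=" + live.size() + ", dead=" + dead.size());
  }
}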
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
From the class TestDecommission, method testDecommissionWithOpenfile.
@Test(timeout = 120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithOpenfile");
  // At most 4 nodes will be decommissioned
  startCluster(1, 7);
  FileSystem fileSys = getCluster().getFileSystem(0);
  FSNamesystem ns = getCluster().getNamesystem(0);
  String openFile = "/testDecommissionWithOpenfile.dat";
  writeFile(fileSys, new Path(openFile), (short) 3);

  // Make sure the file is open for write
  FSDataOutputStream fdos = fileSys.append(new Path(openFile));
  LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
      getCluster().getNameNode(0), openFile, 0, fileSize);
  DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
  DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
  ArrayList<String> nodes = new ArrayList<String>();
  ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();
  DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

  // Collect the nodes that hold the first block but not the last block
  for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
    DatanodeInfo found = datanodeInfo;
    for (DatanodeInfo dif : dnInfos4LastBlock) {
      if (datanodeInfo.equals(dif)) {
        found = null;
      }
    }
    if (found != null) {
      nodes.add(found.getXferAddr());
      dnInfos.add(dm.getDatanode(found));
    }
  }

  // Decommission one of the 3 nodes that hold the last block
  nodes.add(dnInfos4LastBlock[0].getXferAddr());
  dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
  initExcludeHosts(nodes);
  refreshNodes(0);
  for (DatanodeInfo dn : dnInfos) {
    waitNodeState(dn, AdminStates.DECOMMISSIONED);
  }
  fdos.close();
}
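initExcludeHosts and refreshNodes are helpers in the test base class; the sketch below outlines, under stated assumptions, the flow they drive: add the target's transfer address to the file configured as dfs.hosts.exclude, have the DatanodeManager re-read the host lists, then wait for the admin state to reach DECOMMISSIONED. The file handling and polling here are illustrative, not Hadoop's helper implementation, and the target should be the live descriptor obtained from the DatanodeManager (as dm.getDatanode(...) returns above) so that its admin state reflects the NameNode's view.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

public class DecommissionSketch {

  /** Decommissions one datanode via the exclude file plus refreshNodes, then waits for completion. */
  static void decommission(DatanodeManager dm, Configuration conf,
      DatanodeInfo target, String excludeFilePath) throws Exception {
    // 1. Append the datanode's transfer address to the exclude file
    //    (the file that dfs.hosts.exclude points at).
    Files.write(Paths.get(excludeFilePath),
        (target.getXferAddr() + "\n").getBytes(StandardCharsets.UTF_8),
        StandardOpenOption.CREATE, StandardOpenOption.APPEND);
    // 2. Tell the NameNode to re-read the include/exclude host lists.
    dm.refreshNodes(conf);
    // 3. Wait for re-replication to finish and the admin state to flip
    //    from DECOMMISSION_INPROGRESS to DECOMMISSIONED.
    while (!target.isDecommissioned()) {
      Thread.sleep(1000);
    }
  }
}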
Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager in project hadoop by apache.
From the class TestDatanodeRegistration, method testDNSLookups.
/**
 * Ensure the datanode manager does not do host lookup after registration,
 * especially for node reports.
 * @throws Exception
 */
@Test
public void testDNSLookups() throws Exception {
  MonitorDNS sm = new MonitorDNS();
  System.setSecurityManager(sm);
  MiniDFSCluster cluster = null;
  try {
    HdfsConfiguration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
    cluster.waitActive();
    int initialLookups = sm.lookups;
    assertTrue("dns security manager is active", initialLookups != 0);
    DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();

    // make sure no lookups occur
    dm.refreshNodes(conf);
    assertEquals(initialLookups, sm.lookups);
    dm.refreshNodes(conf);
    assertEquals(initialLookups, sm.lookups);

    // ensure none of the reports trigger lookups
    dm.getDatanodeListForReport(DatanodeReportType.ALL);
    assertEquals(initialLookups, sm.lookups);
    dm.getDatanodeListForReport(DatanodeReportType.LIVE);
    assertEquals(initialLookups, sm.lookups);
    dm.getDatanodeListForReport(DatanodeReportType.DEAD);
    assertEquals(initialLookups, sm.lookups);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    System.setSecurityManager(null);
  }
}
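MonitorDNS is defined elsewhere in TestDatanodeRegistration; the sketch below is an assumption about its general shape, not the actual class. The idea is a SecurityManager that counts host-name resolutions: the runtime calls checkConnect with a port of -1 whenever it is about to resolve a host, so counting those calls reveals DNS activity (the actual class may additionally filter expected lookups such as localhost).

import java.security.Permission;

class DnsCountingSecurityManager extends SecurityManager {

  volatile int lookups = 0;

  @Override
  public void checkConnect(String host, int port) {
    if (port == -1) {
      // A port of -1 means the runtime is resolving a host name, i.e. a DNS lookup.
      lookups++;
    }
  }

  @Override
  public void checkPermission(Permission perm) {
    // Permit everything else; this manager only observes DNS activity.
  }

  @Override
  public void checkPermission(Permission perm, Object context) {
    // Same: no restrictions, observation only.
  }
}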