Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestDecommissionWithStriped, method prepareBlockIndexAndTokenList.
private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
    List<HashMap<DatanodeInfo, Byte>> locToIndexList,
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
  for (LocatedBlock lb : lbs) {
    HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<DatanodeInfo, Byte>();
    locToIndexList.add(locToIndex);
    HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
        new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
    locToTokenList.add(locToToken);
    DatanodeInfo[] di = lb.getLocations();
    LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
    // Record, per replica location, the striped block index and access token it serves.
    for (int i = 0; i < di.length; i++) {
      locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
      locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
    }
  }
}
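A hedged usage sketch (the dfsClient handle, file path, and length below are illustrative assumptions, not part of the snippet): after fetching a striped file's located blocks, the populated maps answer which block index and token a given replica location carries.

// Illustrative caller of prepareBlockIndexAndTokenList; dfsClient, "/ec/file"
// and fileLen are assumed to exist in the surrounding test.
List<LocatedBlock> lbs =
    dfsClient.getLocatedBlocks("/ec/file", 0, fileLen).getLocatedBlocks();
List<HashMap<DatanodeInfo, Byte>> locToIndexList =
    new ArrayList<HashMap<DatanodeInfo, Byte>>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
    new ArrayList<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
// Block index served by the first location of the first block group.
Byte idx = locToIndexList.get(0).get(lbs.get(0).getLocations()[0]);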
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestDatanodeReport, method assertReports.
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);
  for (int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());
    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      // check that the reported storages match what the datanode itself reports
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);
      assertEquals(expected.length, computed.length);
      for (int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
            computed[j].getStorage().getStorageID());
      }
    }
  }
}
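The CMP comparator used for sorting is defined elsewhere in TestDatanodeReport; a minimal sketch consistent with its use here, assuming it orders StorageReports by storage ID:

// Sketch of a storage-ID comparator compatible with the sorting above.
static final Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
  @Override
  public int compare(StorageReport left, StorageReport right) {
    return left.getStorage().getStorageID()
        .compareTo(right.getStorage().getStorageID());
  }
};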
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestDatanodeReport, method testDatanodeReport.
/**
 * This test exercises the different types of datanode report
 * (ALL, LIVE and DEAD).
 */
@Test
public void testDatanodeReport() throws Exception {
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, // 0.5s
      500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
  try {
    // wait until the cluster is up
    cluster.waitActive();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final List<DataNode> datanodes = cluster.getDataNodes();
    final DFSClient client = cluster.getFileSystem().dfs;
    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
    assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
    assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
    // bring down one datanode
    final DataNode last = datanodes.get(datanodes.size() - 1);
    LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
    last.shutdown();
    // poll until the namenode reports exactly one dead datanode
    DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    while (nodeInfo.length != 1) {
      try {
        Thread.sleep(500);
      } catch (Exception e) {
        // ignore and retry
      }
      nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    }
    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
    assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
    assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
    // give the namenode time to reflect the expired heartbeat in its metrics
    Thread.sleep(5000);
    assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
  } finally {
    cluster.shutdown();
  }
}
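The hand-rolled sleep loop above can also be written with a polling helper; a hedged sketch using GenericTestUtils.waitFor from org.apache.hadoop.test, assuming the Guava Supplier variant of waitFor used at this point in the codebase, with illustrative timeout values:

// Illustrative: poll until exactly one datanode is reported DEAD,
// instead of looping over Thread.sleep by hand.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    try {
      return client.datanodeReport(DatanodeReportType.DEAD).length == 1;
    } catch (IOException e) {
      return false;
    }
  }
}, 500, 30000);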
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestDecommission, method testCountOnDecommissionedNodeList.
/**
 * Tests fetching live DataNodes with the removeDecommissionedNode flag:
 * false - returns the live node list including nodes in Decommissioned state
 * true  - returns the live node list excluding nodes in Decommissioned state
 * @throws IOException
 */
@Test
public void testCountOnDecommissionedNodeList() throws IOException {
  getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  try {
    startCluster(1, 1);
    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
        new ArrayList<ArrayList<DatanodeInfo>>(1);
    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));
    // move the single datanode to Decommissioned state
    ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
    takeNodeOutofService(0, null, 0, decommissionedNode, AdminStates.DECOMMISSIONED);
    FSNamesystem ns = getCluster().getNamesystem(0);
    DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    // fetchDatanodes with false should return the live decommissioned node
    datanodeManager.fetchDatanodes(live, null, false);
    assertEquals(1, live.size());
    // fetchDatanodes with true should not return the live decommissioned node
    datanodeManager.fetchDatanodes(live, null, true);
    assertEquals(0, live.size());
  } finally {
    shutdownCluster();
  }
}
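As a hedged cross-check from the client side (the client handle and LOG are assumed to be available in the test class, as in the other snippets here), the same node would still appear in a LIVE datanode report, flagged with its admin state:

// Illustrative: a decommissioned node is still reported LIVE,
// but DatanodeInfo exposes its admin state.
DatanodeInfo[] liveNodes = client.datanodeReport(DatanodeReportType.LIVE);
for (DatanodeInfo dn : liveNodes) {
  if (dn.isDecommissioned()) {
    LOG.info("Live but decommissioned: " + dn.getXferAddr());
  }
}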
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestDecommission, method testDecommissionWithOpenfile.
@Test(timeout = 120000)
public void testDecommissionWithOpenfile()
    throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithOpenfile");
  // at most 4 nodes will be decommissioned
  startCluster(1, 7);
  FileSystem fileSys = getCluster().getFileSystem(0);
  FSNamesystem ns = getCluster().getNamesystem(0);
  String openFile = "/testDecommissionWithOpenfile.dat";
  writeFile(fileSys, new Path(openFile), (short) 3);
  // make sure the file is open for write
  FSDataOutputStream fdos = fileSys.append(new Path(openFile));
  LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
      getCluster().getNameNode(0), openFile, 0, fileSize);
  DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
  DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
  ArrayList<String> nodes = new ArrayList<String>();
  ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();
  DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  // pick the nodes that hold the first block but not the last block
  for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
    DatanodeInfo found = datanodeInfo;
    for (DatanodeInfo dif : dnInfos4LastBlock) {
      if (datanodeInfo.equals(dif)) {
        found = null;
      }
    }
    if (found != null) {
      nodes.add(found.getXferAddr());
      dnInfos.add(dm.getDatanode(found));
    }
  }
  // decommission one of the 3 nodes which hold the last block
  nodes.add(dnInfos4LastBlock[0].getXferAddr());
  dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
  initExcludeHosts(nodes);
  refreshNodes(0);
  for (DatanodeInfo dn : dnInfos) {
    waitNodeState(dn, AdminStates.DECOMMISSIONED);
  }
  fdos.close();
}
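The replica-selection loop above is equivalent to a set difference; a hedged sketch using the same variables as the test, relying on DatanodeInfo inheriting equals/hashCode from DatanodeID:

// Illustrative: select the nodes holding the first block but not the last.
Set<DatanodeInfo> candidates =
    new HashSet<DatanodeInfo>(Arrays.asList(dnInfos4FirstBlock));
candidates.removeAll(Arrays.asList(dnInfos4LastBlock));
for (DatanodeInfo found : candidates) {
  nodes.add(found.getXferAddr());
  dnInfos.add(dm.getDatanode(found));
}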