Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestDatanodeManager, method testRemoveIncludedNode.
/**
 * Test whether removing a host from the includes list without adding it to
 * the excludes list will exclude it from data node reports.
 */
@Test
public void testRemoveIncludedNode() throws IOException {
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);

  // Set the write lock so that the DatanodeManager can start
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);

  DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
  HostFileManager hm = new HostFileManager();
  HostSet noNodes = new HostSet();
  HostSet oneNode = new HostSet();
  HostSet twoNodes = new HostSet();
  DatanodeRegistration dr1 = new DatanodeRegistration(
      new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
          12345, 12345, 12345, 12345),
      new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
      new ExportedBlockKeys(), "test");
  DatanodeRegistration dr2 = new DatanodeRegistration(
      new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234",
          23456, 23456, 23456, 23456),
      new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
      new ExportedBlockKeys(), "test");

  twoNodes.add(entry("127.0.0.1:12345"));
  twoNodes.add(entry("127.0.0.1:23456"));
  oneNode.add(entry("127.0.0.1:23456"));

  hm.refresh(twoNodes, noNodes);
  Whitebox.setInternalState(dm, "hostConfigManager", hm);

  // Register two data nodes to simulate them coming up.
  // We need to add two nodes, because if we have only one node, removing it
  // will cause the includes list to be empty, which means all hosts will be
  // allowed.
  dm.registerDatanode(dr1);
  dm.registerDatanode(dr2);

  // Make sure that both nodes are reported
  List<DatanodeDescriptor> both =
      dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);

  // Sort the list so that we know which one is which
  Collections.sort(both);

  Assert.assertEquals("Incorrect number of hosts reported", 2, both.size());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:12345", both.get(0).getInfoAddr());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:23456", both.get(1).getInfoAddr());

  // Remove one node from includes, but do not add it to excludes.
  hm.refresh(oneNode, noNodes);

  // Make sure that only one node is still reported
  List<DatanodeDescriptor> onlyOne =
      dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
  Assert.assertEquals("Incorrect number of hosts reported", 1, onlyOne.size());
  Assert.assertEquals("Unexpected host reported",
      "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());

  // Remove all nodes from includes
  hm.refresh(noNodes, noNodes);

  // Check that both nodes are reported again
  List<DatanodeDescriptor> bothAgain =
      dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);

  // Sort the list so that we know which one is which
  Collections.sort(bothAgain);

  Assert.assertEquals("Incorrect number of hosts reported", 2, bothAgain.size());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
}
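The include/exclude semantics this test exercises through getDatanodeListForReport can also be probed against the HostFileManager directly. Below is a minimal sketch, assuming HostFileManager exposes isIncluded(DatanodeID) and isExcluded(DatanodeID) (inherited from HostConfigManager in this Hadoop version) and reusing the entry(...) helper from the test class above:

// Sketch only: query the same HostFileManager wiring that the test installs via Whitebox.
HostFileManager manager = new HostFileManager();
HostSet includes = new HostSet();
includes.add(entry("127.0.0.1:23456"));    // keep only the second datanode
manager.refresh(includes, new HostSet());  // excludes stays empty

DatanodeID kept = new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234",
    23456, 23456, 23456, 23456);
DatanodeID removed = new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
    12345, 12345, 12345, 12345);

// With a non-empty includes list, only listed hosts are admitted.
Assert.assertTrue(manager.isIncluded(kept));
Assert.assertFalse(manager.isIncluded(removed));
// The removed node was never added to excludes, which is exactly the case the test covers.
Assert.assertFalse(manager.isExcluded(removed));

With an empty includes list every node is considered included, which is why the test registers two datanodes before removing one.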
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestDecommissioningStatus, method checkDFSAdminDecommissionStatus.
private void checkDFSAdminDecommissionStatus(
    List<DatanodeDescriptor> expectedDecomm, DistributedFileSystem dfs,
    DFSAdmin admin) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintStream ps = new PrintStream(baos);
  PrintStream oldOut = System.out;
  System.setOut(ps);
  try {
    // Parse DFSAdmin just to check the count
    admin.report(new String[] { "-decommissioning" }, 0);
    String[] lines = baos.toString().split("\n");
    Integer num = null;
    int count = 0;
    for (String line : lines) {
      if (line.startsWith("Decommissioning datanodes")) {
        // Pull out the "(num)" and parse it into an int
        String temp = line.split(" ")[2];
        num = Integer.parseInt(temp.substring(1, temp.length() - 2));
      }
      if (line.contains("Decommission in progress")) {
        count++;
      }
    }
    assertTrue("No decommissioning output", num != null);
    assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
        num.intValue());
    assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
        count);

    // Check Java API for correct contents
    List<DatanodeInfo> decomming = new ArrayList<DatanodeInfo>(
        Arrays.asList(dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING)));
    assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
        decomming.size());
    for (DatanodeID id : expectedDecomm) {
      assertTrue("Did not find expected decomming DN " + id,
          decomming.contains(id));
    }
  } finally {
    System.setOut(oldOut);
  }
}
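The substring arithmetic above depends on the exact spacing of the report header. As an illustrative alternative (not part of the Hadoop test), the count could be pulled out of the "Decommissioning datanodes (N):" line with a regular expression:

// Illustrative sketch: extract the decommissioning count with a regex instead of
// split/substring. Assumes the header format "Decommissioning datanodes (N):".
java.util.regex.Matcher m = java.util.regex.Pattern
    .compile("Decommissioning datanodes \\((\\d+)\\)")
    .matcher(line);
if (m.find()) {
  num = Integer.valueOf(m.group(1));
}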
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestDecommissioningStatus, method testDecommissionDeadDN.
/**
 * Verify the support for decommissioning a datanode that is already dead.
 * Under this scenario the datanode should immediately be marked as
 * DECOMMISSIONED.
 */
@Test(timeout = 120000)
public void testDecommissionDeadDN() throws Exception {
  Logger log = Logger.getLogger(DecommissionManager.class);
  log.setLevel(Level.DEBUG);
  DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
  String dnName = dnID.getXferAddr();
  DataNodeProperties stoppedDN = cluster.stopDataNode(0);
  DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(), false, 30000);
  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
  decommissionNode(dnName);
  dm.refreshNodes(conf);
  BlockManagerTestUtil.recheckDecommissionState(dm);
  assertTrue(dnDescriptor.isDecommissioned());

  // Add the node back
  cluster.restartDataNode(stoppedDN, true);
  cluster.waitActive();

  // Call refreshNodes with an empty exclude file to remove the datanode
  // from the decommissioning list and make it available again.
  hostsFileWriter.initExcludeHost("");
  dm.refreshNodes(conf);
}
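Note that decommissionNode(dnName) is a private helper of TestDecommissioningStatus, not a public HDFS API. A minimal sketch of what such a helper plausibly does is shown below, assuming the same HostsFileWriter field the test uses later to clear the exclude file; the real helper may additionally record the excluded nodes for later assertions:

// Hypothetical sketch of the decommissionNode(...) helper used above.
// It writes the datanode's transfer address into the excludes file; the test then
// calls dm.refreshNodes(conf) so the DatanodeManager starts decommissioning it.
private void decommissionNode(String dnXferAddr) throws IOException {
  hostsFileWriter.initExcludeHost(dnXferAddr);
}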
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithCloseAndNonExistantTarget.
@Test
public void testCommitBlockSynchronizationWithCloseAndNonExistantTarget()
    throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[] {
      new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0) };
  String[] storageIDs = new String[] { "fake-storage-ID" };
  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, storageIDs);

  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, true, false, newTargets, storageIDs);
}
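For reference, the seven-argument DatanodeID constructor used throughout these examples takes, in order, the IP address, hostname, datanode UUID (the storage ID in older releases), and the xfer, info, secure-info, and IPC ports. The sketch below labels the same call as above; the parameter comments are descriptive annotations, not copied from the Hadoop source:

// DatanodeID(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort)
DatanodeID target = new DatanodeID(
    "0.0.0.0",          // ipAddr
    "nonexistantHost",  // hostName (deliberately unresolvable in this test)
    "1",                // datanodeUuid
    0,                  // xferPort
    0,                  // infoPort
    0,                  // infoSecurePort
    0);                 // ipcPort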
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
From the class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithDelete.
@Test
public void testCommitBlockSynchronizationWithDelete() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];
  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, true, newTargets, null);

  // Simulate removing the last block from the file.
  doReturn(null).when(file).removeLastBlock(any(Block.class));

  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, true, newTargets, null);
}
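As a follow-up, one could also assert that the delete path actually reached the file under construction. A sketch, assuming the Mockito wiring set up by mockFileUnderConstruction() and makeNameSystemSpy(...) above (this check is not part of the original test):

// Sketch only: removeLastBlock should have been invoked on the mocked INodeFile
// at least once when commitBlockSynchronization ran with deleteblock = true.
verify(file, atLeastOnce()).removeLastBlock(any(Block.class));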