Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestDatanodeManager, method testNumVersionsCorrectAfterReregister.
/**
 * This test checks that if a node is re-registered with a new software
 * version after the heartbeat expiry interval, but before the
 * HeartbeatManager has had a chance to detect this and remove it, the
 * count for the node's old software version is still correctly decremented.
 */
@Test
public void testNumVersionsCorrectAfterReregister()
    throws IOException, InterruptedException {
  // Create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 0);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 10);
  DatanodeManager dm = mockDatanodeManager(fsn, conf);
  String storageID = "someStorageID1";
  String ip = "someIP" + storageID;
  // Register, then re-register the same node but with a different version
  for (int i = 0; i <= 1; i++) {
    dm.registerDatanode(new DatanodeRegistration(
        new DatanodeID(ip, "", storageID, 9000, 0, 0, 0),
        null, null, "version" + i));
    if (i == 0) {
      Thread.sleep(25);
    }
  }
  // Verify that DatanodeManager has the correct count
  Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
  assertNull("should be no more version0 nodes", mapToCheck.get("version0"));
  assertEquals("should be one version1 node",
      1, mapToCheck.get("version1").intValue());
}
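
The mockDatanodeManager helper called above is not part of this snippet. A minimal sketch of what it might look like, assuming the DatanodeManager constructor accepts a BlockManager, the namesystem, and a Configuration (and that the sketch lives in the same package, since that constructor is package-private):

  // Hypothetical helper (not shown in the snippet above): builds a
  // DatanodeManager backed by a mocked BlockManager so that no real
  // NameNode is required.
  private static DatanodeManager mockDatanodeManager(
      FSNamesystem fsn, Configuration conf) throws IOException {
    BlockManager bm = Mockito.mock(BlockManager.class);
    return new DatanodeManager(bm, fsn, conf);
  }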
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestDatanodeManager, method testRemoveIncludedNode.
/**
* Test whether removing a host from the includes list without adding it to
* the excludes list will exclude it from data node reports.
*/
@Test
public void testRemoveIncludedNode() throws IOException {
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  // Set the write lock so that the DatanodeManager can start
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
  HostFileManager hm = new HostFileManager();
  HostSet noNodes = new HostSet();
  HostSet oneNode = new HostSet();
  HostSet twoNodes = new HostSet();
  DatanodeRegistration dr1 = new DatanodeRegistration(
      new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
          12345, 12345, 12345, 12345),
      new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
      new ExportedBlockKeys(), "test");
  DatanodeRegistration dr2 = new DatanodeRegistration(
      new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234",
          23456, 23456, 23456, 23456),
      new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
      new ExportedBlockKeys(), "test");
  twoNodes.add(entry("127.0.0.1:12345"));
  twoNodes.add(entry("127.0.0.1:23456"));
  oneNode.add(entry("127.0.0.1:23456"));
  hm.refresh(twoNodes, noNodes);
  Whitebox.setInternalState(dm, "hostConfigManager", hm);
  // Register two data nodes to simulate them coming up. We need to add two
  // nodes, because if we have only one node, removing it will cause the
  // includes list to be empty, which means all hosts will be allowed.
  dm.registerDatanode(dr1);
  dm.registerDatanode(dr2);
  // Make sure that both nodes are reported
  List<DatanodeDescriptor> both =
      dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
  // Sort the list so that we know which one is which
  Collections.sort(both);
  Assert.assertEquals("Incorrect number of hosts reported", 2, both.size());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:12345", both.get(0).getInfoAddr());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:23456", both.get(1).getInfoAddr());
  // Remove one node from includes, but do not add it to excludes.
  hm.refresh(oneNode, noNodes);
  // Make sure that only one node is still reported
  List<DatanodeDescriptor> onlyOne =
      dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
  Assert.assertEquals("Incorrect number of hosts reported", 1, onlyOne.size());
  Assert.assertEquals("Unexpected host reported",
      "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());
  // Remove all nodes from includes
  hm.refresh(noNodes, noNodes);
  // Check that both nodes are reported again
  List<DatanodeDescriptor> bothAgain =
      dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
  // Sort the list so that we know which one is which
  Collections.sort(bothAgain);
  Assert.assertEquals("Incorrect number of hosts reported", 2, bothAgain.size());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
  Assert.assertEquals("Unexpected host or host in unexpected position",
      "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
}
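
The entry helper used to populate the HostSet instances is likewise not shown. Since HostSet stores InetSocketAddress entries, a minimal sketch could simply resolve the "host:port" string with Hadoop's NetUtils (this helper body is an assumption, not the exact test code):

  // Hypothetical helper: turns "host:port" into the InetSocketAddress
  // form that HostSet.add expects.
  private static InetSocketAddress entry(String hostPort) {
    return NetUtils.createSocketAddr(hostPort);
  }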
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class BlockManagerTestUtil, method noticeDeadDatanode.
/**
* Ensure that the given NameNode marks the specified DataNode as
* entirely dead/expired.
* @param nn the NameNode to manipulate
* @param dnName the name of the DataNode
*/
public static void noticeDeadDatanode(NameNode nn, String dnName) {
  FSNamesystem namesystem = nn.getNamesystem();
  namesystem.writeLock();
  try {
    DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
    HeartbeatManager hbm = dnm.getHeartbeatManager();
    DatanodeDescriptor[] dnds = hbm.getDatanodes();
    DatanodeDescriptor theDND = null;
    for (DatanodeDescriptor dnd : dnds) {
      if (dnd.getXferAddr().equals(dnName)) {
        theDND = dnd;
      }
    }
    Assert.assertNotNull("Could not find DN with name: " + dnName, theDND);
    synchronized (hbm) {
      DFSTestUtil.setDatanodeDead(theDND);
      hbm.heartbeatCheck();
    }
  } finally {
    namesystem.writeUnlock();
  }
}
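
A test would typically invoke this helper against a MiniDFSCluster. A hypothetical usage, assuming a running cluster in a variable named cluster:

  // Force the NameNode to treat the first DataNode as dead/expired.
  DataNode dn = cluster.getDataNodes().get(0);
  BlockManagerTestUtil.noticeDeadDatanode(
      cluster.getNameNode(), dn.getDatanodeId().getXferAddr());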
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestOverReplicatedBlocks, method testInvalidateOverReplicatedBlock.
/**
 * Test that an over-replicated block is invalidated when the replication
 * factor is decreased on a partially written block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1,
        bm.countNodes(bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
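
Because replica invalidation happens asynchronously, a single immediate assertion can be racy. A more robust variant, sketched here with GenericTestUtils.waitFor (the poll interval and timeout values are arbitrary assumptions), would poll until the excess replica disappears:

  // Hypothetical alternative to the final assertion: wait up to 10 seconds
  // for the over-replicated copy to be invalidated instead of checking once.
  GenericTestUtils.waitFor(
      () -> bm.countNodes(bm.getStoredBlock(block.getLocalBlock()))
          .liveReplicas() == 1,
      100, 10000);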
Use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.
The class TestDataNodeUGIProvider, method getWebHdfsFileSystem.
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf, List<Token<DelegationTokenIdentifier>> tokens)
    throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId =
        new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager =
        new DelegationTokenSecretManager(86400000, 86400000, 86400000,
            86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token1 =
        new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
    Token<DelegationTokenIdentifier> token2 =
        new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
    SecurityUtil.setTokenService(token1,
        NetUtils.createSocketAddr(uri.getAuthority()));
    SecurityUtil.setTokenService(token2,
        NetUtils.createSocketAddr(uri.getAuthority()));
    token1.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
    token2.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
    tokens.add(token1);
    tokens.add(token2);
    ugi.addToken(token1);
    ugi.addToken(token2);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
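
Callers usually obtain the filesystem as the target user. A hypothetical usage, assuming ugi and conf are already configured:

  // Run the factory method as `ugi`, collecting the generated delegation
  // tokens for later inspection.
  List<Token<DelegationTokenIdentifier>> tokens = new ArrayList<>();
  WebHdfsFileSystem webhdfs = ugi.doAs(
      (PrivilegedExceptionAction<WebHdfsFileSystem>)
          () -> getWebHdfsFileSystem(ugi, conf, tokens));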