Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestInterDatanodeProtocol, method testUpdateReplicaUnderRecovery.
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    // create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
    // get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    // get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertNotNull(datanode);
    // initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));
    // check replica
    final Replica replica =
        cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    // check meta data before update
    cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
    // case "THIS IS NOT SUPPOSED TO HAPPEN":
    // (block length) != (stored replica's on-disk length).
    {
      // create a block with the same id and gs but a different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        // the update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail("updateReplicaUnderRecovery should have thrown an IOException");
      } catch (IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }
    // update
    final Replica r = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    Assert.assertNotNull(r);
    Assert.assertNotNull(r.getStorageUuid());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
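Every example on this page repeats the same lifecycle: build the cluster, call waitActive(), run the test body, and shut the cluster down in a finally block. As a minimal sketch of how that boilerplate could be factored out, the helper below wraps the pattern; the MiniClusterRunner class and its ClusterTask callback are hypothetical names introduced here for illustration, not part of the Hadoop test API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical helper that guarantees cluster shutdown around a test body.
public final class MiniClusterRunner {

  // Hypothetical callback type for the test body.
  public interface ClusterTask {
    void run(MiniDFSCluster cluster) throws Exception;
  }

  // Builds a cluster, waits until it is active, and always shuts it down.
  public static void withCluster(Configuration conf, int numDataNodes,
      ClusterTask task) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
      cluster.waitActive();
      task.run(cluster);
    } finally {
      cluster.shutdown();
    }
  }

  private MiniClusterRunner() {
  }
}

With such a helper, the try/finally scaffolding in each test collapses to a single call such as withCluster(conf, 3, cluster -> { ... }).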
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestDiskBalancerCommand, method testDiskBalancerQueryWithoutSubmit.
/**
 * Make sure that we can query the node without having done a submit.
 */
@Test
public void testDiskBalancerQueryWithoutSubmit() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int numDatanodes = 2;
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes).build();
  try {
    miniDFSCluster.waitActive();
    DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
    final String queryArg =
        String.format("-query localhost:%d", dataNode.getIpcPort());
    final String cmdLine = String.format("hdfs diskbalancer %s", queryArg);
    runCommand(cmdLine);
  } finally {
    miniDFSCluster.shutdown();
  }
}
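The runCommand helper used above is private to TestDiskBalancerCommand. A rough standalone equivalent is to drive the same query through ToolRunner; this is only a sketch, under the assumption that org.apache.hadoop.hdfs.tools.DiskBalancerCLI (the class behind the hdfs diskbalancer command in recent Hadoop releases) implements Tool and has a constructor taking a Configuration. The port number is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.util.ToolRunner;

public class DiskBalancerQueryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The disk balancer must be enabled, as the test above does via
    // DFSConfigKeys.DFS_DISK_BALANCER_ENABLED.
    conf.setBoolean("dfs.disk.balancer.enabled", true);
    // Equivalent to "hdfs diskbalancer -query <host:ipcPort>"; replace
    // 9867 (the Hadoop 3 default DataNode IPC port) with the real port.
    int exitCode = ToolRunner.run(conf, new DiskBalancerCLI(conf),
        new String[] {"-query", "localhost:9867"});
    System.exit(exitCode);
  }
}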
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestNameNodeMXBean, method testNameNodeMXBeanInfo.
@SuppressWarnings({"unchecked"})
@Test
public void testNameNodeMXBeanInfo() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
      NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    // Set the upgrade domain on the first DN.
    String upgradeDomain = "abcd";
    DatanodeManager dm = cluster.getNameNode().getNamesystem()
        .getBlockManager().getDatanodeManager();
    DatanodeDescriptor dd = dm.getDatanode(
        cluster.getDataNodes().get(0).getDatanodeId());
    dd.setUpgradeDomain(upgradeDomain);
    String dnXferAddrWithUpgradeDomainSet = dd.getXferAddr();
    // Put the second DN into maintenance state.
    DatanodeDescriptor maintenanceNode = dm.getDatanode(
        cluster.getDataNodes().get(1).getDatanodeId());
    maintenanceNode.setInMaintenance();
    String dnXferAddrInMaintenance = maintenanceNode.getXferAddr();
    FSNamesystem fsn = cluster.getNameNode().namesystem;
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
    // get attribute "ClusterId"
    String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
    assertEquals(fsn.getClusterId(), clusterId);
    // get attribute "BlockPoolId"
    String blockpoolId = (String) mbs.getAttribute(mxbeanName, "BlockPoolId");
    assertEquals(fsn.getBlockPoolId(), blockpoolId);
    // get attribute "Version"
    String version = (String) mbs.getAttribute(mxbeanName, "Version");
    assertEquals(fsn.getVersion(), version);
    assertEquals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision(),
        version);
    // get attribute "Used"
    Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
    assertEquals(fsn.getUsed(), used.longValue());
    // get attribute "Total"
    Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
    assertEquals(fsn.getTotal(), total.longValue());
    // get attribute "Safemode"
    String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
    assertEquals(fsn.getSafemode(), safemode);
    // get attribute "NonDfsUsedSpace"
    Long nondfs = (Long) mbs.getAttribute(mxbeanName, "NonDfsUsedSpace");
    assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
    // get attribute "PercentRemaining"
    Float percentremaining =
        (Float) mbs.getAttribute(mxbeanName, "PercentRemaining");
    assertEquals(fsn.getPercentRemaining(), percentremaining, DELTA);
    // get attribute "TotalBlocks"
    Long totalblocks = (Long) mbs.getAttribute(mxbeanName, "TotalBlocks");
    assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
    // get attribute "LiveNodes"
    String alivenodeinfo = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    Map<String, Map<String, Object>> liveNodes =
        (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
    assertEquals(2, liveNodes.size());
    for (Map<String, Object> liveNode : liveNodes.values()) {
      assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
      assertTrue((Long) liveNode.get("nonDfsUsedSpace") >= 0);
      assertTrue(liveNode.containsKey("capacity"));
      assertTrue((Long) liveNode.get("capacity") > 0);
      assertTrue(liveNode.containsKey("numBlocks"));
      assertEquals(0L, ((Long) liveNode.get("numBlocks")).longValue());
      assertTrue(liveNode.containsKey("lastBlockReport"));
      // a. By default the upgrade domain isn't defined on any DN.
      // b. If the upgrade domain is set on a DN, JMX should report the same
      //    value.
      String xferAddr = (String) liveNode.get("xferaddr");
      if (!xferAddr.equals(dnXferAddrWithUpgradeDomainSet)) {
        assertFalse(liveNode.containsKey("upgradeDomain"));
      } else {
        assertEquals(upgradeDomain, liveNode.get("upgradeDomain"));
      }
      // "adminState" is set to maintenance only for the specific DN.
      boolean inMaintenance = liveNode.get("adminState").equals(
          DatanodeInfo.AdminStates.IN_MAINTENANCE.toString());
      assertFalse(xferAddr.equals(dnXferAddrInMaintenance) ^ inMaintenance);
    }
    assertEquals(fsn.getLiveNodes(), alivenodeinfo);
    // get attribute "DeadNodes"
    String deadNodeInfo = (String) mbs.getAttribute(mxbeanName, "DeadNodes");
    assertEquals(fsn.getDeadNodes(), deadNodeInfo);
    // get attribute "NodeUsage"
    String nodeUsage = (String) mbs.getAttribute(mxbeanName, "NodeUsage");
    assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
    // get attribute "NameJournalStatus"
    String nameJournalStatus =
        (String) mbs.getAttribute(mxbeanName, "NameJournalStatus");
    assertEquals("Bad value for NameJournalStatus",
        fsn.getNameJournalStatus(), nameJournalStatus);
    // get attribute "JournalTransactionInfo"
    String journalTxnInfo =
        (String) mbs.getAttribute(mxbeanName, "JournalTransactionInfo");
    assertEquals("Bad value for NameTxnIds",
        fsn.getJournalTransactionInfo(), journalTxnInfo);
    // get attribute "CompileInfo"
    String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
    assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(),
        compileInfo);
    // get attribute "CorruptFiles"
    String corruptFiles = (String) mbs.getAttribute(mxbeanName,
        "CorruptFiles");
    assertEquals("Bad value for CorruptFiles", fsn.getCorruptFiles(),
        corruptFiles);
    // get attribute "NameDirStatuses"
    String nameDirStatuses =
        (String) mbs.getAttribute(mxbeanName, "NameDirStatuses");
    assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
    Map<String, Map<String, String>> statusMap =
        (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
    Collection<URI> nameDirUris = cluster.getNameDirs(0);
    for (URI nameDirUri : nameDirUris) {
      File nameDir = new File(nameDirUri);
      System.out.println("Checking for the presence of " + nameDir
          + " in active name dirs.");
      assertTrue(statusMap.get("active").containsKey(
          nameDir.getAbsolutePath()));
    }
    assertEquals(2, statusMap.get("active").size());
    assertEquals(0, statusMap.get("failed").size());
    // This will cause the first dir to fail.
    File failedNameDir = new File(nameDirUris.iterator().next());
    assertEquals(0, FileUtil.chmod(
        new File(failedNameDir, "current").getAbsolutePath(), "000"));
    cluster.getNameNodeRpc().rollEditLog();
    nameDirStatuses = (String) mbs.getAttribute(mxbeanName, "NameDirStatuses");
    statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
    for (URI nameDirUri : nameDirUris) {
      File nameDir = new File(nameDirUri);
      String expectedStatus =
          nameDir.equals(failedNameDir) ? "failed" : "active";
      System.out.println("Checking for the presence of " + nameDir + " in "
          + expectedStatus + " name dirs.");
      assertTrue(statusMap.get(expectedStatus).containsKey(
          nameDir.getAbsolutePath()));
    }
    assertEquals(1, statusMap.get("active").size());
    assertEquals(1, statusMap.get("failed").size());
    assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
    assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit()
        * cluster.getDataNodes().size(),
        mbs.getAttribute(mxbeanName, "CacheCapacity"));
    assertNull("RollingUpgradeInfo should be null when there is no rolling"
        + " upgrade", mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"));
  } finally {
    if (cluster != null) {
      // Restore permissions so the name dirs can be cleaned up on shutdown.
      for (URI dir : cluster.getNameDirs(0)) {
        FileUtil.chmod(new File(new File(dir), "current").getAbsolutePath(),
            "755");
      }
      cluster.shutdown();
    }
  }
}
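Everything this test verifies is reachable from ordinary client code as well, because NameNodeInfo is a plain MXBean registered on the platform MBeanServer. Below is a minimal sketch of a standalone reader; the class name is illustrative, and it only works in-process with the NameNode (connecting from another JVM would require a JMX remote connector instead of the platform server).

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Sketch: read a few NameNodeInfo attributes from in-process JMX.
public class NameNodeMXBeanReader {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    // Scalar attributes come back as Strings or boxed numbers.
    System.out.println("ClusterId = " + mbs.getAttribute(name, "ClusterId"));
    System.out.println("Total     = " + mbs.getAttribute(name, "Total"));
    // Structured attributes such as LiveNodes are JSON-encoded Strings and
    // must be parsed, as the test above does with JSON.parse().
    System.out.println("LiveNodes = " + mbs.getAttribute(name, "LiveNodes"));
  }
}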
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestNameNodeMXBean, method testTopUsers.
@Test(timeout = 120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i = 0; i < NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts");
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> map = mapper.readValue(topUsers, Map.class);
    assertTrue("Could not find map key timestamp",
        map.containsKey("timestamp"));
    assertTrue("Could not find map key windows", map.containsKey("windows"));
    List<Map<String, List<Map<String, Object>>>> windows =
        (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
    assertEquals("Unexpected num windows", 3, windows.size());
    for (Map<String, List<Map<String, Object>>> window : windows) {
      final List<Map<String, Object>> ops = window.get("ops");
      assertEquals("Unexpected num ops", 3, ops.size());
      for (Map<String, Object> op : ops) {
        final long count = Long.parseLong(op.get("totalCount").toString());
        final String opType = op.get("opType").toString();
        // listStatus and setTimes each ran NUM_OPS times, so the
        // aggregate "all commands" bucket should hold twice that.
        final int expected;
        if (opType.equals(TopConf.ALL_CMDS)) {
          expected = 2 * NUM_OPS;
        } else {
          expected = NUM_OPS;
        }
        assertEquals("Unexpected total count", expected, count);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
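Outside a test, the same TopUserOpCounts JSON can be summarized with a few lines of Jackson, which the test above already uses. This sketch relies only on the fields the test exercises (windows, ops, opType, totalCount); the class name is illustrative, and it assumes the com.fasterxml.jackson.databind.ObjectMapper used by current Hadoop.

import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;

// Sketch: print per-window operation counts from a TopUserOpCounts string.
public class TopUserOpCountsSummary {
  @SuppressWarnings("unchecked")
  public static void print(String topUsersJson) throws Exception {
    Map<String, Object> map =
        new ObjectMapper().readValue(topUsersJson, Map.class);
    List<Map<String, Object>> windows =
        (List<Map<String, Object>>) map.get("windows");
    for (Map<String, Object> window : windows) {
      List<Map<String, Object>> ops =
          (List<Map<String, Object>>) window.get("ops");
      for (Map<String, Object> op : ops) {
        System.out.println(op.get("opType") + " -> " + op.get("totalCount"));
      }
    }
  }
}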
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestNameNodeMXBean, method testQueueLength.
@Test(timeout = 120000)
public void testQueueLength() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFs =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystem");
    // On an idle cluster nothing should be waiting on the namesystem lock.
    int queueLength = (int) mbs.getAttribute(mxbeanNameFs, "LockQueueLength");
    assertEquals(0, queueLength);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}