Use of javax.management.MBeanServer in project hadoop by apache.
From the class TestNameNodeMXBean, method testTopUsers:
@Test(timeout = 120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i = 0; i < NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> map = mapper.readValue(topUsers, Map.class);
    assertTrue("Could not find map key timestamp", map.containsKey("timestamp"));
    assertTrue("Could not find map key windows", map.containsKey("windows"));
    List<Map<String, List<Map<String, Object>>>> windows =
        (List<Map<String, List<Map<String, Object>>>>) map.get("windows");
    assertEquals("Unexpected num windows", 3, windows.size());
    for (Map<String, List<Map<String, Object>>> window : windows) {
      final List<Map<String, Object>> ops = window.get("ops");
      assertEquals("Unexpected num ops", 3, ops.size());
      for (Map<String, Object> op : ops) {
        final long count = Long.parseLong(op.get("totalCount").toString());
        final String opType = op.get("opType").toString();
        final int expected;
        if (opType.equals(TopConf.ALL_CMDS)) {
          expected = 2 * NUM_OPS;
        } else {
          expected = NUM_OPS;
        }
        assertEquals("Unexpected total count", expected, count);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
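The TopUserOpCounts attribute the test reads is a plain JSON string, so the same query works outside JUnit. A minimal sketch, assuming a NameNode is running in the same JVM and Jackson's ObjectMapper is on the classpath as in the test above; the bean and attribute names come straight from the test, everything else is illustrative:

MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName fsns = new ObjectName(
    "Hadoop:service=NameNode,name=FSNamesystemState");
// TopUserOpCounts is exposed as a JSON string; it is null when nntop is disabled.
String json = (String) mbs.getAttribute(fsns, "TopUserOpCounts");
if (json != null) {
  Map<String, Object> snapshot = new ObjectMapper().readValue(json, Map.class);
  System.out.println("nntop snapshot taken at " + snapshot.get("timestamp"));
}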
Use of javax.management.MBeanServer in project hadoop by apache.
From the class TestNameNodeMXBean, method testQueueLength:
@Test(timeout = 120000)
public void testQueueLength() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFs = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystem");
    int queueLength = (int) mbs.getAttribute(mxbeanNameFs, "LockQueueLength");
    assertEquals(0, queueLength);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
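The platform MBeanServer is only reachable in-process; to read LockQueueLength from another JVM, the standard javax.management.remote API applies. A hedged sketch: the host, port, and JNDI path below are placeholders for whatever JMX endpoint the NameNode process actually exposes:

JMXServiceURL url = new JMXServiceURL(
    "service:jmx:rmi:///jndi/rmi://namenode-host:9999/jmxrmi"); // hypothetical endpoint
try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
  MBeanServerConnection conn = jmxc.getMBeanServerConnection();
  ObjectName fs = new ObjectName("Hadoop:service=NameNode,name=FSNamesystem");
  // Same attribute as the in-process test above.
  int queueLength = (int) conn.getAttribute(fs, "LockQueueLength");
  System.out.println("lock queue length: " + queueLength);
}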
Use of javax.management.MBeanServer in project hadoop by apache.
From the class TestNameNodeMXBean, method testTopUsersDisabled:
@Test(timeout = 120000)
public void testTopUsersDisabled() throws Exception {
  final Configuration conf = new Configuration();
  // Disable nntop
  conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    FileSystem fs = cluster.getFileSystem();
    final Path path = new Path("/");
    final int NUM_OPS = 10;
    for (int i = 0; i < NUM_OPS; i++) {
      fs.listStatus(path);
      fs.setTimes(path, 0, 1);
    }
    String topUsers =
        (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
    assertNull("Did not expect to find TopUserOpCounts bean!", topUsers);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
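As this test shows, the attribute degrades to null rather than throwing when nntop is off, so callers that poll it should guard for the disabled case. A minimal sketch, reusing the mbs and mxbeanNameFsns names from the tests above:

String topUsers = (String) mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts");
if (topUsers == null) {
  // nntop is switched off (DFSConfigKeys.NNTOP_ENABLED_KEY set to false);
  // there is nothing to parse or report.
  return;
}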
Use of javax.management.MBeanServer in project hadoop by apache.
From the class TestNameNodeMXBean, method testDecommissioningNodes:
@Test(timeout = 120000)
public void testDecommissioningNodes() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30);
  MiniDFSCluster cluster = null;
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    // Declared final so the anonymous Supplier classes below can capture them.
    final FSNamesystem fsn = cluster.getNameNode().namesystem;
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    final ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
    List<String> hosts = new ArrayList<>();
    for (DataNode dn : cluster.getDataNodes()) {
      hosts.add(dn.getDisplayName());
    }
    hostsFileWriter.initIncludeHosts(hosts.toArray(new String[hosts.size()]));
    fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
    // 1. Verify live nodes
    String liveNodesInfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
    Map<String, Map<String, Object>> liveNodes =
        (Map<String, Map<String, Object>>) JSON.parse(liveNodesInfo);
    assertEquals(fsn.getLiveNodes(), liveNodesInfo);
    assertEquals(fsn.getNumLiveDataNodes(), liveNodes.size());
    for (Map<String, Object> liveNode : liveNodes.values()) {
      assertTrue(liveNode.containsKey("lastContact"));
      assertTrue(liveNode.containsKey("xferaddr"));
    }
    // Add the first DataNode to the decommission list
    hostsFileWriter.initExcludeHost(
        cluster.getDataNodes().get(0).getDisplayName());
    fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
    // Wait for the DecommissionManager to finish refreshing nodes
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

      @Override
      public Boolean get() {
        try {
          String decomNodesInfo =
              (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
          Map<String, Map<String, Object>> decomNodes =
              (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
          if (decomNodes.size() > 0) {
            return true;
          }
        } catch (Exception e) {
          return false;
        }
        return false;
      }
    }, 1000, 60000);
    // 2. Verify decommission-in-progress nodes
    String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
    Map<String, Map<String, Object>> decomNodes =
        (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
    assertEquals(fsn.getDecomNodes(), decomNodesInfo);
    assertEquals(fsn.getNumDecommissioningDataNodes(), decomNodes.size());
    assertEquals(0, fsn.getNumDecomLiveDataNodes());
    assertEquals(0, fsn.getNumDecomDeadDataNodes());
    // Wait for the DecommissionManager to complete its check
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

      @Override
      public Boolean get() {
        return fsn.getNumDecomLiveDataNodes() == 1;
      }
    }, 1000, 60000);
    // 3. Verify decommissioned nodes
    decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
    decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
    assertEquals(0, decomNodes.size());
    assertEquals(fsn.getDecomNodes(), decomNodesInfo);
    assertEquals(1, fsn.getNumDecomLiveDataNodes());
    assertEquals(0, fsn.getNumDecomDeadDataNodes());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    hostsFileWriter.cleanup();
  }
}
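On Java 8+, the anonymous Supplier<Boolean> classes above collapse into lambdas, since the Supplier interface has a single abstract method. A sketch of the first wait, under the assumption that the GenericTestUtils.waitFor(supplier, checkEveryMillis, waitForMillis) signature is unchanged:

GenericTestUtils.waitFor(() -> {
  try {
    String info = (String) mbs.getAttribute(mxbeanName, "DecomNodes");
    Map<String, Map<String, Object>> nodes =
        (Map<String, Map<String, Object>>) JSON.parse(info);
    // Keep polling until at least one node shows up as decommissioning.
    return !nodes.isEmpty();
  } catch (Exception e) {
    return false;
  }
}, 1000, 60000);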
Use of javax.management.MBeanServer in project hadoop by apache.
From the class TestHostsFiles, method testHostsExcludeInUI:
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/decommission");
  // Two blocks and four racks
  String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Decommission one of the hosts with the block, this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    LOG.info("adding '" + name + "' to decommission");
    hostsFileWriter.initExcludeHost(name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);
    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    hostsFileWriter.cleanup();
  }
}
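The final assertion does a substring match on the raw JSON. A stricter variant parses LiveNodes the way testDecommissioningNodes does and checks each node's entry; the "adminState" key is an assumption here, inferred from the "Decommissioned" string the substring check relies on, so verify it against the JSON your NameNode version emits:

Map<String, Map<String, Object>> live =
    (Map<String, Map<String, Object>>) JSON.parse(nodes);
boolean sawDecommissioned = false;
for (Map<String, Object> node : live.values()) {
  // "adminState" is a hypothetical field name; confirm it in the actual
  // LiveNodes output before relying on this check.
  if ("Decommissioned".equals(node.get("adminState"))) {
    sawDecommissioned = true;
    break;
  }
}
assertTrue("Live nodes should contain the decommissioned node", sawDecommissioned);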