Search in sources:

Example 26 with MBeanServer

use of javax.management.MBeanServer in project hadoop by apache.

From class TestNameNodeMXBean, method testTopUsers.

/**
 * Verifies that the FSNamesystemState MXBean publishes nntop's
 * "TopUserOpCounts" attribute as JSON, and that the parsed payload has the
 * expected shape: a timestamp, three reporting windows, and three per-op
 * entries per window whose counts match the operations issued below.
 */
@Test(timeout = 120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
        final ObjectName fsnsBean =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        final FileSystem dfs = cluster.getFileSystem();
        final Path root = new Path("/");
        final int NUM_OPS = 10;
        // Issue a fixed number of listStatus and setTimes calls so the
        // expected per-op counts below are deterministic.
        int remaining = NUM_OPS;
        while (remaining-- > 0) {
            dfs.listStatus(root);
            dfs.setTimes(root, 0, 1);
        }
        final String json =
            (String) beanServer.getAttribute(fsnsBean, "TopUserOpCounts");
        final Map<String, Object> parsed =
            new ObjectMapper().readValue(json, Map.class);
        assertTrue("Could not find map key timestamp",
            parsed.containsKey("timestamp"));
        assertTrue("Could not find map key windows",
            parsed.containsKey("windows"));
        final List<Map<String, List<Map<String, Object>>>> windows =
            (List<Map<String, List<Map<String, Object>>>>) parsed.get("windows");
        assertEquals("Unexpected num windows", 3, windows.size());
        for (Map<String, List<Map<String, Object>>> window : windows) {
            final List<Map<String, Object>> opEntries = window.get("ops");
            assertEquals("Unexpected num ops", 3, opEntries.size());
            for (Map<String, Object> entry : opEntries) {
                final long actual =
                    Long.parseLong(entry.get("totalCount").toString());
                // The aggregate "all commands" entry counts both op types.
                final int expected =
                    entry.get("opType").toString().equals(TopConf.ALL_CMDS)
                        ? 2 * NUM_OPS : NUM_OPS;
                assertEquals("Unexpected total count", expected, actual);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ObjectName(javax.management.ObjectName) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) MBeanServer(javax.management.MBeanServer) Test(org.junit.Test)

Example 27 with MBeanServer

use of javax.management.MBeanServer in project hadoop by apache.

From class TestNameNodeMXBean, method testQueueLength.

/**
 * Verifies that the FSNamesystem MXBean reports a "LockQueueLength" of
 * zero on an idle NameNode (no datanodes, no client load).
 */
@Test(timeout = 120000)
public void testQueueLength() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final ObjectName fsBean =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystem");
        final MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
        // Nothing is contending for the namesystem lock, so the reported
        // lock queue length must be zero.
        assertEquals(0,
            (int) beanServer.getAttribute(fsBean, "LockQueueLength"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MBeanServer(javax.management.MBeanServer) ObjectName(javax.management.ObjectName) Test(org.junit.Test)

Example 28 with MBeanServer

use of javax.management.MBeanServer in project hadoop by apache.

From class TestNameNodeMXBean, method testTopUsersDisabled.

/**
 * Verifies that when nntop is disabled via configuration, the
 * "TopUserOpCounts" attribute of the FSNamesystemState MXBean is null
 * even after filesystem operations have been performed.
 */
@Test(timeout = 120000)
public void testTopUsersDisabled() throws Exception {
    final Configuration conf = new Configuration();
    // Disable nntop
    conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
        final ObjectName fsnsBean =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        final FileSystem dfs = cluster.getFileSystem();
        final Path root = new Path("/");
        final int NUM_OPS = 10;
        // Generate traffic that nntop would normally record; with nntop
        // disabled none of it should surface through the bean.
        int remaining = NUM_OPS;
        while (remaining-- > 0) {
            dfs.listStatus(root);
            dfs.setTimes(root, 0, 1);
        }
        final String json =
            (String) beanServer.getAttribute(fsnsBean, "TopUserOpCounts");
        assertNull("Did not expect to find TopUserOpCounts bean!", json);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) MBeanServer(javax.management.MBeanServer) ObjectName(javax.management.ObjectName) Test(org.junit.Test)

Example 29 with MBeanServer

use of javax.management.MBeanServer in project hadoop by apache.

From class TestNameNodeMXBean, method testDecommissioningNodes.

/**
 * Verifies the NameNodeInfo MXBean's view of node decommissioning:
 * (1) "LiveNodes" matches FSNamesystem's JSON and node count, with the
 *     expected per-node keys; (2) after excluding one datanode,
 *     "DecomNodes" reports it as decommission-in-progress; (3) once the
 *     DecommissionManager finishes, "DecomNodes" empties and the node is
 *     counted as decommissioned-live.
 */
@Test(timeout = 120000)
// The JSON.parse results are cast to nested Map types; same suppression
// as testTopUsers, which performs the equivalent unchecked casts.
@SuppressWarnings("unchecked")
public void testDecommissioningNodes() throws Exception {
    Configuration conf = new Configuration();
    // Fast heartbeats/rechecks so decommission progress is observed quickly.
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30);
    MiniDFSCluster cluster = null;
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        final FSNamesystem fsn = cluster.getNameNode().namesystem;
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        final ObjectName mxbeanName =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        List<String> hosts = new ArrayList<>();
        for (DataNode dn : cluster.getDataNodes()) {
            hosts.add(dn.getDisplayName());
        }
        hostsFileWriter.initIncludeHosts(hosts.toArray(new String[hosts.size()]));
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        // 1. Verify Live nodes
        String liveNodesInfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        Map<String, Map<String, Object>> liveNodes =
            (Map<String, Map<String, Object>>) JSON.parse(liveNodesInfo);
        assertEquals(fsn.getLiveNodes(), liveNodesInfo);
        assertEquals(fsn.getNumLiveDataNodes(), liveNodes.size());
        for (Map<String, Object> liveNode : liveNodes.values()) {
            assertTrue(liveNode.containsKey("lastContact"));
            assertTrue(liveNode.containsKey("xferaddr"));
        }
        // Add the 1st DataNode to Decommission list
        hostsFileWriter.initExcludeHost(
            cluster.getDataNodes().get(0).getDisplayName());
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        // Wait for the DecommissionManager to complete refresh nodes:
        // poll until the bean reports at least one decommissioning node.
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                try {
                    String decomNodesInfo =
                        (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
                    Map<String, Map<String, Object>> decomNodes =
                        (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
                    return decomNodes.size() > 0;
                } catch (Exception e) {
                    // Bean not ready yet; keep polling until waitFor times out.
                    return false;
                }
            }
        }, 1000, 60000);
        // 2. Verify Decommission InProgress nodes
        String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
        Map<String, Map<String, Object>> decomNodes =
            (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
        assertEquals(fsn.getDecomNodes(), decomNodesInfo);
        assertEquals(fsn.getNumDecommissioningDataNodes(), decomNodes.size());
        assertEquals(0, fsn.getNumDecomLiveDataNodes());
        assertEquals(0, fsn.getNumDecomDeadDataNodes());
        // Wait for the DecommissionManager to complete check
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return fsn.getNumDecomLiveDataNodes() == 1;
            }
        }, 1000, 60000);
        // 3. Verify Decommissioned nodes: once fully decommissioned the
        // node leaves the in-progress list and is counted as decom-live.
        decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
        decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
        assertEquals(0, decomNodes.size());
        assertEquals(fsn.getDecomNodes(), decomNodesInfo);
        assertEquals(1, fsn.getNumDecomLiveDataNodes());
        assertEquals(0, fsn.getNumDecomDeadDataNodes());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) ArrayList(java.util.ArrayList) BindException(java.net.BindException) IOException(java.io.IOException) ObjectName(javax.management.ObjectName) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Map(java.util.Map) HashMap(java.util.HashMap) MBeanServer(javax.management.MBeanServer) Test(org.junit.Test)

Example 30 with MBeanServer

use of javax.management.MBeanServer in project hadoop by apache.

From class TestHostsFiles, method testHostsExcludeInUI.

/**
 * Verifies that after decommissioning a datanode that holds a block
 * replica, the NameNodeInfo MXBean's "LiveNodes" attribute reports the
 * node as "Decommissioned", while the block keeps sufficient replicas
 * across racks.
 *
 * Fix: the original built the MiniDFSCluster *before* the try block, so a
 * failure in build()/waitActive leaked the cluster and skipped
 * hostsFileWriter.cleanup(); construction is now inside try, making the
 * cluster-null guard in finally meaningful.
 */
@Test
public void testHostsExcludeInUI() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    // Two blocks and four racks
    String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(racks.length).racks(racks).build();
        final FSNamesystem ns = cluster.getNameNode().getNamesystem();
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        // Decommission one of the hosts with the block, this should cause 
        // the block to get replicated to another host on the same rack,
        // otherwise the rack policy is violated.
        BlockLocation[] locs = fs.getFileBlockLocations(
            fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        LOG.info("adding '" + name + "' to decommission");
        hostsFileWriter.initExcludeHost(name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
        assertTrue("Live nodes should contain the decommissioned node",
            nodes.contains("Decommissioned"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) BlockLocation(org.apache.hadoop.fs.BlockLocation) ObjectName(javax.management.ObjectName) FileSystem(org.apache.hadoop.fs.FileSystem) MBeanServer(javax.management.MBeanServer) Test(org.junit.Test)

Aggregations

MBeanServer (javax.management.MBeanServer)1218 ObjectName (javax.management.ObjectName)939 Test (org.junit.Test)214 MalformedObjectNameException (javax.management.MalformedObjectNameException)123 MockEndpoint (org.apache.camel.component.mock.MockEndpoint)94 InstanceNotFoundException (javax.management.InstanceNotFoundException)87 IOException (java.io.IOException)82 JMXServiceURL (javax.management.remote.JMXServiceURL)70 Attribute (javax.management.Attribute)66 InstanceAlreadyExistsException (javax.management.InstanceAlreadyExistsException)65 HashMap (java.util.HashMap)63 MBeanRegistrationException (javax.management.MBeanRegistrationException)56 NotCompliantMBeanException (javax.management.NotCompliantMBeanException)54 TabularData (javax.management.openmbean.TabularData)51 ArrayList (java.util.ArrayList)47 JMXConnectorServer (javax.management.remote.JMXConnectorServer)47 JMXConnector (javax.management.remote.JMXConnector)40 Map (java.util.Map)38 JMException (javax.management.JMException)38 Test (org.junit.jupiter.api.Test)36