
Example 11 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in the Apache Hadoop project.

From the class TestNameNodeMXBean, method testLastContactTime.

@SuppressWarnings({ "unchecked" })
@Test
public void testLastContactTime() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
    MiniDFSCluster cluster = null;
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNameNode().namesystem;
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        List<String> hosts = new ArrayList<>();
        for (DataNode dn : cluster.getDataNodes()) {
            hosts.add(dn.getDisplayName());
        }
        hostsFileWriter.initIncludeHosts(hosts.toArray(new String[hosts.size()]));
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        cluster.stopDataNode(0);
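        // Wait until the NameNode marks the stopped DataNode dead
        // (live node count drops from 3 to 2)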
        while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes() != 2) {
            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
        // get attribute DeadNodes
        String deadNodeInfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes"));
        assertEquals(fsn.getDeadNodes(), deadNodeInfo);
        Map<String, Map<String, Object>> deadNodes = (Map<String, Map<String, Object>>) JSON.parse(deadNodeInfo);
        assertTrue(deadNodes.size() > 0);
        for (Map<String, Object> deadNode : deadNodes.values()) {
            assertTrue(deadNode.containsKey("lastContact"));
            assertTrue(deadNode.containsKey("adminState"));
            assertTrue(deadNode.containsKey("xferaddr"));
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), ArrayList (java.util.ArrayList), ObjectName (javax.management.ObjectName), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Map (java.util.Map), HashMap (java.util.HashMap), MBeanServer (javax.management.MBeanServer), Test (org.junit.Test)
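
The JMX lookup in this test is reusable on its own: resolve the platform MBean server, build the NameNodeInfo ObjectName, and read a JSON-valued attribute. A minimal sketch of that pattern (NameNodeInfoReader is a hypothetical helper, not part of the test; it only returns data in a JVM that hosts a NameNode, e.g. alongside a MiniDFSCluster):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NameNodeInfoReader {
    // Reads a JSON-valued NameNodeInfo attribute such as "DeadNodes" or
    // "LiveNodes" from the platform MBean server of the current JVM.
    public static String readAttribute(String attribute) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        return (String) mbs.getAttribute(mxbeanName, attribute);
    }
}

For example, readAttribute("DeadNodes") returns the same JSON string the test parses above.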

Example 12 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in the Apache Hadoop project.

From the class TestNameNodeMXBean, method testMaintenanceNodes.

@Test(timeout = 120000)
public void testMaintenanceNodes() throws Exception {
    LOG.info("Starting testMaintenanceNodes");
    int expirationInMs = 30 * 1000;
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, expirationInMs);
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
    MiniDFSCluster cluster = null;
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNameNode().namesystem;
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        List<String> hosts = new ArrayList<>();
        for (DataNode dn : cluster.getDataNodes()) {
            hosts.add(dn.getDisplayName());
        }
        hostsFileWriter.initIncludeHosts(hosts.toArray(new String[hosts.size()]));
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        // 1. Verify nodes for DatanodeReportType.LIVE state
        String liveNodesInfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        LOG.info("Live Nodes: " + liveNodesInfo);
        Map<String, Map<String, Object>> liveNodes = (Map<String, Map<String, Object>>) JSON.parse(liveNodesInfo);
        assertEquals(fsn.getLiveNodes(), liveNodesInfo);
        assertEquals(fsn.getNumLiveDataNodes(), liveNodes.size());
        for (Map<String, Object> liveNode : liveNodes.values()) {
            assertTrue(liveNode.containsKey("lastContact"));
            assertTrue(liveNode.containsKey("xferaddr"));
        }
        // Add the 1st DataNode to Maintenance list
        Map<String, Long> maintenanceNodes = new HashMap<>();
        maintenanceNodes.put(cluster.getDataNodes().get(0).getDisplayName(), Time.now() + expirationInMs);
        hostsFileWriter.initOutOfServiceHosts(null, maintenanceNodes);
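        // refreshNodes re-reads the combined host file so the NameNode
        // picks up the new maintenance entry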
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        boolean recheck = true;
        while (recheck) {
            // 2. Verify nodes for DatanodeReportType.ENTERING_MAINTENANCE state
            String enteringMaintenanceNodesInfo = (String) (mbs.getAttribute(mxbeanName, "EnteringMaintenanceNodes"));
            Map<String, Map<String, Object>> enteringMaintenanceNodes = (Map<String, Map<String, Object>>) JSON.parse(enteringMaintenanceNodesInfo);
            if (enteringMaintenanceNodes.size() <= 0) {
                LOG.info("Waiting for a node to Enter Maintenance state!");
                Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
                continue;
            }
            LOG.info("Nodes entering Maintenance: " + enteringMaintenanceNodesInfo);
            recheck = false;
            assertEquals(fsn.getEnteringMaintenanceNodes(), enteringMaintenanceNodesInfo);
            assertEquals(fsn.getNumEnteringMaintenanceDataNodes(), enteringMaintenanceNodes.size());
            assertEquals(0, fsn.getNumInMaintenanceLiveDataNodes());
            assertEquals(0, fsn.getNumInMaintenanceDeadDataNodes());
        }
        // Wait for the maintenance window to arrive and the 1st node to
        // transition from ENTERING_MAINTENANCE to IN_MAINTENANCE
        while (fsn.getNumInMaintenanceLiveDataNodes() != 1) {
            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
        // 3. Verify nodes for AdminStates.IN_MAINTENANCE state
        String enteringMaintenanceNodesInfo = (String) (mbs.getAttribute(mxbeanName, "EnteringMaintenanceNodes"));
        Map<String, Map<String, Object>> enteringMaintenanceNodes = (Map<String, Map<String, Object>>) JSON.parse(enteringMaintenanceNodesInfo);
        assertEquals(0, enteringMaintenanceNodes.size());
        assertEquals(fsn.getEnteringMaintenanceNodes(), enteringMaintenanceNodesInfo);
        assertEquals(1, fsn.getNumInMaintenanceLiveDataNodes());
        assertEquals(0, fsn.getNumInMaintenanceDeadDataNodes());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ObjectName (javax.management.ObjectName), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Map (java.util.Map), MBeanServer (javax.management.MBeanServer), Test (org.junit.Test)
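
Both MXBean tests busy-wait with one-second sleeps until the cluster reaches the expected state. A bounded version of that polling loop, as a hypothetical helper (Hadoop's own test utilities offer a similar GenericTestUtils.waitFor; this sketch assumes only Guava, which the tests already use for Uninterruptibles):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;
import com.google.common.util.concurrent.Uninterruptibles;

public final class WaitUtil {
    private WaitUtil() {
    }

    // Polls once per second until the condition holds, failing with a
    // TimeoutException instead of hanging the test forever.
    public static void waitFor(BooleanSupplier condition, long timeoutMs)
            throws TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new TimeoutException(
                    "Condition not met within " + timeoutMs + " ms");
            }
            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
    }
}

With it, the IN_MAINTENANCE wait above becomes a single call: WaitUtil.waitFor(() -> fsn.getNumInMaintenanceLiveDataNodes() == 1, 60_000).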

Example 13 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in the Apache Hadoop project.

From the class TestGetConf, method TestGetConfExcludeCommand.

@Test
public void TestGetConfExcludeCommand() throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    // Set up the hosts/exclude files.
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "GetConf");
    Path excludeFile = hostsFileWriter.getExcludeFile();
    String[] args = { "-excludeFile" };
    String ret = runTool(conf, args, true);
    assertEquals(excludeFile.toUri().getPath(), ret.trim());
    hostsFileWriter.cleanup();
}
Also used: Path (org.apache.hadoop.fs.Path), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Test (org.junit.Test)
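
The assertion works because HostsFileWriter.initialize() creates temporary host files and registers their paths in the configuration, which is exactly what getconf reports back. A sketch reading the same value straight from the conf (an assumption worth flagging: this relies on the default hosts-file manager, where dfs.hosts.exclude names a separate exclude file; ExcludeFileLookup is a hypothetical class):

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.util.HostsFileWriter;

public class ExcludeFileLookup {
    public static void main(String[] args) throws Exception {
        HdfsConfiguration conf = new HdfsConfiguration();
        HostsFileWriter hostsFileWriter = new HostsFileWriter();
        hostsFileWriter.initialize(conf, "GetConf");
        // initialize() pointed dfs.hosts.exclude at a generated temp file;
        // this prints the same path the -excludeFile test compares against.
        System.out.println(conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE));
        hostsFileWriter.cleanup();
    }
}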

Example 14 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in the Apache Hadoop project.

From the class TestGetConf, method TestGetConfIncludeCommand.

@Test
public void TestGetConfIncludeCommand() throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    // Set up the hosts/exclude files.
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "GetConf");
    Path hostsFile = hostsFileWriter.getIncludeFile();
    // Query the include file path via getconf
    String[] args = { "-includeFile" };
    String ret = runTool(conf, args, true);
    assertEquals(hostsFile.toUri().getPath(), ret.trim());
    hostsFileWriter.cleanup();
}
Also used: Path (org.apache.hadoop.fs.Path), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Test (org.junit.Test)
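
The include variant mirrors the exclude test above. On a running cluster, the same lookups are available from the command line as hdfs getconf -includeFile and hdfs getconf -excludeFile, which print the configured dfs.hosts and dfs.hosts.exclude paths respectively.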

Aggregations

HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter): 14 usages
Test (org.junit.Test): 11 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 usages
Configuration (org.apache.hadoop.conf.Configuration): 7 usages
Path (org.apache.hadoop.fs.Path): 6 usages
MBeanServer (javax.management.MBeanServer): 5 usages
ObjectName (javax.management.ObjectName): 5 usages
ArrayList (java.util.ArrayList): 3 usages
HashMap (java.util.HashMap): 3 usages
Map (java.util.Map): 3 usages
BlockLocation (org.apache.hadoop.fs.BlockLocation): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 usages
IOException (java.io.IOException): 2 usages
DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 2 usages
Before (org.junit.Before): 2 usages