
Example 6 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

The class TestFsck, method testUpgradeDomain.

private void testUpgradeDomain(boolean defineUpgradeDomain, boolean displayUpgradeDomain) throws Exception {
    final short replFactor = 1;
    final short numDN = 1;
    final long blockSize = 512;
    final long fileSize = 1024;
    final String upgradeDomain = "ud1";
    final String[] racks = { "/rack1" };
    final String[] hosts = { "127.0.0.1" };
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    if (defineUpgradeDomain) {
        conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
        hostsFileWriter.initialize(conf, "temp/fsckupgradedomain");
    }
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).hosts(hosts).racks(racks).build();
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    // Configure the upgrade domain on the datanode
    if (defineUpgradeDomain) {
        DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
        DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
        dnProp.setHostName(datanodeID.getHostName());
        dnProp.setPort(datanodeID.getXferPort());
        dnProp.setUpgradeDomain(upgradeDomain);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { dnProp });
        cluster.getFileSystem().refreshNodes();
    }
    // Create the test file
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
    DFSTestUtil.waitReplication(dfs, path, replFactor);
    try {
        String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", displayUpgradeDomain ? "-upgradedomains" : "-locations");
        assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
        String udValue = defineUpgradeDomain ? upgradeDomain : NamenodeFsck.UNDEFINED;
        assertEquals(displayUpgradeDomain, fsckOut.contains("(ud=" + udValue + ")"));
    } finally {
        if (defineUpgradeDomain) {
            hostsFileWriter.cleanup();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DatanodeAdminProperties(org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)
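
The essential steps in this example are selecting CombinedHostFileManager as the hosts provider and publishing the datanode's upgrade domain through the combined include file. A minimal sketch of that HostsFileWriter lifecycle, detached from the MiniDFSCluster setup and the fsck assertions (the directory name, hostname, and port below are illustrative placeholders, not values taken from the test):

Configuration conf = new HdfsConfiguration();
// Use the combined (JSON) hosts file so per-datanode properties such as the
// upgrade domain can be expressed.
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);
HostsFileWriter writer = new HostsFileWriter();
writer.initialize(conf, "temp/upgrade-domain-sketch");   // illustrative directory
try {
    DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
    dnProp.setHostName("127.0.0.1");   // illustrative hostname
    dnProp.setPort(9866);              // illustrative transfer port
    dnProp.setUpgradeDomain("ud1");
    writer.initIncludeHosts(new DatanodeAdminProperties[] { dnProp });
    // The namenode only reads the new include file after a refreshNodes() call,
    // which the test issues through cluster.getFileSystem().refreshNodes().
} finally {
    writer.cleanup();
}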

Example 7 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

The class TestNameNodeMXBean, method testDecommissioningNodes.

@Test(timeout = 120000)
public void testDecommissioningNodes() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 30);
    MiniDFSCluster cluster = null;
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNameNode().namesystem;
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        List<String> hosts = new ArrayList<>();
        for (DataNode dn : cluster.getDataNodes()) {
            hosts.add(dn.getDisplayName());
        }
        hostsFileWriter.initIncludeHosts(hosts.toArray(new String[hosts.size()]));
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        // 1. Verify Live nodes
        String liveNodesInfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        Map<String, Map<String, Object>> liveNodes = (Map<String, Map<String, Object>>) JSON.parse(liveNodesInfo);
        assertEquals(fsn.getLiveNodes(), liveNodesInfo);
        assertEquals(fsn.getNumLiveDataNodes(), liveNodes.size());
        for (Map<String, Object> liveNode : liveNodes.values()) {
            assertTrue(liveNode.containsKey("lastContact"));
            assertTrue(liveNode.containsKey("xferaddr"));
        }
        // Add the 1st DataNode to Decommission list
        hostsFileWriter.initExcludeHost(cluster.getDataNodes().get(0).getDisplayName());
        fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
        // Wait for the DecommissionManager to finish refreshing nodes
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                try {
                    String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
                    Map<String, Map<String, Object>> decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
                    if (decomNodes.size() > 0) {
                        return true;
                    }
                } catch (Exception e) {
                    return false;
                }
                return false;
            }
        }, 1000, 60000);
        // 2. Verify Decommission InProgress nodes
        String decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
        Map<String, Map<String, Object>> decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
        assertEquals(fsn.getDecomNodes(), decomNodesInfo);
        assertEquals(fsn.getNumDecommissioningDataNodes(), decomNodes.size());
        assertEquals(0, fsn.getNumDecomLiveDataNodes());
        assertEquals(0, fsn.getNumDecomDeadDataNodes());
        // Wait for the DecommissionManager to complete its check
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                if (fsn.getNumDecomLiveDataNodes() == 1) {
                    return true;
                }
                return false;
            }
        }, 1000, 60000);
        // 3. Verify Decommissioned nodes
        decomNodesInfo = (String) (mbs.getAttribute(mxbeanName, "DecomNodes"));
        decomNodes = (Map<String, Map<String, Object>>) JSON.parse(decomNodesInfo);
        assertEquals(0, decomNodes.size());
        assertEquals(fsn.getDecomNodes(), decomNodesInfo);
        assertEquals(1, fsn.getNumDecomLiveDataNodes());
        assertEquals(0, fsn.getNumDecomDeadDataNodes());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) ArrayList(java.util.ArrayList) BindException(java.net.BindException) IOException(java.io.IOException) ObjectName(javax.management.ObjectName) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Map(java.util.Map) HashMap(java.util.HashMap) MBeanServer(javax.management.MBeanServer) Test(org.junit.Test)
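
All of the decommissioning checks in this example go through the NameNodeInfo MXBean. A stripped-down sketch of that read path, assuming the NameNode runs in the same JVM as the caller (which MiniDFSCluster guarantees) and that checked exceptions are declared by the enclosing method:

MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName nnInfo = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");

// Both attributes are JSON objects keyed by datanode name; the test parses them
// with JSON.parse(...) and compares them against the FSNamesystem getters.
String liveNodesJson  = (String) mbs.getAttribute(nnInfo, "LiveNodes");
String decomNodesJson = (String) mbs.getAttribute(nnInfo, "DecomNodes");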

Example 8 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

The class TestHostsFiles, method testHostsExcludeInUI.

@Test
public void testHostsExcludeInUI() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    // Two blocks and four racks
    String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        // Decommission one of the hosts with the block; this should cause
        // the block to get replicated to another host on the same rack so
        // that the rack placement policy is not violated.
        BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        LOG.info("adding '" + name + "' to decommission");
        hostsFileWriter.initExcludeHost(name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
        assertTrue("Live nodes should contain the decommissioned node", nodes.contains("Decommissioned"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) BlockLocation(org.apache.hadoop.fs.BlockLocation) ObjectName(javax.management.ObjectName) FileSystem(org.apache.hadoop.fs.FileSystem) MBeanServer(javax.management.MBeanServer) Test(org.junit.Test)
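
The decommission pattern used here (write the exclude file, refresh the datanode manager, wait for the state change) can be read on its own. A minimal sketch of just that pattern, assuming conf and cluster are already set up as in the test above; the datanode name is an illustrative placeholder:

HostsFileWriter writer = new HostsFileWriter();
writer.initialize(conf, "temp/decommission-sketch");   // illustrative directory
try {
    String datanodeName = "127.0.0.1:9866";            // illustrative name:port
    writer.initExcludeHost(datanodeName);
    // Tell the namenode to re-read the include/exclude files.
    cluster.getNamesystem().getBlockManager().getDatanodeManager().refreshNodes(conf);
    // Block until the excluded datanode is reported as decommissioned.
    DFSTestUtil.waitForDecommission(cluster.getFileSystem(), datanodeName);
} finally {
    writer.cleanup();
}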

Example 9 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

The class TestDecommissioningStatus, method setUp.

@Before
public void setUp() throws Exception {
    conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    // Set up the hosts/exclude files.
    hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "work-dir/decommission");
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
    conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    cluster.getNamesystem().getBlockManager().getDatanodeManager().setHeartbeatExpireInterval(3000);
    Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
Also used : DecommissionManager(org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Before(org.junit.Before)
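
This setUp pairs naturally with a tearDown that releases the same resources. A typical counterpart, not shown on this page, using JUnit's @After:

@After
public void tearDown() throws Exception {
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
    // Delete the generated hosts/exclude files and restore the configuration keys.
    hostsFileWriter.cleanup();
}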

Example 10 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

The class TestHostsFiles, method testHostsIncludeForDeadCount.

@Test
public void testHostsIncludeForDeadCount() throws Exception {
    Configuration conf = getConf();
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    hostsFileWriter.initIncludeHosts(new String[] { "localhost:52", "127.0.0.1:7777" });
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        final FSNamesystem ns = cluster.getNameNode().getNamesystem();
        assertTrue(ns.getNumDeadDataNodes() == 2);
        assertTrue(ns.getNumLiveDataNodes() == 0);
        // Testing using MBeans
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
        assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) MBeanServer(javax.management.MBeanServer) ObjectName(javax.management.ObjectName) Test(org.junit.Test)
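
The point of this example is that hosts listed in the include file which never register with the namenode are counted as dead. The same MBean checks can be written with assertEquals, which reports the actual value on failure; a sketch equivalent to the assertTrue comparisons above:

MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName fsState = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");

// "localhost:52" and "127.0.0.1:7777" were written to the include file but never
// started, so the namenode reports them as dead datanodes.
assertEquals(2, ((Integer) mbs.getAttribute(fsState, "NumDeadDataNodes")).intValue());
assertEquals(0, ((Integer) mbs.getAttribute(fsState, "NumLiveDataNodes")).intValue());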

Aggregations

HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter)14 Test (org.junit.Test)11 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)10 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)10 Configuration (org.apache.hadoop.conf.Configuration)7 Path (org.apache.hadoop.fs.Path)6 MBeanServer (javax.management.MBeanServer)5 ObjectName (javax.management.ObjectName)5 ArrayList (java.util.ArrayList)3 HashMap (java.util.HashMap)3 Map (java.util.Map)3 BlockLocation (org.apache.hadoop.fs.BlockLocation)3 FileSystem (org.apache.hadoop.fs.FileSystem)3 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)3 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)3 IOException (java.io.IOException)2 DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)2 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)2 FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)2 Before (org.junit.Before)2