Search in sources:

Example 1 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

Class TestDatanodeReport, method testDatanodeReportWithUpgradeDomain.

/**
   * This test verifies that the upgrade domain is set according to the JSON
   * hosts file.
   */
@Test
public void testDatanodeReportWithUpgradeDomain() throws Exception {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/datanodeReport");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final DFSClient client = cluster.getFileSystem().dfs;
    final String ud1 = "ud1";
    final String ud2 = "ud2";
    try {
        //wait until the cluster is up
        cluster.waitActive();
        DatanodeAdminProperties datanode = new DatanodeAdminProperties();
        datanode.setHostName(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
        datanode.setUpgradeDomain(ud1);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals(ud1, all[0].getUpgradeDomain());
        datanode.setUpgradeDomain(null);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        all = client.datanodeReport(DatanodeReportType.ALL);
        assertNull(all[0].getUpgradeDomain());
        datanode.setUpgradeDomain(ud2);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        all = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals(ud2, all[0].getUpgradeDomain());
    } finally {
        cluster.shutdown();
        hostsFileWriter.cleanup();
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties), Test (org.junit.Test)
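
A companion sketch, not part of the Hadoop test and with a hypothetical helper name: if every datanode in the cluster needs an upgrade domain, the same include-file mechanism scales by writing one DatanodeAdminProperties entry per datanode. It uses only calls that already appear in Example 1, plus java.util.List/ArrayList and DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode).

/**
 * Hypothetical helper: tag every datanode in the MiniDFSCluster with the
 * given upgrade domain via the JSON include file, then make the namenode
 * re-read it.
 */
private static void setUpgradeDomainForAll(MiniDFSCluster cluster, HostsFileWriter hostsFileWriter, DFSClient client, String upgradeDomain) throws Exception {
    List<DatanodeAdminProperties> dns = new ArrayList<>();
    for (DataNode dn : cluster.getDataNodes()) {
        DatanodeAdminProperties props = new DatanodeAdminProperties();
        props.setHostName(dn.getDatanodeId().getHostName());
        props.setUpgradeDomain(upgradeDomain);
        dns.add(props);
    }
    // Rewrite the JSON host file and trigger a refresh, as in the test above.
    hostsFileWriter.initIncludeHosts(dns.toArray(new DatanodeAdminProperties[0]));
    client.refreshNodes();
}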

Example 2 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

Class TestStartup, method testNNRestart.

/**
   * This test uses a hosts include list that contains host names. After the
   * namenode restarts, the datanodes that are still alive should have no
   * trouble registering again.
   */
@Test
public void testNNRestart() throws IOException, InterruptedException {
    MiniDFSCluster cluster = null;
    // heartbeat interval in seconds
    int HEARTBEAT_INTERVAL = 1;
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(config, "work-dir/restartnn");
    byte[] b = { 127, 0, 0, 1 };
    InetAddress inetAddress = InetAddress.getByAddress(b);
    hostsFileWriter.initIncludeHosts(new String[] { inetAddress.getHostName() });
    int numDatanodes = 1;
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).setupHostsFile(true).build();
        cluster.waitActive();
        cluster.restartNameNode();
        NamenodeProtocols nn = cluster.getNameNodeRpc();
        assertNotNull(nn);
        assertTrue(cluster.isDataNodeUp());
        DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
        for (int i = 0; i < 5 && info.length != numDatanodes; i++) {
            Thread.sleep(HEARTBEAT_INTERVAL * 1000);
            info = nn.getDatanodeReport(DatanodeReportType.LIVE);
        }
        assertEquals("Number of live nodes should be " + numDatanodes, numDatanodes, info.length);
    } catch (IOException e) {
        fail(StringUtils.stringifyException(e));
        throw e;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), IOException (java.io.IOException), InetAddress (java.net.InetAddress), Test (org.junit.Test)
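
The five-iteration polling loop above is a recurring pattern in these tests. A minimal sketch of the same loop factored into a helper; the helper name is hypothetical, and it uses only the calls already shown in Example 2.

/**
 * Hypothetical helper: poll the namenode until the expected number of
 * datanodes report as LIVE, giving up after five heartbeat intervals.
 */
private static DatanodeInfo[] waitForLiveDatanodes(NamenodeProtocols nn, int expected, int heartbeatIntervalSecs) throws IOException, InterruptedException {
    DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    for (int i = 0; i < 5 && info.length != expected; i++) {
        Thread.sleep(heartbeatIntervalSecs * 1000L);
        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    }
    return info;
}

With this helper, the body of testNNRestart reduces to one call followed by the assertEquals on the returned array's length.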

Example 3 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

Class AdminStatesBaseTest, method setup.

@Before
public void setup() throws IOException {
    // Set up the hosts/exclude files.
    hostsFileWriter = new HostsFileWriter();
    conf = new HdfsConfiguration();
    if (useCombinedHostFileManager) {
        conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
    }
    // Setup conf
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 200);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, NAMENODE_REPLICATION_INTERVAL);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
    hostsFileWriter.initialize(conf, "temp/admin");
}
Also used: HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Before (org.junit.Before)
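
The @Before method above creates temporary include/exclude files under "temp/admin", so the base test also needs a matching teardown. A minimal sketch, requiring org.junit.After and assuming a MiniDFSCluster field named cluster elsewhere in the base class; the field and method names here are illustrative, not the actual AdminStatesBaseTest code, which may manage more than one cluster.

@After
public void teardown() throws IOException {
    // Remove the generated include/exclude files and their directory.
    hostsFileWriter.cleanup();
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}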

Example 4 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

Class TestBlocksWithNotEnoughRacks, method testNodeDecomissionRespectsRackPolicy.

/*
   * Test that rack policy is still respected when blocks are replicated
   * due to node decommissioning.
   */
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    // Four datanodes on two racks
    String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        // Decommission one of the hosts with the block; this should cause
        // the block to get replicated to another host on the same rack,
        // otherwise the rack policy is violated.
        BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        hostsFileWriter.initExcludeHost(name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
        cluster.shutdown();
        hostsFileWriter.cleanup();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockLocation (org.apache.hadoop.fs.BlockLocation), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
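
Example 4 drives decommissioning through the legacy exclude file. With the CombinedHostFileManager shown in Example 1, the same intent can be expressed in the JSON include file instead. A minimal sketch, assuming DatanodeAdminProperties#setAdminState and DatanodeInfo.AdminStates as consumed by the combined host file manager (verify against your Hadoop version); the helper name is hypothetical, and in a real cluster every datanode would need an entry in the include file, not just the one being decommissioned.

/**
 * Hypothetical helper: mark a single host as DECOMMISSIONED in the JSON
 * include file and ask the DatanodeManager to re-read the host files.
 * Shown for one host only; include-file semantics require listing all
 * datanodes once the file is non-empty.
 */
private static void startDecommission(HostsFileWriter hostsFileWriter, FSNamesystem ns, Configuration conf, String hostName) throws Exception {
    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName(hostName);
    // Assumed API: the admin state travels in the JSON entry instead of a
    // separate exclude file.
    dn.setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { dn });
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
}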

Example 5 with HostsFileWriter

Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.

Class TestBlocksWithNotEnoughRacks, method testNodeDecomissionWithOverreplicationRespectsRackPolicy.

/*
   * Test that rack policy is still respected when blocks are replicated
   * due to node decommissioning, even when the blocks are over-replicated.
   */
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 5;
    final Path filePath = new Path("/testFile");
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    // All hosts are on two racks, only one host on /rack2
    String[] racks = { "/rack1", "/rack2", "/rack1", "/rack1", "/rack1" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        // Lower the replication factor so the blocks are over replicated
        REPLICATION_FACTOR = 2;
        fs.setReplication(filePath, REPLICATION_FACTOR);
        // Decommission one of the hosts holding the block, choosing one that
        // is not the lone host on /rack2 (if we decommissioned that host it
        // would be impossible to respect the rack policy).
        BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        for (String top : locs[0].getTopologyPaths()) {
            if (!top.startsWith("/rack2")) {
                String name = top.substring("/rack1".length() + 1);
                hostsFileWriter.initExcludeHost(name);
                ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
                DFSTestUtil.waitForDecommission(fs, name);
                break;
            }
        }
        // Check the block still has sufficient # replicas across racks,
        // i.e. we didn't remove the replica on the lone host on /rack2.
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
        cluster.shutdown();
        hostsFileWriter.cleanup();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockLocation (org.apache.hadoop.fs.BlockLocation), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
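
The substring("/rack1".length() + 1) call in Example 5 relies on the rack label being exactly "/rack1". A small rack-agnostic alternative (helper name hypothetical) that strips whatever rack prefix a topology path such as "/rack1/host:port" carries:

/**
 * Hypothetical helper: return the trailing "host:port" component of a
 * topology path, regardless of the rack name in front of it.
 */
private static String hostFromTopologyPath(String topologyPath) {
    return topologyPath.substring(topologyPath.lastIndexOf('/') + 1);
}

With this helper the loop body would read String name = hostFromTopologyPath(top); and would keep working if the rack labels changed.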

Aggregations

HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter): 14
Test (org.junit.Test): 11
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10
Configuration (org.apache.hadoop.conf.Configuration): 7
Path (org.apache.hadoop.fs.Path): 6
MBeanServer (javax.management.MBeanServer): 5
ObjectName (javax.management.ObjectName): 5
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
Map (java.util.Map): 3
BlockLocation (org.apache.hadoop.fs.BlockLocation): 3
FileSystem (org.apache.hadoop.fs.FileSystem): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3
IOException (java.io.IOException): 2
DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 2
Before (org.junit.Before): 2