Example 1 with Cli

use of org.apache.hadoop.hdfs.server.balancer.Balancer.Cli in project hadoop by apache.

the class TestBalancer method testManyBalancerSimultaneously.

/**
   * Test running many balancers simultaneously.
   *
   * Case-1: A first balancer is running. Starting a second one should fail
   * immediately with an "Another balancer is running. Exiting.." IOException.
   *
   * Case-2: When the second balancer starts, the 'balancer.id' file exists
   * but its lease does not. The second balancer should then run successfully.
   */
@Test(timeout = 100000)
public void testManyBalancerSimultaneously() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    // the cluster starts with a single node of capacity 4 * CAPACITY; an
    // empty node with half that capacity (2 * CAPACITY) joins the same rack
    long[] capacities = new long[] { 4 * CAPACITY };
    String[] racks = new String[] { RACK0 };
    long newCapacity = 2 * CAPACITY;
    String newRack = RACK0;
    LOG.info("capacities = " + long2String(capacities));
    LOG.info("racks      = " + Arrays.asList(racks));
    LOG.info("newCapacity= " + newCapacity);
    LOG.info("newRack    = " + newRack);
    LOG.info("useTool    = " + false);
    assertEquals(capacities.length, racks.length);
    int numOfDatanodes = capacities.length;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities).build();
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
    long totalCapacity = sum(capacities);
    // fill up the cluster to be 30% full
    final long totalUsedSpace = totalCapacity * 3 / 10;
    createFile(cluster, filePath, totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 0);
    // start up an empty node with half the existing capacity, on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack }, new long[] { newCapacity });
    // Case1: Simulate first balancer by creating 'balancer.id' file. It
    // will keep this file until the balancing operation is completed.
    FileSystem fs = cluster.getFileSystem(0);
    final FSDataOutputStream out = fs.create(Balancer.BALANCER_ID_PATH, false);
    out.writeBytes(InetAddress.getLocalHost().getHostName());
    out.hflush();
    assertTrue("'balancer.id' file doesn't exist!", fs.exists(Balancer.BALANCER_ID_PATH));
    // start second balancer
    final String[] args = { "-policy", "datanode" };
    final Tool tool = new Cli();
    tool.setConf(conf);
    // start balancing
    int exitCode = tool.run(args);
    assertEquals("Exit status code mismatches", ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);
    // Case2: Release lease so that another balancer would be able to
    // perform balancing.
    out.close();
    assertTrue("'balancer.id' file doesn't exist!", fs.exists(Balancer.BALANCER_ID_PATH));
    // start balancing
    exitCode = tool.run(args);
    assertEquals("Exit status code mismatches", ExitStatus.SUCCESS.getExitCode(), exitCode);
}
Also used : Cli(org.apache.hadoop.hdfs.server.balancer.Balancer.Cli) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Tool(org.apache.hadoop.util.Tool) Test(org.junit.Test)
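
Case-1 above hinges on HDFS's exclusive create: the first balancer writes 'balancer.id' with overwrite disabled, so a second create of the same path fails while the file exists. Below is a minimal sketch of that collision, assuming a running HDFS cluster reachable via the default configuration; the class name is hypothetical, and the path literal stands in for the package-private Balancer.BALANCER_ID_PATH constant used in the test.

// A minimal sketch, not production code: assumes a reachable HDFS cluster.
// The path literal mirrors the package-private Balancer.BALANCER_ID_PATH.
import java.io.IOException;
import java.net.InetAddress;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class BalancerIdLockSketch {

    public static void main(String[] args) throws Exception {
        Path balancerIdPath = new Path("/system/balancer.id");
        FileSystem fs = FileSystem.get(new HdfsConfiguration());
        // The first balancer takes the lock by creating the id file exclusively.
        FSDataOutputStream out = fs.create(balancerIdPath, false);
        out.writeBytes(InetAddress.getLocalHost().getHostName());
        out.hflush();
        try {
            // A concurrent exclusive create of the same path must fail, which
            // is how a second balancer learns that another one is running.
            fs.create(balancerIdPath, false);
        } catch (IOException expected) {
            System.out.println("Another balancer is running: " + expected.getMessage());
        } finally {
            // Closing the stream releases the lease (Case-2 in the test above).
            out.close();
        }
    }
}

The real Balancer goes further than this sketch: when the exclusive create fails but the existing file's lease has already been released, it recovers the stale id file and proceeds, which is exactly what Case-2 verifies after out.close().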

Example 2 with Cli

use of org.apache.hadoop.hdfs.server.balancer.Balancer.Cli in project hadoop by apache.

the class TestBalancer method runBalancerCli.

private void runBalancerCli(Configuration conf, long totalUsedSpace, long totalCapacity, BalancerParameters p, boolean useFile, int expectedExcludedNodes) throws Exception {
    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    List<String> args = new ArrayList<String>();
    args.add("-policy");
    args.add("datanode");
    File excludeHostsFile = null;
    if (!p.getExcludedNodes().isEmpty()) {
        args.add("-exclude");
        if (useFile) {
            excludeHostsFile = new File("exclude-hosts-file");
            try (PrintWriter pw = new PrintWriter(excludeHostsFile)) {
                for (String host : p.getExcludedNodes()) {
                    pw.write(host + "\n");
                }
            }
            args.add("-f");
            args.add("exclude-hosts-file");
        } else {
            args.add(StringUtils.join(p.getExcludedNodes(), ','));
        }
    }
    File includeHostsFile = null;
    if (!p.getIncludedNodes().isEmpty()) {
        args.add("-include");
        if (useFile) {
            includeHostsFile = new File("include-hosts-file");
            try (PrintWriter pw = new PrintWriter(includeHostsFile)) {
                for (String host : p.getIncludedNodes()) {
                    pw.write(host + "\n");
                }
            }
            args.add("-f");
            args.add("include-hosts-file");
        } else {
            args.add(StringUtils.join(p.getIncludedNodes(), ','));
        }
    }
    final Tool tool = new Cli();
    tool.setConf(conf);
    // start rebalancing
    final int r = tool.run(args.toArray(new String[0]));
    assertEquals("Tools should exit 0 on success", 0, r);
    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    LOG.info("Rebalancing with default ctor.");
    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, expectedExcludedNodes);
    if (excludeHostsFile != null && excludeHostsFile.exists()) {
        excludeHostsFile.delete();
    }
    if (includeHostsFile != null && includeHostsFile.exists()) {
        includeHostsFile.delete();
    }
}
Also used : Cli(org.apache.hadoop.hdfs.server.balancer.Balancer.Cli) ArrayList(java.util.ArrayList) File(java.io.File) PrintWriter(java.io.PrintWriter) Tool(org.apache.hadoop.util.Tool)
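
For a standalone run of the same CLI that runBalancerCli assembles arguments for, ToolRunner.run is the standard entry point for any org.apache.hadoop.util.Tool. A minimal sketch follows; the class name, host names, and file name are illustrative, and it declares the balancer package because Cli is package-private (TestBalancer sits in that package for the same reason). Outside that package, the equivalent is the hdfs balancer shell command.

// A sketch only: placed in the balancer package so the package-private Cli
// is accessible, just as TestBalancer is.
package org.apache.hadoop.hdfs.server.balancer;

import java.io.File;
import java.io.PrintWriter;

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.util.ToolRunner;

public class BalancerCliSketch {

    public static void main(String[] ignored) throws Exception {
        // Write the hosts to exclude, one per line, as runBalancerCli does.
        File excludeFile = new File("exclude-hosts-file"); // illustrative name
        try (PrintWriter pw = new PrintWriter(excludeFile)) {
            pw.println("dn1.example.com"); // illustrative host names
            pw.println("dn2.example.com");
        }
        String[] args = { "-policy", "datanode", "-exclude", "-f", excludeFile.getPath() };
        // ToolRunner handles generic options, then delegates to Cli.run(args).
        int exitCode = ToolRunner.run(new HdfsConfiguration(), new Balancer.Cli(), args);
        System.exit(exitCode);
    }
}

An exit code of 0 corresponds to ExitStatus.SUCCESS, matching the assertion in Example 1.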

Aggregations

Cli (org.apache.hadoop.hdfs.server.balancer.Balancer.Cli) 2
Tool (org.apache.hadoop.util.Tool) 2
File (java.io.File) 1
PrintWriter (java.io.PrintWriter) 1
ArrayList (java.util.ArrayList) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 1
Test (org.junit.Test) 1