Search in sources :

Example 1 with BalancerParameters

Uses of org.apache.hadoop.hdfs.server.balancer.BalancerParameters in the Apache Hadoop project.

From the class TestBalancerWithMultipleNameNodes: the method unevenDistribution.

/**
 * First start a cluster and fill it up to a certain size. Then redistribute
 * the blocks according to the required distribution. Finally, balance the
 * cluster.
 *
 * @param nNameNodes Number of NameNodes in the federated cluster
 * @param nNameNodesToBalance Number of NameNodes to run the balancer on
 * @param distributionPerNN Target per-datanode block distribution, applied
 *                          to every NameNode; its length defines the number
 *                          of datanodes
 * @param capacities Simulated capacities of the datanodes
 * @param racks Rack name for each datanode
 * @param conf Configuration (mutated: federated settings are copied in and
 *             the safemode threshold is disabled for the second phase)
 * @throws IllegalArgumentException if the array lengths disagree or more
 *         namenodes are requested for balancing than exist
 */
private void unevenDistribution(final int nNameNodes, final int nNameNodesToBalance, long[] distributionPerNN, long[] capacities, String[] racks, Configuration conf) throws Exception {
    LOG.info("UNEVEN 0");
    final int numDatanodes = distributionPerNN.length;
    if (capacities.length != numDatanodes || racks.length != numDatanodes) {
        throw new IllegalArgumentException("Array length is not the same");
    }
    if (nNameNodesToBalance > nNameNodes) {
        throw new IllegalArgumentException("Number of namenodes to balance is greater than the number of namenodes.");
    }
    // Total space each namespace must occupy once the cluster is filled.
    final long usedSpacePerNN = TestBalancer.sum(distributionPerNN);
    // Phase 1: bring up a federated cluster and generate the blocks.
    final ExtendedBlock[][] generated;
    {
        LOG.info("UNEVEN 1");
        final MiniDFSCluster fillCluster = new MiniDFSCluster.Builder(new Configuration(conf))
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
            .numDataNodes(numDatanodes)
            .racks(racks)
            .simulatedCapacities(capacities)
            .build();
        LOG.info("UNEVEN 2");
        try {
            fillCluster.waitActive();
            DFSTestUtil.setFederatedConfiguration(fillCluster, conf);
            LOG.info("UNEVEN 3");
            final Suite fillSuite = new Suite(fillCluster, nNameNodes, numDatanodes, null, conf);
            generated = generateBlocks(fillSuite, usedSpacePerNN);
            LOG.info("UNEVEN 4");
        } finally {
            fillCluster.shutdown();
        }
    }
    // Disable the safemode threshold so the restarted cluster leaves
    // safemode immediately after phase 2 starts.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
    {
        LOG.info("UNEVEN 10");
        // format(false) reuses the storage directories from phase 1.
        final MiniDFSCluster balanceCluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
            .numDataNodes(numDatanodes)
            .racks(racks)
            .simulatedCapacities(capacities)
            .format(false)
            .build();
        LOG.info("UNEVEN 11");
        try {
            balanceCluster.waitActive();
            LOG.info("UNEVEN 12");
            // Only the first nNameNodesToBalance block pools are balanced.
            final Set<String> poolsToBalance = new HashSet<String>();
            for (int nn = 0; nn < nNameNodesToBalance; nn++) {
                poolsToBalance.add(balanceCluster.getNamesystem(nn).getBlockPoolId());
            }
            final BalancerParameters.Builder paramBuilder = new BalancerParameters.Builder();
            paramBuilder.setBlockpools(poolsToBalance);
            final BalancerParameters params = paramBuilder.build();
            final Suite balanceSuite = new Suite(balanceCluster, nNameNodes, numDatanodes, params, conf);
            for (int nn = 0; nn < nNameNodes; nn++) {
                // Redistribute the phase-1 blocks to match the target
                // (uneven) distribution before running the balancer.
                final Block[][] perDatanode = TestBalancer.distributeBlocks(generated[nn], balanceSuite.replication, distributionPerNN);
                for (int dn = 0; dn < perDatanode.length; dn++) {
                    balanceCluster.injectBlocks(nn, dn, Arrays.asList(perDatanode[dn]));
                }
                LOG.info("UNEVEN 13: n=" + nn);
            }
            final long totalCapacity = TestBalancer.sum(capacities);
            final long totalUsed = usedSpacePerNN * nNameNodes;
            LOG.info("UNEVEN 14");
            runBalancer(balanceSuite, totalUsed, totalCapacity);
            LOG.info("UNEVEN 15");
        } finally {
            balanceCluster.shutdown();
        }
        LOG.info("UNEVEN 16");
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HashSet(java.util.HashSet) Set(java.util.Set) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) BalancerParameters(org.apache.hadoop.hdfs.server.balancer.BalancerParameters) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)

Aggregations

HashSet (java.util.HashSet)1 Set (java.util.Set)1 Configuration (org.apache.hadoop.conf.Configuration)1 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)1 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)1 Block (org.apache.hadoop.hdfs.protocol.Block)1 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)1 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)1 BalancerParameters (org.apache.hadoop.hdfs.server.balancer.BalancerParameters)1