
Example 1 with MiniDFSClusterWithNodeGroup

Use of org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup in the Apache Hadoop project.

From the class TestBalancerWithNodeGroup, method testBalancerWithRackLocality.

/**
   * Create a cluster with an even distribution, add a new empty node to the
   * cluster, then test rack locality for the balancer policy.
   */
@Test(timeout = 60000)
public void testBalancerWithRackLocality() throws Exception {
    Configuration conf = createConf();
    long[] capacities = new long[] { CAPACITY, CAPACITY };
    String[] racks = new String[] { RACK0, RACK1 };
    String[] nodeGroups = new String[] { NODEGROUP0, NODEGROUP1 };
    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
        .numDataNodes(capacities.length)
        .racks(racks)
        .simulatedCapacities(capacities);
    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
    cluster = new MiniDFSClusterWithNodeGroup(builder);
    try {
        cluster.waitActive();
        client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        long totalCapacity = TestBalancer.sum(capacities);
        // fill up the cluster to be 30% full
        long totalUsedSpace = totalCapacity * 3 / 10;
        long length = totalUsedSpace / numOfDatanodes;
        TestBalancer.createFile(cluster, filePath, length, (short) numOfDatanodes, 0);
        LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
        Set<ExtendedBlock> before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
        long newCapacity = CAPACITY;
        String newRack = RACK1;
        String newNodeGroup = NODEGROUP2;
        // start up an empty node with the same capacity and on the same rack
        cluster.startDataNodes(conf, 1, true, null, new String[] { newRack }, new long[] { newCapacity }, new String[] { newNodeGroup });
        totalCapacity += newCapacity;
        // run balancer and validate results
        runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
        lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
        Set<ExtendedBlock> after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
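        // verify rack locality: the set of blocks with a replica on RACK0 must be unchanged by balancing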
        assertEquals(before, after);
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MiniDFSClusterWithNodeGroup (org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
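The getBlocksOnRack helper called above is not shown on this page. Below is a minimal sketch of what it could look like, assuming node-group network locations of the form /rack/nodegroup so that a prefix check against the rack path identifies a replica's rack; besides the imports listed above it needs java.util.HashSet, java.util.List, java.util.Set, org.apache.hadoop.hdfs.protocol.LocatedBlock, and org.apache.hadoop.hdfs.protocol.DatanodeInfo. The body in the Hadoop source may differ.

private Set<ExtendedBlock> getBlocksOnRack(List<LocatedBlock> blks, String rack) {
    // collect every block that has at least one replica on the given rack
    Set<ExtendedBlock> ret = new HashSet<ExtendedBlock>();
    for (LocatedBlock blk : blks) {
        for (DatanodeInfo dn : blk.getLocations()) {
            // network locations look like /rack/nodegroup, so the rack is a path prefix
            if (dn.getNetworkLocation().startsWith(rack)) {
                ret.add(blk.getBlock());
                break; // one replica on the rack is enough to count the block
            }
        }
    }
    return ret;
}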

Example 2 with MiniDFSClusterWithNodeGroup

Use of org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup in the Apache Hadoop project.

From the class TestBalancerWithNodeGroup, method testBalancerEndInNoMoveProgress.

/**
   * Create a 4-node cluster: 2 nodes (n0, n1) in RACK0/NODEGROUP0, 1 node (n2)
   * in RACK1/NODEGROUP1, and 1 node (n3) in RACK1/NODEGROUP2. Fill the cluster
   * to 60% with 3 replicas per block, so that under the node-group-aware replica
   * placement policy n2 and n3 each hold a replica of every block. As a result,
   * n2 and n3 are filled to 80% (60% x 4 / 3), and under the node-group balancer
   * policy no blocks can be migrated from n2 or n3 to n0 or n1. Thus, we expect
   * the balancer to end within 5 iterations without moving any blocks.
   */
@Test(timeout = 60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
    Configuration conf = createConf();
    long[] capacities = new long[] { CAPACITY, CAPACITY, CAPACITY, CAPACITY };
    String[] racks = new String[] { RACK0, RACK0, RACK1, RACK1 };
    String[] nodeGroups = new String[] { NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2 };
    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    assertEquals(numOfDatanodes, nodeGroups.length);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
        .numDataNodes(capacities.length)
        .racks(racks)
        .simulatedCapacities(capacities);
    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
    cluster = new MiniDFSClusterWithNodeGroup(builder);
    try {
        cluster.waitActive();
        client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        long totalCapacity = TestBalancer.sum(capacities);
        // fill up the cluster to be 60% full
        long totalUsedSpace = totalCapacity * 6 / 10;
        TestBalancer.createFile(cluster, filePath, totalUsedSpace / 3, (short) (3), 0);
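        // with replication 3, the lone nodes in NODEGROUP1 and NODEGROUP2 (n2, n3)
        // each receive a replica of every block, as described in the javadoc above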
        // run the balancer; it should finish within 5 iterations with no block movement
        runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MiniDFSClusterWithNodeGroup (org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup), Test (org.junit.Test)
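The runBalancerCanFinish helper is likewise defined elsewhere in TestBalancerWithNodeGroup. A rough sketch under stated assumptions: Balancer, BalancerParameters, and ExitStatus come from org.apache.hadoop.hdfs.server.balancer, DFSUtil.getInternalNsRpcUris resolves the namenode URIs (it also needs java.util.Collection and java.net.URI), and waitForHeartBeat is assumed to be another helper in the same class that waits until the namenode reports the expected used and total space. The real method may differ in detail.

private void runBalancerCanFinish(Configuration conf, long totalUsedSpace,
        long totalCapacity) throws Exception {
    // wait until the namenode's datanode reports reflect the space we just used
    waitForHeartBeat(totalUsedSpace, totalCapacity);
    // run one balancing pass over every namespace in the cluster
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
    // accept a clean finish or a deliberate stop because no block could be moved
    assertTrue(r == ExitStatus.SUCCESS.getExitCode()
        || r == ExitStatus.NO_MOVE_PROGRESS.getExitCode());
}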

Example 3 with MiniDFSClusterWithNodeGroup

Use of org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup in the Apache Hadoop project.

From the class TestBalancerWithNodeGroup, method testBalancerWithNodeGroup.

/**
   * Create a cluster with an even distribution, add a new empty node to the
   * cluster, then test node-group locality for the balancer policy.
   */
@Test(timeout = 60000)
public void testBalancerWithNodeGroup() throws Exception {
    Configuration conf = createConf();
    long[] capacities = new long[] { CAPACITY, CAPACITY, CAPACITY, CAPACITY };
    String[] racks = new String[] { RACK0, RACK0, RACK1, RACK1 };
    String[] nodeGroups = new String[] { NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2 };
    int numOfDatanodes = capacities.length;
    assertEquals(numOfDatanodes, racks.length);
    assertEquals(numOfDatanodes, nodeGroups.length);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
        .numDataNodes(capacities.length)
        .racks(racks)
        .simulatedCapacities(capacities);
    MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
    cluster = new MiniDFSClusterWithNodeGroup(builder);
    try {
        cluster.waitActive();
        client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        long totalCapacity = TestBalancer.sum(capacities);
        // fill up the cluster to be 20% full
        long totalUsedSpace = totalCapacity * 2 / 10;
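        // replication = numOfDatanodes / 2 = 2; file length x replication equals totalUsedSpace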
        TestBalancer.createFile(cluster, filePath, totalUsedSpace / (numOfDatanodes / 2), (short) (numOfDatanodes / 2), 0);
        long newCapacity = CAPACITY;
        String newRack = RACK1;
        String newNodeGroup = NODEGROUP2;
        // start up an empty node with the same capacity and on NODEGROUP2
        cluster.startDataNodes(conf, 1, true, null, new String[] { newRack }, new long[] { newCapacity }, new String[] { newNodeGroup });
        totalCapacity += newCapacity;
        // run balancer and validate results
        runBalancer(conf, totalUsedSpace, totalCapacity);
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MiniDFSClusterWithNodeGroup (org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup), Test (org.junit.Test)
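All three examples call createConf, which is defined elsewhere in the class. A minimal sketch of how it might wire up node-group awareness, assuming TestBalancer.initConf applies the usual balancer test settings; the two keys select the node-group topology (net.topology.impl) and the node-group replica placement policy (dfs.block.replicator.classname). The real method may set more.

static Configuration createConf() {
    Configuration conf = new HdfsConfiguration();
    // common balancer test settings (block size, heartbeat interval, etc.)
    TestBalancer.initConf(conf);
    // make the namenode's topology node-group aware
    conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
        NetworkTopologyWithNodeGroup.class.getName());
    // place replicas with the node-group-aware placement policy
    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        BlockPlacementPolicyWithNodeGroup.class.getName());
    return conf;
}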

Aggregations

Configuration (org.apache.hadoop.conf.Configuration): 3 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 uses
MiniDFSClusterWithNodeGroup (org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup): 3 uses
Test (org.junit.Test): 3 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 1 use
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 1 use