Example 16 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

The class TestDataModels, method testNoBalancingNeededEvenDataSpread.

@Test
public void testNoBalancingNeededEvenDataSpread() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two disks with exactly the same data; isBalancingNeeded should
    // report that no balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(500 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
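
The threshold passed to isBalancingNeeded is a percentage. Because both volumes report identical capacity, reserve, and usage, the spread across the volume set is zero and no threshold can be exceeded. The sketch below shows the underlying idea under stated assumptions: the class name is hypothetical, and the density formula (used space over capacity minus reserve, compared against the set-wide ideal) is an assumption about the real DiskBalancerVolumeSet logic, not the Hadoop implementation.

final class DensityCheckSketch {
    // Balancing is needed once any volume strays from the set's ideal
    // density by more than the threshold, expressed in percent.
    static boolean isBalancingNeeded(long[] used, long[] capacity,
                                     long[] reserved, float thresholdPercent) {
        double totalUsed = 0;
        double totalEffective = 0;
        for (int i = 0; i < used.length; i++) {
            totalUsed += used[i];
            totalEffective += capacity[i] - reserved[i];
        }
        double ideal = totalUsed / totalEffective;
        for (int i = 0; i < used.length; i++) {
            double density = (double) used[i] / (capacity[i] - reserved[i]);
            if (Math.abs(density - ideal) * 100.0 > thresholdPercent) {
                return true;
            }
        }
        return false;
    }
}

In this test both volumes sit at the same usage, so every per-volume density equals the ideal and the check returns false, matching the assertion above.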

Example 17 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

The class TestDataModels, method testNoBalancingNeededTransientDisks.

@Test
public void testNoBalancingNeededTransientDisks() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two disks with different amounts of data, but on transient
    // storage; isBalancingNeeded should report that no balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.RAM_DISK);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(1 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.RAM_DISK);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
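
RAM_DISK is the transient storage tier in HDFS: blocks placed there do not survive a restart, so there is nothing durable for the balancer to move. A minimal sketch of such a guard, using the real StorageType.isTransient() accessor but an otherwise hypothetical class and method:

import org.apache.hadoop.fs.StorageType;

final class TransientGuardSketch {
    // Transient media lose their blocks on restart, so balancing them is
    // pointless no matter how uneven the usage looks.
    static boolean isBalancingNeeded(StorageType type, double deviationPercent,
                                     float thresholdPercent) {
        if (type.isTransient()) {  // true for StorageType.RAM_DISK
            return false;
        }
        return deviationPercent > thresholdPercent;
    }
}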

Example 18 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

The class TestDataModels, method testNoBalancingNeededFailedDisks.

@Test
public void testNoBalancingNeededFailedDisks() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two normal disks, then fail one of them. The volume set should
    // report that no balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(1 * DiskBalancerTestUtil.GB);
    v1.setFailed(true);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
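
A failed volume cannot take part in block moves, so it is excluded before the spread is evaluated; with only one healthy volume left there is nothing to balance against. A sketch of that filtering step, assuming an isFailed() getter matching the setFailed(true) call in the test (class and method names here are illustrative):

import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;

final class FailedVolumeFilterSketch {
    // Drop failed volumes before computing any density spread.
    static List<DiskBalancerVolume> healthyVolumes(List<DiskBalancerVolume> all) {
        List<DiskBalancerVolume> healthy = new LinkedList<>();
        for (DiskBalancerVolume v : all) {
            if (!v.isFailed()) {  // v1 above was marked with setFailed(true)
                healthy.add(v);
            }
        }
        return healthy;
    }
}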

Example 19 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

The class TestDataModels, method testDiskQueues.

@Test
public void testDiskQueues() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = util.createRandomDataNode(new StorageType[] { StorageType.DISK, StorageType.RAM_DISK }, 3);
    TreeSet<DiskBalancerVolume> sortedQueue = node.getVolumeSets().get(StorageType.DISK.toString()).getSortedQueue();
    List<DiskBalancerVolume> reverseList = new LinkedList<>();
    List<DiskBalancerVolume> highList = new LinkedList<>();
    int queueSize = sortedQueue.size();
    for (int x = 0; x < queueSize; x++) {
        reverseList.add(sortedQueue.first());
        highList.add(sortedQueue.first());
    }
    Collections.reverse(reverseList);
    for (int x = 0; x < queueSize; x++) {
        Assert.assertEquals(reverseList.get(x).getCapacity(), highList.get(x).getCapacity());
        Assert.assertEquals(reverseList.get(x).getReserved(), highList.get(x).getReserved());
        Assert.assertEquals(reverseList.get(x).getUsed(), highList.get(x).getUsed());
    }
}
Also used: DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), LinkedList (java.util.LinkedList), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
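
Note that TreeSet.first() only peeks at the lowest element and never removes it, so both lists above end up holding queueSize references to the same volume, and each assertion compares that volume's fields with themselves. A variant that walks the full sort order would drain a copy of the queue. The fragment below uses only standard java.util semantics: the TreeSet copy constructor keeps the original comparator, and pollFirst() removes the element it returns.

// Drain a copy so each list position holds a distinct volume.
TreeSet<DiskBalancerVolume> copy = new TreeSet<>(sortedQueue);
List<DiskBalancerVolume> ascending = new LinkedList<>();
while (!copy.isEmpty()) {
    ascending.add(copy.pollFirst());
}
List<DiskBalancerVolume> descending = new LinkedList<>(ascending);
Collections.reverse(descending);
// descending.get(0) now refers to the volume that sorts last, so an
// element-wise comparison between the two lists is meaningful.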

Example 20 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

The class TestDataModels, method testNeedBalancingUnevenDataSpread.

@Test
public void testNeedBalancingUnevenDataSpread() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two disks with a very uneven data spread (0 GB vs. 500 GB);
    // isBalancingNeeded should report that balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(0);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertTrue(vsets.isBalancingNeeded(10.0f));
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
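
The arithmetic behind this assertion is easy to check by hand. Assuming DiskBalancerTestUtil.TB is 1024 GB, and reusing the density sketch from Example 16 (itself an assumption about the real formula), each volume has 1024 - 100 = 924 GB of effective space:

public class UnevenSpreadMath {
    public static void main(String[] args) {
        double effective = 1024.0 - 100.0;              // GB usable per volume
        double d1 = 0.0 / effective;                    // v1 density  = 0.000
        double d2 = 500.0 / effective;                  // v2 density ~= 0.541
        double ideal = (0.0 + 500.0) / (2 * effective); // ideal      ~= 0.271
        // Both volumes sit ~27 percentage points from the ideal, far above
        // the 10.0f threshold, so isBalancingNeeded(10.0f) returns true.
        System.out.printf("v1 deviation = %.1f, v2 deviation = %.1f points%n",
            (ideal - d1) * 100.0, (d2 - ideal) * 100.0);
    }
}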

Aggregations

DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 27 usages
Test (org.junit.Test): 19 usages
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 16 usages
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 13 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 12 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 10 usages
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 7 usages
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5 usages
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 3 usages
LinkedList (java.util.LinkedList): 2 usages
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 2 usages
URI (java.net.URI): 1 usage
StrBuilder (org.apache.commons.lang.text.StrBuilder): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
StorageType (org.apache.hadoop.fs.StorageType): 1 usage
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 usage
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 usage