
Example 21 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From class TestDataModels, method testNoBalancingNeededTransientDisks.

@Test
public void testNoBalancingNeededTransientDisks() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two disks that hold different amounts of data but are both
    // transient. isBalancingNeeded should say no balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.RAM_DISK);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(1 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.RAM_DISK);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
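
What isBalancingNeeded(10.0f) checks is, at heart, a density comparison. The sketch below is a hypothetical, simplified version of that idea, not Hadoop's actual implementation: it assumes density is used space over capacity, that the threshold argument is a percentage, and that transient volumes are skipped entirely.

// Hypothetical sketch of a threshold-based balancing check. The class
// name, the density formula (used / capacity) and the skip rule are
// assumptions for illustration; DiskBalancerVolumeSet's real logic may differ.
final class BalancingCheckSketch {

    // Returns true if any non-transient volume deviates from the
    // volume set's ideal density by more than thresholdPercent.
    static boolean isBalancingNeeded(long[] capacity, long[] used,
                                     boolean[] isTransient,
                                     float thresholdPercent) {
        long totalCapacity = 0;
        long totalUsed = 0;
        for (int i = 0; i < capacity.length; i++) {
            if (isTransient[i]) {
                continue; // transient volumes (e.g. RAM_DISK) never count
            }
            totalCapacity += capacity[i];
            totalUsed += used[i];
        }
        if (totalCapacity == 0) {
            return false; // no usable volumes, nothing to balance
        }
        double idealDensity = (double) totalUsed / totalCapacity;
        for (int i = 0; i < capacity.length; i++) {
            if (isTransient[i]) {
                continue;
            }
            double density = (double) used[i] / capacity[i];
            if (Math.abs(density - idealDensity) * 100.0 > thresholdPercent) {
                return true;
            }
        }
        return false;
    }
}

Under that model, both RAM_DISK volumes above are skipped, totalCapacity stays zero, and the check returns false no matter how lopsided the usage is, which is exactly what the test asserts.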

Example 22 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From class TestDataModels, method testCreateRandomVolume.

@Test
public void testCreateRandomVolume() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolume vol = util.createRandomVolume(StorageType.DISK);
    Assert.assertNotNull(vol.getUuid());
    Assert.assertNotNull(vol.getPath());
    Assert.assertNotNull(vol.getStorageType());
    Assert.assertFalse(vol.isFailed());
    Assert.assertFalse(vol.isTransient());
    Assert.assertTrue(vol.getCapacity() > 0);
    Assert.assertTrue((vol.getCapacity() - vol.getReserved()) > 0);
    Assert.assertTrue((vol.getReserved() + vol.getUsed()) < vol.getCapacity());
}
Also used: DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), Test (org.junit.Test)
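
Read together, the assertions define the contract a randomly generated volume must satisfy. Below is a minimal sketch of a generator that respects those bounds; the class name and the size ranges are hypothetical, not DiskBalancerTestUtil's actual code.

import java.util.Random;

// Hypothetical generator for (capacity, reserved, used) triples that
// satisfy the invariants asserted above: capacity > 0,
// capacity - reserved > 0, and reserved + used < capacity.
final class RandomVolumeSketch {
    private static final long GB = 1024L * 1024 * 1024;
    private static final Random RAND = new Random();

    static long[] randomSizes() {
        long capacity = (1 + RAND.nextInt(1024)) * GB;   // 1 GB .. 1 TB
        // Reserve at most 10%, so capacity - reserved stays positive.
        long reserved = (long) (capacity * 0.1 * RAND.nextDouble());
        // Use strictly less than the remaining space, so
        // reserved + used always stays below capacity.
        long used = (long) ((capacity - reserved) * 0.9 * RAND.nextDouble());
        return new long[] {capacity, reserved, used};
    }
}

Keeping reserved and used strictly inside the capacity means the last two assertions can never fail on an unlucky draw.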

Example 23 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From class TestDataModels, method testNoBalancingNeededFailedDisks.

@Test
public void testNoBalancingNeededFailedDisks() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two otherwise normal disks, but fail one of them.
    // The volume set should say no balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(1 * DiskBalancerTestUtil.GB);
    v1.setFailed(true);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
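
Worked through with the numbers above, and assuming a failed volume drops out of the density computation the same way a transient one does: v1 sits at roughly 0.1% usage but is marked failed, so it is skipped; v2 is then the only participating volume, and a set with a single usable volume always matches its own ideal density. That is why the expected answer is false even though raw usage differs by 499 GB.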

Example 24 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From class TestDataModels, method testDiskQueues.

@Test
public void testDiskQueues() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = util.createRandomDataNode(new StorageType[] { StorageType.DISK, StorageType.RAM_DISK }, 3);
    TreeSet<DiskBalancerVolume> sortedQueue = node.getVolumeSets().get(StorageType.DISK.toString()).getSortedQueue();
    List<DiskBalancerVolume> reverseList = new LinkedList<>();
    List<DiskBalancerVolume> highList = new LinkedList<>();
    int queueSize = sortedQueue.size();
    // Copy the queue front-to-back (sorted order) and back-to-front,
    // without disturbing the queue itself.
    highList.addAll(sortedQueue);
    reverseList.addAll(sortedQueue.descendingSet());
    // Reversing the descending copy must reproduce the sorted copy.
    Collections.reverse(reverseList);
    for (int x = 0; x < queueSize; x++) {
        Assert.assertEquals(reverseList.get(x).getCapacity(), highList.get(x).getCapacity());
        Assert.assertEquals(reverseList.get(x).getReserved(), highList.get(x).getReserved());
        Assert.assertEquals(reverseList.get(x).getUsed(), highList.get(x).getUsed());
    }
}
Also used: DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), LinkedList (java.util.LinkedList), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
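
getSortedQueue() hands back a TreeSet, so the ordering comes from a comparator. A small self-contained sketch of that pattern follows; the Volume record and the used-ratio ordering key are assumptions for illustration, not Hadoop's actual comparator.

import java.util.Comparator;
import java.util.TreeSet;

// Hypothetical sorted volume queue: ordered on used-space ratio with a
// uuid tie-break, so volumes with equal ratios are not collapsed by the set.
final class SortedQueueSketch {
    record Volume(String uuid, long capacity, long used) {
        double usedRatio() {
            return (double) used / capacity;
        }
    }

    public static void main(String[] args) {
        TreeSet<Volume> queue = new TreeSet<>(
            Comparator.comparingDouble(Volume::usedRatio)
                .thenComparing(Volume::uuid));
        queue.add(new Volume("a", 1000L, 900L));
        queue.add(new Volume("b", 1000L, 100L));
        queue.add(new Volume("c", 1000L, 500L));
        System.out.println(queue.first().uuid()); // b: least used
        System.out.println(queue.last().uuid());  // a: most used
        // descendingSet() walks the same elements in reverse, which is
        // the property the test above relies on.
        System.out.println(queue.descendingSet().first().uuid()); // a
    }
}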

Example 25 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From class TestDataModels, method testNeedBalancingUnevenDataSpread.

@Test
public void testNeedBalancingUnevenDataSpread() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(0);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertTrue(vsets.isBalancingNeeded(10.0f));
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
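
Plugging in the numbers, and again assuming density is used space over capacity: v1 sits at 0% and v2 at about 48.8% (500 GB of 1 TB), so the ideal density for the set is roughly 24.4%. Both volumes deviate from the ideal by about 24 percentage points, comfortably past the 10.0f threshold, which is why isBalancingNeeded must return true here.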

Aggregations

DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 25 usages
Test (org.junit.Test): 17 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 16 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 10 usages
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 10 usages
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 10 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 10 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 10 usages
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5 usages
LinkedList (java.util.LinkedList): 1 usage
StorageType (org.apache.hadoop.fs.StorageType): 1 usage
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1 usage
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1 usage
DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport): 1 usage
StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport): 1 usage