Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache: class TestDataModels, method testNoBalancingNeededTransientDisks.
@Test
public void testNoBalancingNeededTransientDisks() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // Create two disks that hold different amounts of data but are both
  // transient. isBalancingNeeded should report that no balancing is needed.
  DiskBalancerVolume v1 = util.createRandomVolume(StorageType.RAM_DISK);
  v1.setCapacity(DiskBalancerTestUtil.TB);
  v1.setReserved(100 * DiskBalancerTestUtil.GB);
  v1.setUsed(1 * DiskBalancerTestUtil.GB);
  DiskBalancerVolume v2 = util.createRandomVolume(StorageType.RAM_DISK);
  v2.setCapacity(DiskBalancerTestUtil.TB);
  v2.setReserved(100 * DiskBalancerTestUtil.GB);
  v2.setUsed(500 * DiskBalancerTestUtil.GB);
  node.addVolume(v1);
  node.addVolume(v2);
  for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
    Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
  }
}
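For context, here is a minimal sketch of the eligibility-plus-threshold check that these isBalancingNeeded tests exercise. The helper name balancingNeeded and the raw-capacity ratios are illustrative assumptions, not the actual DiskBalancerVolumeSet internals; the real implementation may, for instance, subtract reserved space before computing ratios.

// Illustrative sketch only: approximates the behavior the tests above
// expect from DiskBalancerVolumeSet.isBalancingNeeded.
static boolean balancingNeeded(List<DiskBalancerVolume> volumes,
    float thresholdPercent) {
  long totalUsed = 0;
  long totalCapacity = 0;
  for (DiskBalancerVolume v : volumes) {
    if (v.isFailed() || v.isTransient()) {
      continue; // failed and transient disks never participate
    }
    totalUsed += v.getUsed();
    totalCapacity += v.getCapacity();
  }
  if (totalCapacity == 0) {
    return false; // no eligible volumes, nothing to balance
  }
  double idealRatio = (double) totalUsed / totalCapacity;
  for (DiskBalancerVolume v : volumes) {
    if (v.isFailed() || v.isTransient()) {
      continue;
    }
    double usedRatio = (double) v.getUsed() / v.getCapacity();
    // A volume deviating from the ideal by more than the threshold
    // (given in percent) makes balancing necessary.
    if (Math.abs(idealRatio - usedRatio) * 100.0 > thresholdPercent) {
      return true;
    }
  }
  return false;
}

In the RAM_DISK test above, both volumes fail the isTransient filter, so the eligible set is empty and the check returns false no matter how uneven the data is.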
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache: class TestDataModels, method testCreateRandomVolume.
@Test
public void testCreateRandomVolume() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolume vol = util.createRandomVolume(StorageType.DISK);
  Assert.assertNotNull(vol.getUuid());
  Assert.assertNotNull(vol.getPath());
  Assert.assertNotNull(vol.getStorageType());
  Assert.assertFalse(vol.isFailed());
  Assert.assertFalse(vol.isTransient());
  Assert.assertTrue(vol.getCapacity() > 0);
  Assert.assertTrue((vol.getCapacity() - vol.getReserved()) > 0);
  Assert.assertTrue((vol.getReserved() + vol.getUsed()) < vol.getCapacity());
}
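The assertions pin down the invariants a randomly generated volume must satisfy: non-null uuid, path, and storage type; unfailed and non-transient; positive capacity; and reserved plus used space strictly below capacity. A hand-built equivalent might look like the following; the no-arg constructor and the setUuid, setPath, and setStorageType setters are assumed from the bean-style data model rather than taken verbatim from the Hadoop source.

// Hypothetical hand-built volume honoring the same invariants that
// testCreateRandomVolume asserts for the generated one.
DiskBalancerVolume vol = new DiskBalancerVolume();
vol.setUuid(UUID.randomUUID().toString());
vol.setPath("/data/disk0");
vol.setStorageType(StorageType.DISK.toString());
vol.setCapacity(DiskBalancerTestUtil.TB);
vol.setReserved(100 * DiskBalancerTestUtil.GB);
vol.setUsed(500 * DiskBalancerTestUtil.GB); // 100 GB + 500 GB < 1 TB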
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache: class TestDataModels, method testNoBalancingNeededFailedDisks.
@Test
public void testNoBalancingNeededFailedDisks() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // Create two normal disks, but fail one of them. The VolumeSet
  // should report that no balancing is needed.
  DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
  v1.setCapacity(DiskBalancerTestUtil.TB);
  v1.setReserved(100 * DiskBalancerTestUtil.GB);
  v1.setUsed(1 * DiskBalancerTestUtil.GB);
  v1.setFailed(true);
  DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
  v2.setCapacity(DiskBalancerTestUtil.TB);
  v2.setReserved(100 * DiskBalancerTestUtil.GB);
  v2.setUsed(500 * DiskBalancerTestUtil.GB);
  node.addVolume(v1);
  node.addVolume(v2);
  for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
    Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
  }
}
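Under the sketch shown after the first test, setFailed(true) removes v1 from the eligible set, leaving v2 as the only participating disk; a single volume's used ratio always equals the set-wide ideal, so the deviation is zero and balancing is never requested.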
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache: class TestDataModels, method testDiskQueues.
@Test
public void testDiskQueues() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerDataNode node = util.createRandomDataNode(
      new StorageType[] { StorageType.DISK, StorageType.RAM_DISK }, 3);
  TreeSet<DiskBalancerVolume> sortedQueue =
      node.getVolumeSets().get(StorageType.DISK.toString()).getSortedQueue();
  List<DiskBalancerVolume> reverseList = new LinkedList<>();
  List<DiskBalancerVolume> highList = new LinkedList<>();
  int queueSize = sortedQueue.size();
  for (int x = 0; x < queueSize; x++) {
    // pollFirst removes the smallest element as it reads, draining the
    // queue in ascending order into both lists.
    DiskBalancerVolume vol = sortedQueue.pollFirst();
    reverseList.add(vol);
    highList.add(vol);
  }
  Collections.reverse(reverseList);
  for (int x = 0; x < queueSize; x++) {
    // After the reversal, element x of reverseList is element
    // (queueSize - x - 1) of highList.
    Assert.assertEquals(reverseList.get(x).getCapacity(),
        highList.get(queueSize - x - 1).getCapacity());
    Assert.assertEquals(reverseList.get(x).getReserved(),
        highList.get(queueSize - x - 1).getReserved());
    Assert.assertEquals(reverseList.get(x).getUsed(),
        highList.get(queueSize - x - 1).getUsed());
  }
}
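The loop above depends only on TreeSet draining in sorted order. A self-contained, JDK-only sketch of the same drain-and-reverse pattern (class and variable names here are illustrative):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.TreeSet;

public final class DrainDemo {
  public static void main(String[] args) {
    TreeSet<Long> queue = new TreeSet<>(List.of(3L, 1L, 2L));
    List<Long> ascending = new ArrayList<>();
    while (!queue.isEmpty()) {
      ascending.add(queue.pollFirst()); // smallest remaining element
    }
    List<Long> descending = new ArrayList<>(ascending);
    Collections.reverse(descending);
    System.out.println(ascending);  // [1, 2, 3]
    System.out.println(descending); // [3, 2, 1]
  }
}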
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache: class TestDataModels, method testNeedBalancingUnevenDataSpread.
@Test
public void testNeedBalancingUnevenDataSpread() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // Two identical SSDs, one empty and one holding 500 GB of data;
  // an uneven spread like this should trigger balancing.
  DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
  v1.setCapacity(DiskBalancerTestUtil.TB);
  v1.setReserved(100 * DiskBalancerTestUtil.GB);
  v1.setUsed(0);
  DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
  v2.setCapacity(DiskBalancerTestUtil.TB);
  v2.setReserved(100 * DiskBalancerTestUtil.GB);
  v2.setUsed(500 * DiskBalancerTestUtil.GB);
  node.addVolume(v1);
  node.addVolume(v2);
  for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
    Assert.assertTrue(vsets.isBalancingNeeded(10.0f));
  }
}
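Plugging the test's numbers into the raw-capacity formula from the earlier sketch: the ideal used ratio is 500 GB / 2 TB, about 24.4%, while v1 sits at 0% and v2 at roughly 48.8%, so each volume deviates from the ideal by about 24 percentage points, well past the 10% threshold. If the implementation subtracts reserved space first, the ratios shift (ideal about 27.1%, v2 about 54.1%) but the conclusion is the same.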