
Example 6 with DiskBalancerVolume

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

the class TestPlanner method testGreedyPlannerTwoVolume.

@Test
public void testGreedyPlannerTwoVolume() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
    node.addVolume(volume10);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(5.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should have only one planned move from
    // volume30 to volume10 of 10 GB Size.
    assertEquals(1, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume30", step.getSourceVolume().getPath());
    assertEquals("volume10", step.getDestinationVolume().getPath());
    assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
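A back-of-the-envelope sketch of why the test above expects exactly one 10 GB move (illustrative arithmetic, not the exact criterion inside GreedyPlanner): the two volumes hold 40 GB of data on 200 GB of capacity, so the ideal data density is 20%; volume30 sits at 30% and volume10 at 10%, and shifting 10 GB brings both to the ideal, well within the 5.0f tolerance passed to the planner.

// Illustrative arithmetic only; the variable names below are made up for this sketch.
long totalUsedGB = 30 + 10;                                    // data on both volumes
long totalCapacityGB = 100 + 100;                              // capacity of both volumes
double idealDensity = (double) totalUsedGB / totalCapacityGB;  // 0.20
long gbToMove = 30 - (long) (idealDensity * 100);              // 10 GB from volume30 to volume10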

Example 7 with DiskBalancerVolume

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

the class TestPlanner method testPlannerScale.

@Test
public void testPlannerScale() throws Exception {
    // 256 disks stresses the planner well beyond a typical node (it is rare to see more than 48 disks)
    final int diskCount = 256;
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    int diskNum = 0;
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        vol.setPath("volume" + diskNum++);
        node.addVolume(vol);
    }
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    // Assuming that our random disks at least generated one step
    assertTrue("No Steps Generated from random disks, very unlikely", newPlan.getVolumeSetPlans().size() > 0);
    assertTrue("Steps Generated less than disk count - false", newPlan.getVolumeSetPlans().size() < diskCount);
    LOG.info("Number of steps are : %d%n", newPlan.getVolumeSetPlans().size());
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
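If you want to eyeball what the planner produced for the 256 random disks, a small loop over the plan's steps works. This is illustrative only and assumes the Step import shown in Example 6, but it uses only accessors that the other tests in this class already exercise.

// Illustrative only: dump each planned move using the Step accessors from Example 6.
for (Step step : newPlan.getVolumeSetPlans()) {
    LOG.info("Move " + step.getSizeString(step.getBytesToMove()) + " from "
        + step.getSourceVolume().getPath() + " to "
        + step.getDestinationVolume().getPath());
}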

Example 8 with DiskBalancerVolume

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

the class TestPlanner method createVolume.

private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolume volume = util.createRandomVolume(StorageType.SSD);
    volume.setPath(path);
    volume.setCapacity(capacityInGB * DiskBalancerTestUtil.GB);
    volume.setReserved(0);
    volume.setUsed(usedInGB * DiskBalancerTestUtil.GB);
    return volume;
}
Also used : DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
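The helper above only sets capacity, reserved space and used space; the quantity the planner actually balances is the resulting data density. A hypothetical companion helper could compute it, assuming DiskBalancerVolume exposes getters that mirror the setters used here (getCapacity, getReserved and getUsed are assumptions, not confirmed by this snippet):

// Hypothetical helper, not part of TestPlanner; the getter names are assumed.
private static double usedDensity(DiskBalancerVolume volume) {
    long usable = volume.getCapacity() - volume.getReserved();  // space the planner can work with
    return usable == 0 ? 0.0 : (double) volume.getUsed() / usable;
}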

Example 9 with DiskBalancerVolume

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

the class TestPlanner method testGreedyPlannerLargeDisksWithData.

@Test
public void testGreedyPlannerLargeDisksWithData() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Volumes with mixed capacities (1968 GB and 1563 GB) and varying amounts of used data
    DiskBalancerVolume volume1 = createVolume("volume1", 1968, 88);
    DiskBalancerVolume volume2 = createVolume("volume2", 1968, 88);
    DiskBalancerVolume volume3 = createVolume("volume3", 1968, 111);
    DiskBalancerVolume volume4 = createVolume("volume4", 1968, 111);
    DiskBalancerVolume volume5 = createVolume("volume5", 1968, 30);
    DiskBalancerVolume volume6 = createVolume("volume6", 1563, 30);
    DiskBalancerVolume volume7 = createVolume("volume7", 1563, 30);
    DiskBalancerVolume volume8 = createVolume("volume8", 1563, 30);
    DiskBalancerVolume volume9 = createVolume("volume9", 1563, 210);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    node.addVolume(volume4);
    node.addVolume(volume5);
    node.addVolume(volume6);
    node.addVolume(volume7);
    node.addVolume(volume8);
    node.addVolume(volume9);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(1.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    assertTrue(plan.getVolumeSetPlans().size() > 2);
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
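Rough arithmetic behind the "more than 2 steps" expectation, assuming the 1.0f argument is a percentage tolerance like the 5.0f used elsewhere in this class: the node holds 728 GB of data on 16,092 GB of capacity, an ideal density of roughly 4.5%. volume3 and volume4 (111 / 1968 ≈ 5.6%) and volume9 (210 / 1563 ≈ 13.4%) all sit more than one percentage point above that ideal, so at least three volumes have to shed data and the plan needs more than two steps.

// Illustrative arithmetic only, derived from the capacities and used sizes set above.
double idealDensity = 728.0 / 16092.0;   // ≈ 0.045 across all nine volumes
double volume9Density = 210.0 / 1563.0;  // ≈ 0.134, far above the ideal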

Example 10 with DiskBalancerVolume

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

the class TestPlanner method testGreedyPlannerEqualizeData.

/**
   * In this test we pass three volumes with 30, 20 and 10 GB of data used. We
   * expect the planner to even out the data to 20 GB on each volume.
   * <p/>
   * That is, the plan should move 10 GB from volume30 to volume10.
   */
@Test
public void testGreedyPlannerEqualizeData() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    DiskBalancerVolume volume20 = createVolume("volume20", 100, 20);
    DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
    node.addVolume(volume10);
    node.addVolume(volume20);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(5.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should have only one planned move from
    // volume30 to volume10 of 10 GB Size.
    assertEquals(1, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume30", step.getSourceVolume().getPath());
    assertEquals("volume10", step.getDestinationVolume().getPath());
    assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
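A small illustrative follow-up (not part of the Hadoop test, and assuming a static import of org.junit.Assert.assertNotEquals): since volume20 already sits at the ideal 20 GB, no step in the plan should reference it.

// Illustrative only: volume20 is already balanced, so it should appear in no step.
for (Step step : plan.getVolumeSetPlans()) {
    assertNotEquals("volume20", step.getSourceVolume().getPath());
    assertNotEquals("volume20", step.getDestinationVolume().getPath());
}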

Aggregations

DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 25 uses
Test (org.junit.Test): 17 uses
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 16 uses
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 10 uses
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 10 uses
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 10 uses
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 10 uses
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 10 uses
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5 uses
LinkedList (java.util.LinkedList): 1 use
StorageType (org.apache.hadoop.fs.StorageType): 1 use
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1 use
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1 use
DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport): 1 use
StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport): 1 use