
Example 26 with NodePlan

use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

the class TestPlanner method testGreedyPlannerLargeDisksWithData.

@Test
public void testGreedyPlannerLargeDisksWithData() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Volumes with two different capacities (1968 and 1563 GB) and varying amounts of used data
    DiskBalancerVolume volume1 = createVolume("volume1", 1968, 88);
    DiskBalancerVolume volume2 = createVolume("volume2", 1968, 88);
    DiskBalancerVolume volume3 = createVolume("volume3", 1968, 111);
    DiskBalancerVolume volume4 = createVolume("volume4", 1968, 111);
    DiskBalancerVolume volume5 = createVolume("volume5", 1968, 30);
    DiskBalancerVolume volume6 = createVolume("volume6", 1563, 30);
    DiskBalancerVolume volume7 = createVolume("volume7", 1563, 30);
    DiskBalancerVolume volume8 = createVolume("volume8", 1563, 30);
    DiskBalancerVolume volume9 = createVolume("volume9", 1563, 210);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    node.addVolume(volume4);
    node.addVolume(volume5);
    node.addVolume(volume6);
    node.addVolume(volume7);
    node.addVolume(volume8);
    node.addVolume(volume9);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(1.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    assertTrue(plan.getVolumeSetPlans().size() > 2);
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
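
Note: the createVolume helper used throughout these tests is not shown on this page. A minimal sketch of such a helper, assuming DiskBalancerVolume is a plain data-model bean with setters for path, storage type, capacity and used space (capacity and used given in GB and stored as bytes), might look like the following; the setter names and the GB constant are assumptions for illustration, not quoted from the Hadoop source:

// Hypothetical helper (not quoted from TestPlanner): builds a volume with the
// given path, capacity and used space, both expressed in GB.
private static final long GB = 1024L * 1024L * 1024L;

private DiskBalancerVolume createVolume(String path, long capacityGB, long usedGB) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath(path);
    // The tests above balance the "SSD" volume set, so tag the volume accordingly.
    volume.setStorageType("SSD");
    volume.setCapacity(capacityGB * GB);
    volume.setUsed(usedGB * GB);
    return volume;
}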

Example 27 with NodePlan

use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

the class TestPlanner method testGreedyPlannerEqualizeData.

/**
   * In this test we pass 3 volumes with 30, 20 and 10 GB of data used. We
   * expect the planner to even out the data to 20 GB on each volume.
   * <p/>
   * That is the plan should say move 10 GB from volume30 to volume10.
   */
@Test
public void testGreedyPlannerEqualizeData() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    DiskBalancerVolume volume20 = createVolume("volume20", 100, 20);
    DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
    node.addVolume(volume10);
    node.addVolume(volume20);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(5.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should have only one planned move from
    // volume30 to volume10 of 10 GB Size.
    assertEquals(1, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume30", step.getSourceVolume().getPath());
    assertEquals("volume10", step.getDestinationVolume().getPath());
    assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
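
The expected plan follows from simple arithmetic: 30 + 20 + 10 = 60 GB of used data spread over three 100 GB volumes means 20 GB per volume once equalized, so the only move needed is 10 GB from volume30 to volume10. A quick, self-contained sanity check of that arithmetic (plain Java, independent of the DiskBalancer classes):

// Sanity check of the expected plan: average the used data and compute the
// single transfer that equalizes the three volumes.
long[] usedGB = {30, 20, 10};          // volume30, volume20, volume10
long totalGB = 0;
for (long used : usedGB) {
    totalGB += used;
}
long meanGB = totalGB / usedGB.length; // 60 / 3 = 20 GB target per volume
long moveGB = usedGB[0] - meanGB;      // 30 - 20 = 10 GB from volume30 to volume10
System.out.println("target=" + meanGB + " GB, move=" + moveGB + " GB");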

Example 28 with NodePlan

use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

the class TestPlanner method testGreedyPlannerBalanceVolumeSet.

@Test
public void testGreedyPlannerBalanceVolumeSet() throws Exception {
    URI clusterJson = getClass().getResource("/diskBalancer/data-cluster-3node-3disk.json").toURI();
    ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson, null);
    DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
    cluster.readClusterInfo();
    Assert.assertEquals(3, cluster.getNodes().size());
    cluster.setNodesToProcess(cluster.getNodes());
    DiskBalancerDataNode node = cluster.getNodes().get(0);
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) ClusterConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) URI(java.net.URI) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
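
This test only checks that planning completes for a node loaded from the JSON cluster description; it makes no assertions about the resulting steps. If you wanted to inspect the plan, a sketch using only the accessors that already appear in the other examples on this page would be:

// Print every planned move: source volume, destination volume and size.
for (Step step : plan.getVolumeSetPlans()) {
    System.out.println(step.getSourceVolume().getPath() + " -> "
        + step.getDestinationVolume().getPath() + " : "
        + step.getSizeString(step.getBytesToMove()));
}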

Example 29 with NodePlan

use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

the class TestPlanner method testGreedyPlannerPlanWithDifferentDiskSizes.

@Test
public void testGreedyPlannerPlanWithDifferentDiskSizes() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume1 = createVolume("volume100", 1000, 100);
    DiskBalancerVolume volume2 = createVolume("volume0-1", 500, 0);
    DiskBalancerVolume volume3 = createVolume("volume0-2", 250, 0);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner newPlanner = new GreedyPlanner(01.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    assertEquals(2, newPlan.getVolumeSetPlans().size());
    for (Step step : newPlan.getVolumeSetPlans()) {
        if (step.getDestinationVolume().getPath().equals("volume0-1")) {
            assertEquals("volume100", step.getSourceVolume().getPath());
            assertEquals("28.5 G", step.getSizeString(step.getBytesToMove()));
        }
        if (step.getDestinationVolume().getPath().equals("volume0-2")) {
            assertEquals("volume100", step.getSourceVolume().getPath());
            assertEquals("14.3 G", step.getSizeString(step.getBytesToMove()));
        }
    }
    Step step = newPlan.getVolumeSetPlans().get(0);
    assertEquals(0.05714f, step.getIdealStorage(), 0.001f);
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
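
The asserted ideal-storage value and move sizes follow directly from the volume sizes: total capacity is 1000 + 500 + 250 = 1750 GB and total used is 100 GB, so the ideal used fraction is 100 / 1750 ≈ 0.05714; volume0-1 should end up with about 500 × 0.05714 ≈ 28.6 GB and volume0-2 with about 250 × 0.05714 ≈ 14.3 GB, which matches the asserted size strings once the planner formats the byte counts. A small sanity check of that arithmetic:

// Sanity check of the ideal-storage ratio and expected move sizes.
double totalCapacityGB = 1000 + 500 + 250;          // 1750 GB over three volumes
double totalUsedGB = 100;                           // only volume100 holds data
double idealRatio = totalUsedGB / totalCapacityGB;  // ~0.05714, matches getIdealStorage()
double expectedVolume01GB = 500 * idealRatio;       // ~28.6 GB destined for volume0-1
double expectedVolume02GB = 250 * idealRatio;       // ~14.3 GB destined for volume0-2
System.out.printf("ideal=%.5f, volume0-1=%.1f GB, volume0-2=%.1f GB%n",
    idealRatio, expectedVolume01GB, expectedVolume02GB);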

Example 30 with NodePlan

use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

the class TestPlanner method testGreedyPlannerThresholdTest.

@Test
public void testGreedyPlannerThresholdTest() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume1 = createVolume("volume100", 1000, 100);
    DiskBalancerVolume volume2 = createVolume("volume0-1", 300, 0);
    DiskBalancerVolume volume3 = createVolume("volume0-2", 300, 0);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should see NO moves, since the total data on volume100
    // is less than or equal to the threshold value we pass, which is 10%.
    assertEquals(0, plan.getVolumeSetPlans().size());
    // For this new planner we are passing 1% as the threshold value,
    // hence the planner must move data if possible.
    GreedyPlanner newPlanner = new GreedyPlanner(01.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    assertEquals(2, newPlan.getVolumeSetPlans().size());
    // Move size should be roughly 18.8 GB per destination volume.
    // Here is how the math works out:
    // TotalCapacity = 1000 + 300 + 300 = 1600 GB
    // TotalUsed = 100 GB
    // Expected data% on each disk = 100 / 1600 = 0.0625
    // On disk volume0-1: 300 * 0.0625 = 18.75 GB -- rounded in the
    // display string to 18.8 GB. It will be the same on volume0-2,
    // since they are equal-sized disks with the same used capacity.
    Step step = newPlan.getVolumeSetPlans().get(0);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
    step = newPlan.getVolumeSetPlans().get(1);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
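
The two planner runs differ only in their threshold. volume100 holds 100 GB on a 1000 GB disk, a 10% density, which is within the 10% threshold of the first planner, so no moves are planned; with a 1% threshold the planner balances toward the ideal fraction of 100 / 1600 = 0.0625, or 300 × 0.0625 = 18.75 GB on each empty volume, matching the 18.6-18.8 GB range accepted by the assertions. A small sanity check of that arithmetic:

// Sanity check of the threshold arithmetic described in the comments above.
double totalCapacityGB = 1000 + 300 + 300;          // 1600 GB in total
double totalUsedGB = 100;                           // all of it on volume100
double idealRatio = totalUsedGB / totalCapacityGB;  // 0.0625
double perEmptyVolumeGB = 300 * idealRatio;         // 18.75 GB moved to each empty volume
double volume100Density = 100.0 / 1000;             // 0.10, i.e. within a 10% threshold
System.out.printf("ideal=%.4f, move=%.2f GB, volume100 density=%.2f%n",
    idealRatio, perEmptyVolumeGB, volume100Density);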

Aggregations

NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) 41
Test (org.junit.Test) 33
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) 21
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) 13
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) 11
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) 11
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) 10
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 9
DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer) 8
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) 8
Configuration (org.apache.hadoop.conf.Configuration) 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 3
DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus) 3
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException) 3
IOException (java.io.IOException) 2
URI (java.net.URI) 2
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) 2
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector) 2
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) 2