Example 11 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class TestPlanner, method testGreedyPlannerPlanWithDifferentDiskSizes.

@Test
public void testGreedyPlannerPlanWithDifferentDiskSizes() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume1 = createVolume("volume100", 1000, 100);
    DiskBalancerVolume volume2 = createVolume("volume0-1", 500, 0);
    DiskBalancerVolume volume3 = createVolume("volume0-2", 250, 0);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node); // 1% threshold
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    assertEquals(2, newPlan.getVolumeSetPlans().size());
    for (Step step : newPlan.getVolumeSetPlans()) {
        if (step.getDestinationVolume().getPath().equals("volume0-1")) {
            assertEquals("volume100", step.getSourceVolume().getPath());
            assertEquals("28.5 G", step.getSizeString(step.getBytesToMove()));
        }
        if (step.getDestinationVolume().getPath().equals("volume0-2")) {
            assertEquals("volume100", step.getSourceVolume().getPath());
            assertEquals("14.3 G", step.getSizeString(step.getBytesToMove()));
        }
    }
    // Ideal storage ratio = totalUsed / totalCapacity = 100 / (1000 + 500 + 250) ~= 0.05714
    Step step = newPlan.getVolumeSetPlans().get(0);
    assertEquals(0.05714f, step.getIdealStorage(), 0.001f);
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
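
Note: the createVolume helper these tests call is defined elsewhere in TestPlanner and is not part of this excerpt. A minimal sketch of what it plausibly does, assuming DiskBalancerVolume's plain setters and a GB constant (both assumptions, not the verified Hadoop source):

// Hypothetical reconstruction, for illustration only. Sizes are passed
// in GB and stored as bytes.
private static final long GB = 1024L * 1024L * 1024L;

private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath(path);
    volume.setUuid(UUID.randomUUID().toString());
    // The tests read node.getVolumeSets().get("SSD"), so the volumes
    // presumably carry the SSD storage type.
    volume.setStorageType("SSD");
    volume.setCapacity(capacityInGB * GB);
    volume.setUsed(usedInGB * GB);
    return volume;
}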

Example 12 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class TestPlanner, method testGreedyPlannerThresholdTest.

@Test
public void testGreedyPlannerThresholdTest() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume1 = createVolume("volume100", 1000, 100);
    DiskBalancerVolume volume2 = createVolume("volume0-1", 300, 0);
    DiskBalancerVolume volume3 = createVolume("volume0-2", 300, 0);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should see NO moves, since the total data on volume100 is less
    // than or equal to the threshold value we pass, which is 10%.
    assertEquals(0, plan.getVolumeSetPlans().size());
    // For this new planner we pass 1% as the threshold value, so the
    // planner must move data if possible.
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    assertEquals(2, newPlan.getVolumeSetPlans().size());
    // The move size should come out to roughly 19 GB. Here is how the
    // math works out:
    // TotalCapacity = 1000 + 300 + 300 = 1600 GB
    // TotalUsed = 100 GB
    // Expected data ratio on each disk = 100 / 1600 = 0.0625
    // On disk volume0-1: 300 * 0.0625 = 18.75 GB. We round in the display
    // string, hence 18.8 GB. The same holds for volume0-2, since the two
    // disks have equal size and the same used capacity.
    Step step = newPlan.getVolumeSetPlans().get(0);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("18\\.[678] G"));
    step = newPlan.getVolumeSetPlans().get(1);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("18\\.[678] G"));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
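
The comment math above checks out with plain arithmetic; this standalone snippet only illustrates the calculation and uses no disk balancer APIs:

// Verifies the expected move size from the comments (all sizes in GB).
public class ThresholdMathCheck {
    public static void main(String[] args) {
        double totalCapacity = 1000 + 300 + 300;  // 1600 GB
        double totalUsed = 100;                   // 100 GB
        double ideal = totalUsed / totalCapacity; // 0.0625 expected data ratio
        double movePerDisk = 300 * ideal;         // 18.75 GB to each empty disk
        System.out.printf("ideal=%.4f, movePerDisk=%.2f GB%n", ideal, movePerDisk);
    }
}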

Example 13 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class TestPlanner, method testGreedyPlannerMoveFromSingleDisk.

@Test
public void testGreedyPlannerMoveFromSingleDisk() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // All disks have the same capacity; only volume100 holds data (100 GB).
    DiskBalancerVolume volume1 = createVolume("volume100", 200, 100);
    DiskBalancerVolume volume2 = createVolume("volume0-1", 200, 0);
    DiskBalancerVolume volume3 = createVolume("volume0-2", 200, 0);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should see 2 move plans. One from volume100 to volume0-1
    // and another from volume100 to volume0-2
    assertEquals(2, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("33\\.[234] G"));
    step = plan.getVolumeSetPlans().get(1);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("33\\.[234] G"));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
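
Here the 10% threshold does not suppress moves: volume100 is 50% used against an ideal ratio of roughly 16.7%, a gap far larger than the threshold. The expected per-disk move follows the same arithmetic as above (illustrative only):

// Expected move size for this example (all sizes in GB).
public class SingleDiskMathCheck {
    public static void main(String[] args) {
        double totalCapacity = 200 + 200 + 200;  // 600 GB
        double ideal = 100.0 / totalCapacity;    // ~0.1667 expected data ratio
        double movePerDisk = 200 * ideal;        // ~33.33 GB to each empty disk
        System.out.printf("ideal=%.4f, movePerDisk=%.2f GB%n", ideal, movePerDisk);
    }
}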

Example 14 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class Command, method populatePathNames.

/**
   * Reads the physical paths of the disks we are balancing. The paths are
   * needed only to make the disk balancer output human friendly; they are
   * not used in the balancing itself.
   *
   * @param node - Disk Balancer Node.
   */
protected void populatePathNames(DiskBalancerDataNode node) throws IOException {
    // A cluster definition read from a local file has no live datanode to
    // query, so there is nothing to populate.
    if (getClusterURI().getScheme().startsWith("file")) {
        return;
    }
    // Invoke an RPC call to the datanode to fetch the volume paths.
    String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
    ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
    String volumeNameJson = dnClient.getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
    @SuppressWarnings("unchecked") Map<String, String> volumeMap = READER.readValue(volumeNameJson);
    for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
        for (DiskBalancerVolume vol : set.getVolumes()) {
            if (volumeMap.containsKey(vol.getUuid())) {
                vol.setPath(volumeMap.get(vol.getUuid()));
            }
        }
    }
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
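
The READER referenced in populatePathNames is declared elsewhere in Command. It is a Jackson ObjectReader that turns the JSON returned by the datanode into a map from volume UUID to physical path; a sketch of the assumed declaration (the actual Hadoop source may differ):

import java.util.HashMap;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

// Assumed shape: deserializes {"<volume uuid>": "<physical path>", ...}.
private static final ObjectReader READER =
    new ObjectMapper().readerFor(HashMap.class);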

Example 15 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class ReportCommand, method recordNodeReport.

/**
   * Appends the report lines for one node to the string builder.
   */
private void recordNodeReport(StrBuilder result, DiskBalancerDataNode dbdn, final String nodeFormat, final String volumeFormat) throws Exception {
    final String trueStr = "True";
    final String falseStr = "False";
    // Resolve the physical storage paths of this datanode's volumes.
    populatePathNames(dbdn);
    result.appendln(String.format(nodeFormat,
        dbdn.getDataNodeName(), dbdn.getDataNodeIP(), dbdn.getDataNodePort(),
        dbdn.getDataNodeUUID(), dbdn.getVolumeCount(),
        dbdn.getNodeDataDensity()));
    List<String> volumeList = Lists.newArrayList();
    for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
        for (DiskBalancerVolume vol : vset.getVolumes()) {
            volumeList.add(String.format(volumeFormat,
                vol.getStorageType(), vol.getPath(),
                vol.getUsedRatio(), vol.getUsed(), vol.getCapacity(),
                vol.getFreeRatio(), vol.getFreeSpace(), vol.getCapacity(),
                vol.isFailed() ? trueStr : falseStr,
                vol.isReadOnly() ? trueStr : falseStr,
                vol.isSkip() ? trueStr : falseStr,
                vol.isTransient() ? trueStr : falseStr));
        }
    }
    Collections.sort(volumeList);
    result.appendln(StringUtils.join(volumeList.toArray(), System.lineSeparator()));
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
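
recordNodeReport is driven by two caller-supplied format strings defined in ReportCommand and not shown in this excerpt. The examples below are hypothetical, written only to make the argument order visible:

// Hypothetical format strings matching the argument order used above.
// Node line: name, IP, port, UUID, volume count, node data density.
final String nodeFormat = "%s[%s:%d] - <%s>: %d volumes, density %.2f";
// Volume line: type, path, used ratio, used, capacity, free ratio,
// free space, capacity, failed, read-only, skip, transient.
final String volumeFormat = "[%s: %s] used %.2f (%d/%d), free %.2f (%d/%d), "
    + "isFailed: %s, isReadOnly: %s, isSkip: %s, isTransient: %s";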

Aggregations

DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) 25
Test (org.junit.Test) 17
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) 16
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) 10
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) 10
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) 10
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) 10
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) 10
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) 5
LinkedList (java.util.LinkedList) 1
StorageType (org.apache.hadoop.fs.StorageType) 1
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) 1
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) 1
DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport) 1
StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport) 1