
Example 1 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

From the class DiskBalancerTestUtil, method createRandomVolumeSet.

/**
   * Creates a volume set populated with randomly generated volumes.
   *
   * @param type      - storage type shared by all volumes in the set
   * @param diskCount - number of disks to create
   * @return volumeSet
   * @throws Exception if a volume cannot be created
   */
public DiskBalancerVolumeSet createRandomVolumeSet(StorageType type, int diskCount) throws Exception {
    Preconditions.checkState(diskCount > 0);
    DiskBalancerVolumeSet volumeSet = new DiskBalancerVolumeSet(type.isTransient());
    for (int x = 0; x < diskCount; x++) {
        volumeSet.addVolume(createRandomVolume(type));
    }
    assert (volumeSet.getVolumeCount() == diskCount);
    return volumeSet;
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet)
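
The helper createRandomVolume(type) called above is not shown on this page. A minimal sketch of what such a helper could look like, using java.util.Random and java.util.UUID and assuming DiskBalancerVolume exposes the usual data-model setters (setUuid, setStorageType, setTransient, setCapacity, setUsed) with used space capped by capacity; the real DiskBalancerTestUtil implementation in Hadoop may differ.

private final Random rand = new Random();

public DiskBalancerVolume createRandomVolume(StorageType type) {
    // Hypothetical sketch, not the actual Hadoop helper.
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setUuid(UUID.randomUUID().toString());
    volume.setStorageType(type.toString());
    volume.setTransient(type.isTransient());
    // Capacity between 1 GB and 100 GB; set capacity before used space.
    final long gb = 1024L * 1024L * 1024L;
    long capacity = gb * (1 + rand.nextInt(100));
    volume.setCapacity(capacity);
    volume.setUsed((long) (capacity * rand.nextDouble()));
    return volume;
}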

Example 2 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

From the class TestDataModels, method testCreateRandomVolumeSet.

@Test
public void testCreateRandomVolumeSet() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, 10);
    Assert.assertEquals(10, vSet.getVolumeCount());
    Assert.assertEquals(StorageType.SSD.toString(), vSet.getVolumes().get(0).getStorageType());
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), Test (org.junit.Test)
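
As a small extension of the same idea, a test could also verify that the set is homogeneous, i.e. that every volume carries the requested storage type and a matching transient flag. This is a sketch rather than one of the Hadoop tests; it relies only on the getters already used above plus isTransient(), which is assumed to exist on DiskBalancerVolume.

@Test
public void testVolumeSetIsHomogeneous() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.DISK, 5);
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        // Every volume should report the storage type the set was created with.
        Assert.assertEquals(StorageType.DISK.toString(), vol.getStorageType());
        // DISK is not a transient storage type, so the flag should be false.
        Assert.assertFalse(vol.isTransient());
    }
}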

Example 3 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

From the class TestPlanner, method testNodePlanSerialize.

@Test
public void testNodePlanSerialize() throws Exception {
    final int diskCount = 12;
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    int diskNum = 0;
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        vol.setPath("volume" + diskNum++);
        node.addVolume(vol);
    }
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    String planString = newPlan.toJson();
    assertNotNull(planString);
    NodePlan copy = NodePlan.parseJson(planString);
    assertNotNull(copy);
    assertEquals(newPlan.getVolumeSetPlans().size(), copy.getVolumeSetPlans().size());
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
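
Beyond comparing the number of plans, a round-trip test could also compare individual move steps. The sketch below assumes the planner's Step type exposes getBytesToMove(), getSourceVolume() and getDestinationVolume(); treat those getters as an assumption rather than a verbatim extract from TestPlanner.

// Hedged sketch: verify each step survives the JSON round trip.
for (int i = 0; i < newPlan.getVolumeSetPlans().size(); i++) {
    Step original = newPlan.getVolumeSetPlans().get(i);
    Step restored = copy.getVolumeSetPlans().get(i);
    assertEquals(original.getBytesToMove(), restored.getBytesToMove());
    assertEquals(original.getSourceVolume().getPath(), restored.getSourceVolume().getPath());
    assertEquals(original.getDestinationVolume().getPath(), restored.getDestinationVolume().getPath());
}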

Example 4 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

From the class TestPlanner, method testPlannerScale.

@Test
public void testPlannerScale() throws Exception {
    // real clusters rarely have more than 48 disks per node; 256 stress-tests the planner
    final int diskCount = 256;
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    int diskNum = 0;
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        vol.setPath("volume" + diskNum++);
        node.addVolume(vol);
    }
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    // Assuming that our random disks at least generated one step
    assertTrue("No Steps Generated from random disks, very unlikely", newPlan.getVolumeSetPlans().size() > 0);
    assertTrue("Steps Generated less than disk count - false", newPlan.getVolumeSetPlans().size() < diskCount);
    LOG.info("Number of steps are : %d%n", newPlan.getVolumeSetPlans().size());
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
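
The assertion above leans on the random volumes being uneven enough to produce at least one step. A deterministic variant, sketched here under the same assumptions about DiskBalancerVolume (getCapacity and setUsed exist, and used space may not exceed capacity), could force an imbalance before the node is registered so the planner is guaranteed to schedule a move.

// Hedged sketch: insert before nullConnector.addNode(node) to guarantee at least one move.
DiskBalancerVolume nearlyFull = vSet.getVolumes().get(0);
DiskBalancerVolume nearlyEmpty = vSet.getVolumes().get(1);
nearlyFull.setUsed((long) (nearlyFull.getCapacity() * 0.9));
nearlyEmpty.setUsed(0);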

Example 5 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

From the class Command, method populatePathNames.

/**
   * Reads the physical paths of the disks being balanced. The paths are only
   * used to make the disk balancer output human friendly; they are not used
   * by the balancing algorithm itself.
   *
   * @param node - Disk Balancer Node.
   * @throws IOException if the datanode cannot be reached over RPC
   */
protected void populatePathNames(DiskBalancerDataNode node) throws IOException {
    // A file-based cluster URI means we are reading a cluster snapshot from
    // disk, so there is no datanode to query and the RPC lookup is skipped.
    if (getClusterURI().getScheme().startsWith("file")) {
        return;
    }
    // Invoke an RPC call to the datanode to fetch the uuid -> path mapping.
    String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
    ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
    String volumeNameJson = dnClient.getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
    @SuppressWarnings("unchecked") Map<String, String> volumeMap = READER.readValue(volumeNameJson);
    for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
        for (DiskBalancerVolume vol : set.getVolumes()) {
            if (volumeMap.containsKey(vol.getUuid())) {
                vol.setPath(volumeMap.get(vol.getUuid()));
            }
        }
    }
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
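
The READER used above is not shown on this page. In the Command class it is a Jackson ObjectReader that turns the datanode's volume-name JSON (volume uuid to physical path) into a Map. A minimal setup consistent with the readValue call above might look like the following; the exact target type is an assumption.

import java.util.HashMap;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

// Hedged sketch: readerFor requires Jackson 2.6+; older Jackson versions use reader(Class) instead.
private static final ObjectReader READER = new ObjectMapper().readerFor(HashMap.class);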

Aggregations

DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 13 usages
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 10 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 7 usages
Test (org.junit.Test): 7 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 2 usages
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 2 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 2 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 2 usages
StorageType (org.apache.hadoop.fs.StorageType): 1 usage
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1 usage