
Example 1 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in the Apache Hadoop project.

From the class DBNameNodeConnector, method getVolumeInfoFromStorageReports:

/**
   * Reads the relevant fields from each storage report and populates the
   * DiskBalancer Node.
   *
   * @param node    - Disk Balancer Node
   * @param reports - Array of StorageReport
   */
private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node, StorageReport[] reports) throws Exception {
    Preconditions.checkNotNull(node);
    Preconditions.checkNotNull(reports);
    for (StorageReport report : reports) {
        DatanodeStorage storage = report.getStorage();
        DiskBalancerVolume volume = new DiskBalancerVolume();
        volume.setCapacity(report.getCapacity());
        volume.setFailed(report.isFailed());
        volume.setUsed(report.getDfsUsed());
        // TODO: Should we do block pool level balancing at all?
        // Does it make sense? The Balancer does that; for now we only
        // deal with volumes, not block pools.
        volume.setUuid(storage.getStorageID());
        // Skip this volume for disk balancing if it is read-only,
        // since we cannot delete data from it, or if it has already failed.
        volume.setSkip((storage.getState() == DatanodeStorage.State.READ_ONLY_SHARED) || report.isFailed());
        volume.setStorageType(storage.getStorageType().name());
        volume.setIsTransient(storage.getStorageType().isTransient());
        node.addVolume(volume);
    }
}
Also used: DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
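
Because the mapping above is purely field-by-field, the skip rule is easy to illustrate in isolation. The sketch below is hypothetical (the capacity, usage, and UUID values are invented) and uses only the setters shown in the method; it assumes the Hadoop HDFS jars are on the classpath.

import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;

public class VolumeMappingSketch {
    public static void main(String[] args) throws Exception {
        // Invented values standing in for one StorageReport.
        long capacity = 1024L * 1024 * 1024; // 1 GiB
        long dfsUsed = 512L * 1024 * 1024; // 512 MiB
        boolean failed = false;
        boolean readOnly = true; // stands in for READ_ONLY_SHARED
        DiskBalancerVolume volume = new DiskBalancerVolume();
        volume.setUuid("storage-uuid-0001"); // invented
        volume.setCapacity(capacity);
        volume.setUsed(dfsUsed);
        volume.setFailed(failed);
        // Same rule as above: a read-only or failed volume cannot give
        // up data, so the balancer must skip it.
        volume.setSkip(readOnly || failed);
        volume.setStorageType("DISK");
        volume.setIsTransient(false);
        System.out.println(volume.toJson());
    }
}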

Example 2 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in the Apache Hadoop project.

From the class TestDataModels, method testVolumeSerialize:

@Test
public void testVolumeSerialize() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolume volume = util.createRandomVolume(StorageType.DISK);
    String originalString = volume.toJson();
    DiskBalancerVolume parsedVolume = DiskBalancerVolume.parseJson(originalString);
    String parsedString = parsedVolume.toJson();
    Assert.assertEquals(originalString, parsedString);
}
Also used: DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), Test (org.junit.Test)
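
The round trip is not limited to randomly generated volumes. Here is a minimal sketch in the same style, with invented field values, that serializes a hand-built volume and checks that parsing reproduces the same JSON:

@Test
public void testHandBuiltVolumeSerialize() throws Exception {
    DiskBalancerVolume original = new DiskBalancerVolume();
    original.setUuid("uuid-sketch-1"); // invented
    original.setPath("/data/disk1"); // invented
    original.setStorageType("DISK");
    original.setCapacity(100L);
    original.setUsed(30L);
    String json = original.toJson();
    DiskBalancerVolume copy = DiskBalancerVolume.parseJson(json);
    // If every field survives the round trip, re-serializing the copy
    // must yield the identical string.
    Assert.assertEquals(json, copy.toJson());
}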

Example 3 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in the Apache Hadoop project.

From the class TestPlanner, method testGreedyPlannerEqualDisksNoMoves:

@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // All disks have the same capacity and the same amount of used data.
    DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
    DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
    DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // Since all disks hold the same amount of data, no moves should
    // be planned.
    assertEquals(0, plan.getVolumeSetPlans().size());
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
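
For contrast, here is a hypothetical sketch of the imbalanced case, where moves should be planned. The createVolume helper below is a stand-in for TestPlanner's private helper of the same name, rebuilt from the public setters shown in these examples; the exact number of moves is deliberately not asserted, since it depends on planner internals.

@Test
public void testGreedyPlannerUnequalDisksPlansMoves() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // One hot disk (90% used) and two cold disks (10% used).
    node.addVolume(createVolume("volume1", 100, 90));
    node.addVolume(createVolume("volume2", 100, 10));
    node.addVolume(createVolume("volume3", 100, 10));
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // A 90/10/10 split is far outside a 10 percent tolerance, so at
    // least one move is expected.
    Assert.assertTrue(plan.getVolumeSetPlans().size() > 0);
}

// Hypothetical stand-in for TestPlanner's private createVolume helper.
private DiskBalancerVolume createVolume(String path, long capacity, long used) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setUuid(UUID.randomUUID().toString());
    volume.setPath(path);
    volume.setStorageType("SSD"); // volume sets are keyed by storage type
    volume.setCapacity(capacity);
    volume.setUsed(used);
    return volume;
}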

Example 4 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in the Apache Hadoop project.

From the class TestPlanner, method testNodePlanSerialize:

@Test
public void testNodePlanSerialize() throws Exception {
    final int diskCount = 12;
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    int diskNum = 0;
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        vol.setPath("volume" + diskNum++);
        node.addVolume(vol);
    }
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    String planString = newPlan.toJson();
    assertNotNull(planString);
    NodePlan copy = NodePlan.parseJson(planString);
    assertNotNull(copy);
    assertEquals(newPlan.getVolumeSetPlans().size(), copy.getVolumeSetPlans().size());
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
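
Since the plan serializes to plain JSON, it can be written to disk and reloaded later, for example by a separate submission step. A minimal sketch follows; the file path is invented, this is not how the DiskBalancer CLI itself stores plans, and the snippet assumes it runs inside a method declared with throws Exception, like the test above.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Persist the plan produced in the test above.
Path planFile = Paths.get("/tmp/node.plan.json"); // invented path
Files.write(planFile, newPlan.toJson().getBytes(StandardCharsets.UTF_8));
// Later, possibly in another process, restore and verify it.
String stored = new String(Files.readAllBytes(planFile), StandardCharsets.UTF_8);
NodePlan restored = NodePlan.parseJson(stored);
assertEquals(newPlan.getVolumeSetPlans().size(), restored.getVolumeSetPlans().size());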

Example 5 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in the Apache Hadoop project.

From the class TestPlanner, method testGreedyPlannerOneVolumeNoPlanTest:

@Test
public void testGreedyPlannerOneVolumeNoPlanTest() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // With a single volume we should not have any plans for moves.
    assertEquals(0, plan.getVolumeSetPlans().size());
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
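
The threshold argument (10.0f in these tests) gates planning as well. The following sketch is a guess at its semantics and should be read as an assumption: if the threshold is a percentage tolerance around the volume set's mean utilization, then two volumes at 70% and 30% used fall within a 50% tolerance and no moves should be planned. It reuses the hypothetical createVolume helper sketched after Example 3.

@Test
public void testGreedyPlannerLargeThresholdNoMoves() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    node.addVolume(createVolume("volume70", 100, 70));
    node.addVolume(createVolume("volume30", 100, 30));
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    // Assumption: 50.0f means a 50 percent tolerance, so both volumes
    // (20 points from the 50 percent mean) are within bounds.
    GreedyPlanner planner = new GreedyPlanner(50.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    assertEquals(0, plan.getVolumeSetPlans().size());
}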

Aggregations

DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 25 uses
Test (org.junit.Test): 17 uses
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 16 uses
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 10 uses
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 10 uses
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 10 uses
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 10 uses
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 10 uses
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5 uses
LinkedList (java.util.LinkedList): 1 use
StorageType (org.apache.hadoop.fs.StorageType): 1 use
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1 use
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1 use
DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport): 1 use
StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport): 1 use