
Example 6 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class TestDiskBalancer, method testDiskBalancerNameNodeConnectivity.

@Test
public void testDiskBalancerNameNodeConnectivity() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int numDatanodes = 2;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        cluster.waitActive();
        // Build the disk balancer's model of the cluster by connecting to the NameNode.
        ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
        DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
        diskBalancerCluster.readClusterInfo();
        assertEquals(diskBalancerCluster.getNodes().size(), numDatanodes);
        // The model's view of the first DataNode should match the live DataNode.
        DataNode dnNode = cluster.getDataNodes().get(0);
        DiskBalancerDataNode dbDnNode = diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
        assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
        assertEquals(dnNode.getDatanodeId().getIpAddr(), dbDnNode.getDataNodeIP());
        assertEquals(dnNode.getDatanodeId().getHostName(), dbDnNode.getDataNodeName());
        try (FsDatasetSpi.FsVolumeReferences ref = dnNode.getFSDataset().getFsVolumeReferences()) {
            assertEquals(ref.size(), dbDnNode.getVolumeCount());
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
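For comparison, a minimal sketch of the same connector calls outside a MiniDFSCluster test, assuming a placeholder NameNode URI (hdfs://namenode.example.com:8020 is hypothetical); the calls themselves mirror the test above:

// A hedged sketch, not taken from the Hadoop sources: read the disk
// balancer's view of a running cluster by NameNode URI.
Configuration conf = new HdfsConfiguration();
ClusterConnector connector =
    ConnectorFactory.getCluster(new URI("hdfs://namenode.example.com:8020"), conf);
DiskBalancerCluster cluster = new DiskBalancerCluster(connector);
cluster.readClusterInfo();
for (DiskBalancerDataNode node : cluster.getNodes()) {
    System.out.println(node.getDataNodeName() + " (" + node.getDataNodeIP()
        + ") reports " + node.getVolumeCount() + " volumes");
}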

Example 7 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class TestPlanner, method testGreedyPlannerEqualDisksNoMoves.

@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // All disks have the same capacity (100 GB) and the same used space (30 GB).
    DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
    DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
    DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    // Plan with a 10 percent balancing threshold.
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // Since every disk holds the same amount of data, no moves should be planned.
    assertEquals(0, plan.getVolumeSetPlans().size());
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
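The createVolume helper used throughout these TestPlanner examples is not shown on this page. A plausible reconstruction, assuming DiskBalancerVolume is a plain data-model bean with setters (the real helper in TestPlanner may differ in detail):

// Hypothetical reconstruction of TestPlanner's createVolume(path, capacity, used),
// with sizes given in gigabytes.
private static final long GB = 1024L * 1024L * 1024L;

private DiskBalancerVolume createVolume(String path, long capacityGB, long usedGB) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath(path);
    volume.setCapacity(capacityGB * GB);
    volume.setUsed(usedGB * GB);
    // "SSD" matches the key the tests look up via node.getVolumeSets().get("SSD").
    volume.setStorageType("SSD");
    volume.setUuid(UUID.randomUUID().toString());
    return volume;
}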

Example 8 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class TestPlanner, method testNodePlanSerialize.

@Test
public void testNodePlanSerialize() throws Exception {
    final int diskCount = 12;
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    int diskNum = 0;
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        vol.setPath("volume" + diskNum++);
        node.addVolume(vol);
    }
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    // Plan with a 1 percent balancing threshold.
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    String planString = newPlan.toJson();
    assertNotNull(planString);
    NodePlan copy = NodePlan.parseJson(planString);
    assertNotNull(copy);
    assertEquals(newPlan.getVolumeSetPlans().size(), copy.getVolumeSetPlans().size());
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
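Beyond comparing step counts, the deserialized plan can be inspected with the same Step accessors that Example 10 below uses; an illustrative follow-up:

// Illustrative only: print every step of the round-tripped plan.
for (Step step : copy.getVolumeSetPlans()) {
    System.out.println(step.getSourceVolume().getPath() + " -> "
        + step.getDestinationVolume().getPath() + ", "
        + step.getSizeString(step.getBytesToMove()));
}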

Example 9 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class TestPlanner, method testGreedyPlannerOneVolumeNoPlanTest.

@Test
public void testGreedyPlannerOneVolumeNoPlanTest() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // With a single volume there is nothing to balance, so no moves should be planned.
    assertEquals(0, plan.getVolumeSetPlans().size());
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)

Example 10 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class TestPlanner, method testGreedyPlannerTwoVolume.

@Test
public void testGreedyPlannerTwoVolume() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
    node.addVolume(volume10);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(5.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We expect exactly one planned move of 10 GB, from volume30 to volume10.
    assertEquals(1, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume30", step.getSourceVolume().getPath());
    assertEquals("volume10", step.getDestinationVolume().getPath());
    assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
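The expected move follows from simple balancing arithmetic: the two 100 GB disks hold 30 GB and 10 GB, so the ideal is (30 + 10) / 2 = 20 GB per disk. volume30 sits 10 percentage points above that ideal, well beyond the 5 percent threshold passed to GreedyPlanner, so a single 10 GB move from volume30 to volume10 brings both volumes to the 20 GB ideal.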

Aggregations

DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 27
Test (org.junit.Test): 19
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 16
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 13
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 12
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 10
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 7
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 3
LinkedList (java.util.LinkedList): 2
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 2
URI (java.net.URI): 1
StrBuilder (org.apache.commons.lang.text.StrBuilder): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
Path (org.apache.hadoop.fs.Path): 1
StorageType (org.apache.hadoop.fs.StorageType): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1