
Example 11 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

From the class JsonNodeConnector, method getNodes:

/**
   * getNodes function connects to a cluster definition file
   * and returns nodes defined in that file.
   *
   * @return Array of DiskBalancerDataNodes
   */
@Override
public List<DiskBalancerDataNode> getNodes() throws Exception {
    Preconditions.checkNotNull(this.clusterURI);
    String dataFilePath = this.clusterURI.getPath();
    LOG.info("Reading cluster info from file : " + dataFilePath);
    DiskBalancerCluster cluster = READER.readValue(new File(dataFilePath));
    String message = String.format("Found %d node(s)", cluster.getNodes().size());
    LOG.info(message);
    return cluster.getNodes();
}
Also used: DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), File (java.io.File)
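
For orientation, here is a minimal sketch of driving such a connector end to end, assuming ConnectorFactory resolves file: URIs to the JSON connector. The file path and the helper method name are illustrative, not taken from the excerpt above:

// Sketch only; assumes imports matching the "Also used" entries plus java.net.URI,
// org.apache.hadoop.conf.Configuration and ConnectorFactory.
private List<DiskBalancerDataNode> loadClusterDefinition(Configuration conf) throws Exception {
    // Hypothetical local cluster definition file.
    URI clusterURI = new URI("file:///tmp/diskbalancer-cluster.json");
    ClusterConnector connector = ConnectorFactory.getCluster(clusterURI, conf);
    DiskBalancerCluster cluster = new DiskBalancerCluster(connector);
    // readClusterInfo() delegates to the connector's getNodes(), shown above.
    cluster.readClusterInfo();
    return cluster.getNodes();
}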

Example 12 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

From the class TestConnectors, method testJsonConnector:

@Test
public void testJsonConnector() throws Exception {
    cluster.waitActive();
    ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
    DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
    diskBalancerCluster.readClusterInfo();
    String diskBalancerJson = diskBalancerCluster.toJson();
    DiskBalancerCluster serializedCluster = DiskBalancerCluster.parseJson(diskBalancerJson);
    Assert.assertEquals("Parsed cluster is not equal to persisted info.", diskBalancerCluster.getNodes().size(), serializedCluster.getNodes().size());
}
Also used: ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), Test (org.junit.Test)

Example 13 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

From the class TestConnectors, method testNameNodeConnector:

@Test
public void testNameNodeConnector() throws Exception {
    cluster.waitActive();
    ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
    DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
    diskBalancerCluster.readClusterInfo();
    Assert.assertEquals("Expected number of Datanodes not found.", numDatanodes, diskBalancerCluster.getNodes().size());
    Assert.assertEquals("Expected number of volumes not found.", volumeCount, diskBalancerCluster.getNodes().get(0).getVolumeCount());
}
Also used: ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), Test (org.junit.Test)
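
As a small follow-on sketch, the model populated by readClusterInfo() can be walked with the same getters these tests assert on; the printing itself is illustrative:

// Sketch only: print one line per datanode discovered through the connector.
private void printClusterSummary(DiskBalancerCluster diskBalancerCluster) {
    for (DiskBalancerDataNode node : diskBalancerCluster.getNodes()) {
        System.out.printf("%s reports %d volume(s)%n",
            node.getDataNodeName(), node.getVolumeCount());
    }
}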

Example 14 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

From the class TestDataModels, method testClusterSerialize:

@Test
public void testClusterSerialize() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    // Create a Cluster with 3 datanodes, 3 disk types and 3 disks in each type
    // that is 9 disks in each machine.
    DiskBalancerCluster cluster = util.createRandCluster(3, new StorageType[] { StorageType.DISK, StorageType.RAM_DISK, StorageType.SSD }, 3);
    DiskBalancerCluster newCluster = DiskBalancerCluster.parseJson(cluster.toJson());
    Assert.assertEquals(cluster.getNodes(), newCluster.getNodes());
    Assert.assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
}
Also used: DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), Test (org.junit.Test)
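
Building on that round trip, a hedged sketch of persisting the serialized cluster and reloading it later; the file location and the plain java.nio I/O are assumptions, only toJson() and parseJson() come from the test above:

// Sketch only: write the JSON snapshot to disk and read it back.
// Requires java.nio.file.{Files, Paths} and java.nio.charset.StandardCharsets.
private DiskBalancerCluster saveAndReload(DiskBalancerCluster cluster) throws Exception {
    Path snapshot = Paths.get("/tmp/cluster-snapshot.json");  // hypothetical location
    Files.write(snapshot, cluster.toJson().getBytes(StandardCharsets.UTF_8));
    String json = new String(Files.readAllBytes(snapshot), StandardCharsets.UTF_8);
    return DiskBalancerCluster.parseJson(json);
}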

Example 15 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

From the class TestPlanner, method testPlannerScale:

@Test
public void testPlannerScale() throws Exception {
    // it is rare to see more than 48 disks; use 256 to exercise the planner well beyond that
    final int diskCount = 256;
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    int diskNum = 0;
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        vol.setPath("volume" + diskNum++);
        node.addVolume(vol);
    }
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    // Assuming that our random disks at least generated one step
    assertTrue("No Steps Generated from random disks, very unlikely", newPlan.getVolumeSetPlans().size() > 0);
    assertTrue("Steps Generated less than disk count - false", newPlan.getVolumeSetPlans().size() < diskCount);
    LOG.info("Number of steps are : %d%n", newPlan.getVolumeSetPlans().size());
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
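
To see what the planner proposed, a short sketch of walking the generated plan; it relies only on getVolumeSetPlans() from the test above and on Step (listed in the aggregations below), logged through the same slf4j-style logger:

// Sketch only: log each proposed move in the generated plan.
private void logPlan(NodePlan plan) {
    for (Step step : plan.getVolumeSetPlans()) {
        // Rely on Step's toString() for a human-readable summary of the move.
        LOG.info("Planned move: {}", step);
    }
}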

Aggregations

DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 22 usages
Test (org.junit.Test): 19 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 13 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 13 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 12 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11 usages
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 10 usages
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 8 usages
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5 usages
URI (java.net.URI): 2 usages
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 2 usages
File (java.io.File): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 usage
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 usage
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1 usage
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 1 usage