Example 1 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

The class Command, method readClusterInfo.

/**
   * Processes the URI and returns the cluster with its nodes set up. This is
   * used in all commands.
   *
   * @param cmd - CommandLine
   * @return DiskBalancerCluster
   * @throws Exception if the cluster information cannot be read
   */
protected DiskBalancerCluster readClusterInfo(CommandLine cmd) throws Exception {
    Preconditions.checkNotNull(cmd);
    setClusterURI(FileSystem.getDefaultUri(getConf()));
    LOG.debug("using name node URI : {}", this.getClusterURI());
    ClusterConnector connector = ConnectorFactory.getCluster(this.clusterURI, getConf());
    cluster = new DiskBalancerCluster(connector);
    LOG.debug("Reading cluster info");
    cluster.readClusterInfo();
    return cluster;
}
Also used: ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster)
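
Because readClusterInfo is protected and shared by all commands, a concrete subclass would normally call it from its execute method. A minimal sketch, assuming a hypothetical ListNodesCommand that is not part of the Hadoop source; the constructor and execute(CommandLine) signature mirror the pattern the real command classes appear to follow:

// Hypothetical subclass, for illustration only; real commands such as
// ReportCommand follow the same readClusterInfo() pattern.
public class ListNodesCommand extends Command {

    public ListNodesCommand(Configuration conf) {
        super(conf);
    }

    @Override
    public void execute(CommandLine cmd) throws Exception {
        // Resolves the default NameNode URI from the configuration and
        // populates the in-memory cluster model before doing any work.
        DiskBalancerCluster cluster = readClusterInfo(cmd);
        for (DiskBalancerDataNode node : cluster.getNodes()) {
            System.out.println(node.getDataNodeName());
        }
    }
}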

Example 2 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

The class TestDiskBalancerCommand, method testGetNodeList.

@Test(timeout = 60000)
public void testGetNodeList() throws Exception {
    ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson, conf);
    DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(jsonConnector);
    diskBalancerCluster.readClusterInfo();
    int nodeNum = 5;
    StringBuilder listArg = new StringBuilder();
    for (int i = 0; i < nodeNum; i++) {
        listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID()).append(",");
    }
    ReportCommand command = new ReportCommand(conf, null);
    command.setCluster(diskBalancerCluster);
    List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
    assertEquals(nodeNum, nodeList.size());
}
Also used: ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
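
Two details are easy to miss here. First, clusterJson is a file URI, and ConnectorFactory appears to dispatch on the URI scheme, handing back a JSON-backed connector for file URIs and a NameNode-backed one otherwise. Second, listArg ends with a trailing comma, which is harmless because Java's String.split discards trailing empty strings. A self-contained sketch of that split behavior (the UUID values are placeholders):

public class SplitBehavior {
    public static void main(String[] args) {
        // A trailing comma does not produce an empty trailing element:
        // String.split(String) drops trailing empty strings by default.
        String listArg = "uuid-1,uuid-2,uuid-3,";
        String[] parts = listArg.split(",");
        System.out.println(parts.length); // prints 3, not 4
    }
}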

Example 3 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

The class DiskBalancerTestUtil, method createRandCluster.

/**
   * Creates a random cluster for testing.
   *
   * @param dataNodeCount - how many nodes you need
   * @param diskTypes     - StorageTypes you need in each node
   * @param diskCount     - how many disks you need of each type
   * @return DiskBalancerCluster
   * @throws Exception if the cluster information cannot be read
   */
public DiskBalancerCluster createRandCluster(int dataNodeCount, StorageType[] diskTypes, int diskCount) throws Exception {
    Preconditions.checkState(diskTypes.length > 0);
    Preconditions.checkState(diskCount > 0);
    Preconditions.checkState(dataNodeCount > 0);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    // Once we add these nodes to the connector, the cluster will read them
    // from the connector.
    for (int x = 0; x < dataNodeCount; x++) {
        nullConnector.addNode(createRandomDataNode(diskTypes, diskCount));
    }
    // with this call we have populated the cluster info
    cluster.readClusterInfo();
    return cluster;
}
Also used: DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector)
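
A sketch of how a test might consume this utility; the node, type, and disk counts are arbitrary values chosen for illustration, and instantiating DiskBalancerTestUtil with its default constructor is an assumption:

@Test
public void testRandClusterHasRequestedNodeCount() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    // Ten nodes, each carrying four DISK and four SSD volumes.
    DiskBalancerCluster cluster = util.createRandCluster(10,
        new StorageType[] {StorageType.DISK, StorageType.SSD}, 4);
    assertEquals(10, cluster.getNodes().size());
}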

Example 4 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

The class TestDiskBalancer, method testDiskBalancerNameNodeConnectivity.

@Test
public void testDiskBalancerNameNodeConnectivity() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int numDatanodes = 2;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        cluster.waitActive();
        ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
        DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
        diskBalancerCluster.readClusterInfo();
        assertEquals(numDatanodes, diskBalancerCluster.getNodes().size());
        DataNode dnNode = cluster.getDataNodes().get(0);
        DiskBalancerDataNode dbDnNode = diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
        assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
        assertEquals(dnNode.getDatanodeId().getIpAddr(), dbDnNode.getDataNodeIP());
        assertEquals(dnNode.getDatanodeId().getHostName(), dbDnNode.getDataNodeName());
        try (FsDatasetSpi.FsVolumeReferences ref = dnNode.getFSDataset().getFsVolumeReferences()) {
            assertEquals(ref.size(), dbDnNode.getVolumeCount());
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), Test (org.junit.Test)
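
Outside of MiniDFSCluster tests, the same wiring should work against any reachable NameNode; the address below is a placeholder, and the accessors are the ones the test above already exercises:

public void printClusterSummary(Configuration conf) throws Exception {
    // Placeholder NameNode address; any hdfs:// URI that ConnectorFactory
    // can resolve works the same way.
    URI nameNodeUri = new URI("hdfs://namenode.example.com:8020");
    ClusterConnector connector = ConnectorFactory.getCluster(nameNodeUri, conf);
    DiskBalancerCluster dbCluster = new DiskBalancerCluster(connector);
    dbCluster.readClusterInfo();
    for (DiskBalancerDataNode dn : dbCluster.getNodes()) {
        System.out.printf("%s (%s): %d volumes%n",
            dn.getDataNodeName(), dn.getDataNodeIP(), dn.getVolumeCount());
    }
}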

Example 5 with DiskBalancerCluster

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.

The class TestPlanner, method testGreedyPlannerEqualDisksNoMoves.

@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // All disks have the same capacity and the same amount of used data.
    DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
    DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
    DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // Since all disks hold the same amount of data, we expect
    // no moves to be planned.
    assertEquals(0, plan.getVolumeSetPlans().size());
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
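
The createVolume helper called above is not shown in this listing. A hedged reconstruction of what it plausibly does, assuming DiskBalancerVolume is a plain bean with path, storage-type, capacity, and used setters, and that sizes are given in gigabytes; since the test balances the "SSD" volume set, the helper presumably tags each volume as SSD:

// Plausible reconstruction for illustration, not the verbatim TestPlanner source.
private static final long GB = 1024L * 1024L * 1024L;

private DiskBalancerVolume createVolume(String path, int capacityInGB,
                                        int usedInGB) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath(path);
    // Must match the volume set being balanced ("SSD" in the test above);
    // the setter name is assumed.
    volume.setStorageType(StorageType.SSD.toString());
    volume.setCapacity(capacityInGB * GB);
    volume.setUsed(usedInGB * GB);
    return volume;
}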

Aggregations

DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 22
Test (org.junit.Test): 19
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 13
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 13
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 12
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 10
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 8
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5
URI (java.net.URI): 2
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 2
File (java.io.File): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 1