Use of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in project hadoop by apache.
From the class Command, method readClusterInfo.
/**
 * Process the URI and return the cluster with nodes set up. This is used in
 * all commands.
 *
 * @param cmd - CommandLine
 * @return DiskBalancerCluster
 * @throws Exception
 */
protected DiskBalancerCluster readClusterInfo(CommandLine cmd) throws Exception {
  Preconditions.checkNotNull(cmd);
  setClusterURI(FileSystem.getDefaultUri(getConf()));
  LOG.debug("using name node URI : {}", this.getClusterURI());
  ClusterConnector connector =
      ConnectorFactory.getCluster(this.clusterURI, getConf());
  cluster = new DiskBalancerCluster(connector);
  LOG.debug("Reading cluster info");
  cluster.readClusterInfo();
  return cluster;
}
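Taken together, these calls form the connector lifecycle every DiskBalancer command follows: resolve a cluster URI, obtain a ClusterConnector from ConnectorFactory, wrap it in a DiskBalancerCluster, and call readClusterInfo to populate the node list. A minimal standalone sketch of the same lifecycle, assuming a Configuration whose default file system URI points at a running NameNode (the driver class name is illustrative, not part of Hadoop):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;

// Hypothetical driver class, for illustration only.
public class ReadClusterInfoExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolve the NameNode URI the same way Command#readClusterInfo does.
    URI nameNodeUri = FileSystem.getDefaultUri(conf);
    ClusterConnector connector = ConnectorFactory.getCluster(nameNodeUri, conf);
    DiskBalancerCluster cluster = new DiskBalancerCluster(connector);
    cluster.readClusterInfo(); // populates the node list from the connector
    System.out.println("Nodes discovered: " + cluster.getNodes().size());
  }
}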
Use of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in project hadoop by apache.
From the class TestDiskBalancerCommand, method testGetNodeList.
@Test(timeout = 60000)
public void testGetNodeList() throws Exception {
  ClusterConnector jsonConnector =
      ConnectorFactory.getCluster(clusterJson, conf);
  DiskBalancerCluster diskBalancerCluster =
      new DiskBalancerCluster(jsonConnector);
  diskBalancerCluster.readClusterInfo();
  int nodeNum = 5;
  StringBuilder listArg = new StringBuilder();
  for (int i = 0; i < nodeNum; i++) {
    listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID())
        .append(",");
  }
  ReportCommand command = new ReportCommand(conf, null);
  command.setCluster(diskBalancerCluster);
  List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
  assertEquals(nodeNum, nodeList.size());
}
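Note that the test builds listArg with a trailing comma, which ReportCommand#getNodes evidently tolerates, since the assertion still sees exactly nodeNum entries. A tidier way to build the same comma-separated argument, as a sketch (buildNodeListArg is a hypothetical helper; assumes java.util.ArrayList and java.util.List are imported):

// Hypothetical helper: joins the first `count` node UUIDs without a
// trailing separator.
private static String buildNodeListArg(DiskBalancerCluster cluster, int count) {
  List<String> uuids = new ArrayList<>();
  for (int i = 0; i < count; i++) {
    uuids.add(cluster.getNodes().get(i).getDataNodeUUID());
  }
  return String.join(",", uuids);
}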
Use of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in project hadoop by apache.
From the class TestDiskBalancer, method testDiskBalancerNameNodeConnectivity.
@Test
public void testDiskBalancerNameNodeConnectivity() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int numDatanodes = 2;
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  try {
    cluster.waitActive();
    ClusterConnector nameNodeConnector =
        ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
    DiskBalancerCluster diskBalancerCluster =
        new DiskBalancerCluster(nameNodeConnector);
    diskBalancerCluster.readClusterInfo();
    assertEquals(diskBalancerCluster.getNodes().size(), numDatanodes);
    DataNode dnNode = cluster.getDataNodes().get(0);
    DiskBalancerDataNode dbDnNode =
        diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
    assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
    assertEquals(dnNode.getDatanodeId().getIpAddr(), dbDnNode.getDataNodeIP());
    assertEquals(dnNode.getDatanodeId().getHostName(),
        dbDnNode.getDataNodeName());
    try (FsDatasetSpi.FsVolumeReferences ref =
        dnNode.getFSDataset().getFsVolumeReferences()) {
      assertEquals(ref.size(), dbDnNode.getVolumeCount());
    }
  } finally {
    cluster.shutdown();
  }
}
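The identity checks above cover only the first DataNode. Extending them to every node in the mini cluster uses the same APIs; a sketch of the loop that could replace the single-node block inside the try:

// Sketch: verify identity fields for every DataNode in the mini cluster.
for (DataNode dn : cluster.getDataNodes()) {
  DiskBalancerDataNode dbNode =
      diskBalancerCluster.getNodeByUUID(dn.getDatanodeUuid());
  assertEquals(dn.getDatanodeUuid(), dbNode.getDataNodeUUID());
  assertEquals(dn.getDatanodeId().getIpAddr(), dbNode.getDataNodeIP());
  assertEquals(dn.getDatanodeId().getHostName(), dbNode.getDataNodeName());
}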
Use of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in project hadoop by apache.
From the class TestPlanner, method testGreedyPlannerComputePlan.
@Test
public void testGreedyPlannerComputePlan() throws Exception {
  URI clusterJson = getClass()
      .getResource("/diskBalancer/data-cluster-3node-3disk.json").toURI();
  ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson, null);
  DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
  cluster.readClusterInfo();
  Assert.assertEquals(3, cluster.getNodes().size());
  cluster.setNodesToProcess(cluster.getNodes());
  List<NodePlan> plan = cluster.computePlan(10.0f);
  Assert.assertNotNull(plan);
}
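The float passed to computePlan is the tolerance threshold, in percent; assuming the usual reading of that parameter, volumes whose data-density difference stays within the threshold produce no move steps. A sketch of re-planning the same cluster object at a tighter tolerance:

// Sketch: a tighter threshold generally yields more (or larger) move steps.
List<NodePlan> tighterPlan = cluster.computePlan(1.0f);
Assert.assertNotNull(tighterPlan);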
Use of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in project hadoop by apache.
From the class TestDiskBalancerCommand, method testReadClusterFromJson.
@Test(timeout = 60000)
public void testReadClusterFromJson() throws Exception {
  ClusterConnector jsonConnector =
      ConnectorFactory.getCluster(clusterJson, conf);
  DiskBalancerCluster diskBalancerCluster =
      new DiskBalancerCluster(jsonConnector);
  diskBalancerCluster.readClusterInfo();
  assertEquals(64, diskBalancerCluster.getNodes().size());
}
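Here clusterJson is a field of the test class rather than a local, but it is initialized the same way TestPlanner resolves its resource above. A sketch of that initialization (the resource name is illustrative, chosen only to match the 64-node assertion):

// Sketch: resolving a packaged JSON cluster description to a URI.
URI clusterJson = getClass()
    .getResource("/diskBalancer/data-cluster-64node-3disk.json").toURI();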