Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
In the class Command, the method readClusterInfo:
/**
 * Process the URI and return the cluster with its nodes set up. This is
 * used in all commands.
 *
 * @param cmd - CommandLine
 * @return DiskBalancerCluster
 * @throws Exception
 */
protected DiskBalancerCluster readClusterInfo(CommandLine cmd) throws Exception {
  Preconditions.checkNotNull(cmd);
  // The cluster URI comes from the default filesystem configured for this
  // command's Configuration.
  setClusterURI(FileSystem.getDefaultUri(getConf()));
  LOG.debug("using name node URI : {}", this.getClusterURI());
  ClusterConnector connector =
      ConnectorFactory.getCluster(this.clusterURI, getConf());
  cluster = new DiskBalancerCluster(connector);
  LOG.debug("Reading cluster info");
  cluster.readClusterInfo();
  return cluster;
}
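A minimal usage sketch, assuming (as in the Hadoop source) that Command exposes a Configuration-taking constructor and an abstract execute(CommandLine) hook; PrintNodesCommand is a hypothetical subclass invented for illustration:

// Hypothetical subclass for illustration; not a real Hadoop command.
public class PrintNodesCommand extends Command {
  public PrintNodesCommand(Configuration conf) {
    super(conf);
  }

  @Override
  public void execute(CommandLine cmd) throws Exception {
    // readClusterInfo() resolves the default filesystem URI, builds a
    // connector, and returns a populated DiskBalancerCluster.
    DiskBalancerCluster cluster = readClusterInfo(cmd);
    for (DiskBalancerDataNode node : cluster.getNodes()) {
      System.out.println(node.getDataNodeName());
    }
  }
}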
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
In the class TestDiskBalancerCommand, the method testGetNodeList:
@Test(timeout = 60000)
public void testGetNodeList() throws Exception {
  ClusterConnector jsonConnector =
      ConnectorFactory.getCluster(clusterJson, conf);
  DiskBalancerCluster diskBalancerCluster =
      new DiskBalancerCluster(jsonConnector);
  diskBalancerCluster.readClusterInfo();
  int nodeNum = 5;
  // Build a comma-separated list of the first five datanode UUIDs.
  StringBuilder listArg = new StringBuilder();
  for (int i = 0; i < nodeNum; i++) {
    listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID())
        .append(",");
  }
  ReportCommand command = new ReportCommand(conf, null);
  command.setCluster(diskBalancerCluster);
  // Each UUID in the list should resolve to exactly one node.
  List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
  assertEquals(nodeNum, nodeList.size());
}
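The loop above leaves a trailing comma in listArg, which getNodes() evidently tolerates since the assertion passes. A comma-safe equivalent using Java 8 streams (a sketch, not taken from the Hadoop source; requires java.util.stream.Collectors):

String listArg = diskBalancerCluster.getNodes().stream()
    .limit(nodeNum)
    .map(DiskBalancerDataNode::getDataNodeUUID)
    .collect(Collectors.joining(","));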
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
In the class DiskBalancerTestUtil, the method createRandCluster:
/**
 * Creates a random cluster.
 *
 * @param dataNodeCount - how many nodes you need
 * @param diskTypes - StorageTypes you need in each node
 * @param diskCount - how many disks you need of each type
 * @return Cluster
 * @throws Exception
 */
public DiskBalancerCluster createRandCluster(int dataNodeCount,
    StorageType[] diskTypes, int diskCount) throws Exception {
  Preconditions.checkState(diskTypes.length > 0);
  Preconditions.checkState(diskCount > 0);
  Preconditions.checkState(dataNodeCount > 0);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  // Once these nodes are added to the connector, the cluster will read them
  // from the connector.
  for (int x = 0; x < dataNodeCount; x++) {
    nullConnector.addNode(createRandomDataNode(diskTypes, diskCount));
  }
  // With this call we have populated the cluster info.
  cluster.readClusterInfo();
  return cluster;
}
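A usage sketch, under the assumption that DiskBalancerTestUtil has a no-argument constructor as in the Hadoop test code:

// Build a 10-node random cluster with two disks of each listed type per node.
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerCluster randCluster = util.createRandCluster(
    10, new StorageType[] {StorageType.DISK, StorageType.SSD}, 2);
assertEquals(10, randCluster.getNodes().size());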
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
In the class TestDiskBalancer, the method testDiskBalancerNameNodeConnectivity:
@Test
public void testDiskBalancerNameNodeConnectivity() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int numDatanodes = 2;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes).build();
  try {
    cluster.waitActive();
    ClusterConnector nameNodeConnector =
        ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
    DiskBalancerCluster diskBalancerCluster =
        new DiskBalancerCluster(nameNodeConnector);
    diskBalancerCluster.readClusterInfo();
    assertEquals(numDatanodes, diskBalancerCluster.getNodes().size());
    // The disk-balancer view of a datanode should match the real datanode.
    DataNode dnNode = cluster.getDataNodes().get(0);
    DiskBalancerDataNode dbDnNode =
        diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
    assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
    assertEquals(dnNode.getDatanodeId().getIpAddr(), dbDnNode.getDataNodeIP());
    assertEquals(dnNode.getDatanodeId().getHostName(),
        dbDnNode.getDataNodeName());
    try (FsDatasetSpi.FsVolumeReferences ref =
        dnNode.getFSDataset().getFsVolumeReferences()) {
      assertEquals(ref.size(), dbDnNode.getVolumeCount());
    }
  } finally {
    cluster.shutdown();
  }
}
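Both this test and testGetNodeList above go through ConnectorFactory.getCluster, which dispatches on the URI scheme. Paraphrased from the Hadoop source (not verbatim):

// Inside ConnectorFactory.getCluster(URI clusterURI, Configuration conf):
if (clusterURI.getScheme().startsWith("file")) {
  // A saved cluster snapshot: read the nodes from a JSON file.
  return new JsonNodeConnector(clusterURI.toURL());
} else {
  // A live cluster: query the NameNode.
  return new DBNameNodeConnector(clusterURI, conf);
}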
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
In the class TestPlanner, the method testGreedyPlannerEqualDisksNoMoves:
@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // All disks have the same capacity and hold the same amount of data.
  DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
  DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
  DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // Since all disks hold the same amount of data, no moves should be planned.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
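For contrast, a sketch of the imbalanced case. It reuses the test's createVolume helper and assumes, as the "SSD" lookup above implies, that the helper creates SSD volumes; since the exact move count depends on GreedyPlanner internals, only a non-empty plan is asserted:

@Test
public void testGreedyPlannerImbalancedDisks() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // One nearly full disk and one empty disk of equal capacity.
  node.addVolume(createVolume("volume1", 100, 90));
  node.addVolume(createVolume("volume2", 100, 0));
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // The spread far exceeds the 10% threshold, so at least one move is expected.
  Assert.assertTrue(plan.getVolumeSetPlans().size() > 0);
}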