use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class JsonNodeConnector method getNodes.
/**
* The getNodes function reads a cluster definition file
* and returns the nodes defined in that file.
*
* @return Array of DiskBalancerDataNodes
*/
@Override
public List<DiskBalancerDataNode> getNodes() throws Exception {
Preconditions.checkNotNull(this.clusterURI);
String dataFilePath = this.clusterURI.getPath();
LOG.info("Reading cluster info from file : " + dataFilePath);
DiskBalancerCluster cluster = READER.readValue(new File(dataFilePath));
String message = String.format("Found %d node(s)", cluster.getNodes().size());
LOG.info(message);
return cluster.getNodes();
}
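A minimal companion sketch (not part of the Hadoop sources): once any ClusterConnector has been constructed, the list returned by getNodes() can be walked directly. The printNodes helper is illustrative, and the import paths assume the usual diskbalancer package layout.

import java.util.List;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;

// Illustrative helper: print one line per node reported by any connector.
static void printNodes(ClusterConnector connector) throws Exception {
  List<DiskBalancerDataNode> nodes = connector.getNodes();
  for (DiskBalancerDataNode node : nodes) {
    System.out.println(node.getDataNodeName() + " : " + node.getVolumeCount() + " volume(s)");
  }
}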
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestConnectors method testJsonConnector.
@Test
public void testJsonConnector() throws Exception {
cluster.waitActive();
ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
diskBalancerCluster.readClusterInfo();
String diskBalancerJson = diskBalancerCluster.toJson();
DiskBalancerCluster serializedCluster = DiskBalancerCluster.parseJson(diskBalancerJson);
Assert.assertEquals("Parsed cluster is not equal to persisted info.", diskBalancerCluster.getNodes().size(), serializedCluster.getNodes().size());
}
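As a hedged follow-on (not from the test itself): the JSON produced by toJson() can be written out so that it later serves as the kind of cluster definition file the JSON connector above reads. The output path is a placeholder; only toJson() and parseJson() are taken from the snippets.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;

// diskBalancerCluster is assumed to have had readClusterInfo() called, as in the test above.
String json = diskBalancerCluster.toJson();
// Placeholder output path; a file like this is the kind of cluster definition file getNodes() reads.
Files.write(Paths.get("/tmp/diskbalancer-cluster.json"), json.getBytes(StandardCharsets.UTF_8));
// The same text parses back into an equivalent model.
DiskBalancerCluster restored = DiskBalancerCluster.parseJson(json);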
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestConnectors method testNameNodeConnector.
@Test
public void testNameNodeConnector() throws Exception {
cluster.waitActive();
ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
diskBalancerCluster.readClusterInfo();
Assert.assertEquals("Expected number of Datanodes not found.", numDatanodes, diskBalancerCluster.getNodes().size());
Assert.assertEquals("Expected number of volumes not found.", volumeCount, diskBalancerCluster.getNodes().get(0).getVolumeCount());
}
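Outside a MiniDFSCluster test, the same wiring could look roughly like this sketch; the hdfs:// URI is a placeholder, and only calls already shown in the tests (ConnectorFactory.getCluster, readClusterInfo, getNodes) are used.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;

Configuration conf = new Configuration();
// Placeholder NameNode URI; in the test it comes from cluster.getFileSystem(0).getUri().
ClusterConnector connector = ConnectorFactory.getCluster(new URI("hdfs://namenode:8020"), conf);
DiskBalancerCluster model = new DiskBalancerCluster(connector);
model.readClusterInfo();
System.out.println("Datanodes reported: " + model.getNodes().size());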
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestDataModels method testClusterSerialize.
@Test
public void testClusterSerialize() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
// Create a cluster with 3 datanodes, 3 disk types and 3 disks of each type,
// that is, 9 disks per machine.
DiskBalancerCluster cluster = util.createRandCluster(3, new StorageType[] { StorageType.DISK, StorageType.RAM_DISK, StorageType.SSD }, 3);
DiskBalancerCluster newCluster = DiskBalancerCluster.parseJson(cluster.toJson());
Assert.assertEquals(cluster.getNodes(), newCluster.getNodes());
Assert.assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
}
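A quick, hedged sketch of using the same utility to generate throwaway cluster JSON for experiments; the sizes are arbitrary, and the import path for the test utility is assumed.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;

DiskBalancerTestUtil util = new DiskBalancerTestUtil();
// 2 datanodes, a single storage type, 2 disks of that type -> 2 disks per machine.
DiskBalancerCluster sample = util.createRandCluster(2, new StorageType[] { StorageType.DISK }, 2);
// Dump the generated model as JSON for inspection or as input to the JSON connector.
System.out.println(sample.toJson());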
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestPlanner method testPlannerScale.
@Test
public void testPlannerScale() throws Exception {
// real datanodes rarely have more than 48 disks; 256 deliberately stress-tests the planner
final int diskCount = 256;
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
NullConnector nullConnector = new NullConnector();
DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
int diskNum = 0;
for (DiskBalancerVolume vol : vSet.getVolumes()) {
vol.setPath("volume" + diskNum++);
node.addVolume(vol);
}
nullConnector.addNode(node);
cluster.readClusterInfo();
GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
// Assuming that our random disks at least generated one step
assertTrue("No Steps Generated from random disks, very unlikely", newPlan.getVolumeSetPlans().size() > 0);
assertTrue("Steps Generated less than disk count - false", newPlan.getVolumeSetPlans().size() < diskCount);
LOG.info("Number of steps are : %d%n", newPlan.getVolumeSetPlans().size());
}
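Given a DiskBalancerDataNode populated and read through a connector as above, the planner invocation itself reduces to a few lines; this sketch reuses the 1.0f threshold from the test and only the calls shown there.

import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;

// node is assumed to be the populated DiskBalancerDataNode built in the test above.
GreedyPlanner planner = new GreedyPlanner(1.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
// Each entry in getVolumeSetPlans() is one step of the proposed plan.
System.out.println("Steps generated: " + plan.getVolumeSetPlans().size());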