Usage of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in the Apache Hadoop project.
From class TestPlanner, method testLoadsCorrectClusterConnector:
@Test
public void testLoadsCorrectClusterConnector() throws Exception {
  // Resolve the bundled 3-node / 3-disk JSON cluster descriptor and ask the
  // factory for a connector. A file-based URI should be routed to the
  // JSON-backed connector implementation.
  ClusterConnector connector = ConnectorFactory.getCluster(
      getClass().getResource("/diskBalancer/data-cluster-3node-3disk.json")
          .toURI(), null);
  // JUnit convention is assertEquals(expected, actual); the original call had
  // the arguments reversed, which produces a misleading failure message.
  assertEquals(
      "class org.apache.hadoop.hdfs.server.diskbalancer.connectors."
          + "JsonNodeConnector",
      connector.getClass().toString());
}
Usage of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in the Apache Hadoop project.
From class TestPlanner, method testGreedyPlannerBalanceVolumeSet:
@Test
public void testGreedyPlannerBalanceVolumeSet() throws Exception {
  // Load the 3-node / 3-disk sample cluster shipped with the test resources.
  final URI clusterDescriptor = getClass()
      .getResource("/diskBalancer/data-cluster-3node-3disk.json").toURI();
  final ClusterConnector connector =
      ConnectorFactory.getCluster(clusterDescriptor, null);
  final DiskBalancerCluster balancerCluster =
      new DiskBalancerCluster(connector);
  balancerCluster.readClusterInfo();
  Assert.assertEquals(3, balancerCluster.getNodes().size());

  // Mark every node for processing, then run the greedy planner against the
  // SSD volume set of the first node with a 10% threshold. The test passes
  // as long as planning completes without throwing.
  balancerCluster.setNodesToProcess(balancerCluster.getNodes());
  final DiskBalancerDataNode firstNode = balancerCluster.getNodes().get(0);
  final GreedyPlanner greedyPlanner = new GreedyPlanner(10.0f, firstNode);
  final NodePlan nodePlan =
      new NodePlan(firstNode.getDataNodeName(), firstNode.getDataNodePort());
  greedyPlanner.balanceVolumeSet(
      firstNode, firstNode.getVolumeSets().get("SSD"), nodePlan);
}
Usage of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in the Apache Hadoop project.
From class TestConnectors, method testJsonConnector:
@Test
public void testJsonConnector() throws Exception {
  cluster.waitActive();
  // Read the live cluster topology through a NameNode-backed connector.
  final ClusterConnector connector =
      ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
  final DiskBalancerCluster liveCluster = new DiskBalancerCluster(connector);
  liveCluster.readClusterInfo();

  // Round-trip the cluster through JSON and verify the node count survives
  // serialization and re-parsing.
  final String json = liveCluster.toJson();
  final DiskBalancerCluster parsedCluster = DiskBalancerCluster.parseJson(json);
  Assert.assertEquals("Parsed cluster is not equal to persisted info.",
      liveCluster.getNodes().size(), parsedCluster.getNodes().size());
}
Usage of org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector in the Apache Hadoop project.
From class TestConnectors, method testNameNodeConnector:
@Test
public void testNameNodeConnector() throws Exception {
  cluster.waitActive();
  // Build a DiskBalancer view of the running mini-cluster via the
  // NameNode-backed connector and pull the topology.
  final ClusterConnector connector =
      ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
  final DiskBalancerCluster balancerView = new DiskBalancerCluster(connector);
  balancerView.readClusterInfo();

  // The connector must report exactly the datanodes and per-node volume
  // count configured for this test fixture.
  Assert.assertEquals("Expected number of Datanodes not found.",
      numDatanodes, balancerView.getNodes().size());
  Assert.assertEquals("Expected number of volumes not found.",
      volumeCount, balancerView.getNodes().get(0).getVolumeCount());
}
Aggregations