use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestPlanner method testGreedyPlannerComputePlan.
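The TestPlanner snippets below are excerpted methods, not complete classes. To compile them in isolation you would need roughly the following imports; treat this list as an approximation of the test class's actual imports.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.net.URI;
import java.util.List;
import java.util.UUID;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
import org.junit.Assert;
import org.junit.Test;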
@Test
public void testGreedyPlannerComputePlan() throws Exception {
  URI clusterJson = getClass()
      .getResource("/diskBalancer/data-cluster-3node-3disk.json").toURI();
  ClusterConnector jsonConnector =
      ConnectorFactory.getCluster(clusterJson, null);
  DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
  cluster.readClusterInfo();
  Assert.assertEquals(3, cluster.getNodes().size());
  cluster.setNodesToProcess(cluster.getNodes());
  // Compute plans for all selected nodes with a 10 percent balancing threshold.
  List<NodePlan> plan = cluster.computePlan(10.0f);
  Assert.assertNotNull(plan);
}
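computePlan returns one NodePlan per node that was set to process. A minimal sketch of how the result could be inspected, using only accessors that already appear in the other tests on this page (getVolumeSetPlans, getSourceVolume, getDestinationVolume, getSizeString, getBytesToMove):

// Illustrative only: walk each node's plan and print the proposed moves.
for (NodePlan nodePlan : plan) {
  for (Step step : nodePlan.getVolumeSetPlans()) {
    System.out.println(step.getSourceVolume().getPath() + " -> "
        + step.getDestinationVolume().getPath() + " : "
        + step.getSizeString(step.getBytesToMove()));
  }
}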
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestPlanner method testNodePlanSerialize.
@Test
public void testNodePlanSerialize() throws Exception {
  final int diskCount = 12;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet =
      util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(01.0f, node);
  NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  // Round-trip the plan through JSON and verify the step count survives.
  String planString = newPlan.toJson();
  assertNotNull(planString);
  NodePlan copy = NodePlan.parseJson(planString);
  assertNotNull(copy);
  assertEquals(newPlan.getVolumeSetPlans().size(), copy.getVolumeSetPlans().size());
}
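Because NodePlan serializes to plain JSON, the same round trip works against a file. A small illustrative sketch using standard java.nio; the file path is arbitrary and not part of the test:

// Illustrative only: write the serialized plan to disk and read it back.
java.nio.file.Path planFile = java.nio.file.Paths.get("/tmp/node-plan.json");
java.nio.file.Files.write(planFile,
    planString.getBytes(java.nio.charset.StandardCharsets.UTF_8));
String loaded = new String(java.nio.file.Files.readAllBytes(planFile),
    java.nio.charset.StandardCharsets.UTF_8);
NodePlan restored = NodePlan.parseJson(loaded);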
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestPlanner method testGreedyPlannerOneVolumeNoPlanTest.
@Test
public void testGreedyPlannerOneVolumeNoPlanTest() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // With a single volume we should not have any plans for moves.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
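The createVolume helper used in this test and the next is not shown on this page. Based on how it is called (path, capacity in GB, used GB) and on the "SSD" volume set it ends up in, a plausible sketch is below; the GB constant and the use of DiskBalancerTestUtil.createRandomVolume are assumptions, not the verbatim helper.

// Hypothetical reconstruction of the createVolume helper used above.
private static final long GB = 1024L * 1024L * 1024L; // assumed unit constant

private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  // Assumed: create a random SSD volume, then pin down the fields the tests rely on.
  DiskBalancerVolume volume = util.createRandomVolume(StorageType.SSD);
  volume.setPath(path);
  volume.setCapacity(capacityInGB * GB);
  volume.setUsed(usedInGB * GB);
  return volume;
}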
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestPlanner method testGreedyPlannerTwoVolume.
@Test
public void testGreedyPlannerTwoVolume() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
  node.addVolume(volume10);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(5.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should have only one planned move from
  // volume30 to volume10 of 10 GB size.
  assertEquals(1, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume30", step.getSourceVolume().getPath());
  assertEquals("volume10", step.getDestinationVolume().getPath());
  assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
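The expected 10 GB figure follows from the volume set's ideal data density: both disks are 100 GB, total used space is 30 GB + 10 GB = 40 GB, so the ideal density is 20 percent, and volume30 must shed 10 GB to reach it. A throwaway sketch of that arithmetic (constants only, nothing from the DiskBalancer API):

// Illustrative arithmetic for the expected move size.
long capacityGb = 100;
long usedVolume30Gb = 30;
long usedVolume10Gb = 10;
double idealDensity =
    (double) (usedVolume30Gb + usedVolume10Gb) / (2 * capacityGb); // 0.20
long expectedMoveGb =
    usedVolume30Gb - (long) (idealDensity * capacityGb);           // 10 GB
System.out.println("Expected move: " + expectedMoveGb + " GB");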
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster in project hadoop by apache.
the class TestDiskBalancerCommand method testReadClusterFromJson.
@Test(timeout = 60000)
public void testReadClusterFromJson() throws Exception {
  ClusterConnector jsonConnector =
      ConnectorFactory.getCluster(clusterJson, conf);
  DiskBalancerCluster diskBalancerCluster =
      new DiskBalancerCluster(jsonConnector);
  diskBalancerCluster.readClusterInfo();
  assertEquals(64, diskBalancerCluster.getNodes().size());
}
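The clusterJson and conf fields come from the surrounding TestDiskBalancerCommand class rather than from this method. A minimal setup sketch, assuming a 64-node JSON fixture analogous to the 3-node one used by TestPlanner; the fixture name here is a guess based on the 64-node assertion, and conf is assumed to be an org.apache.hadoop.conf.Configuration.

private URI clusterJson;
private Configuration conf;

@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  // Assumed fixture name; any cluster JSON describing 64 nodes would satisfy the assertion.
  clusterJson = getClass()
      .getResource("/diskBalancer/data-cluster-64node-3disk.json").toURI();
}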