Use of org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector in project hadoop by apache.
From the class TestPlanner, method testGreedyPlannerNoVolumeTest.
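Both snippets below assume the usual TestPlanner imports. This is a minimal set inferred from the identifiers used in the snippets; verify the exact package names against your Hadoop source tree:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.List;
import java.util.UUID;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
import org.junit.Assert;
import org.junit.Test;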
@Test
public void testGreedyPlannerNoVolumeTest() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  // Plan against a cluster with no nodes or volumes: a 10% threshold is
  // passed, and the result must still be a non-null plan list.
  List<NodePlan> planList = cluster.computePlan(10.0f);
  assertNotNull(planList);
}
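The second snippet below calls a createVolume(path, capacityGB, usedGB) helper that the excerpt does not include. Here is a minimal sketch of what such a helper has to do, assuming DiskBalancerVolume is a plain bean with setPath/setStorageType/setCapacity/setUsed setters (the setter names are an assumption, not verified against a specific Hadoop release); the "SSD" storage type matches the volume-set key used in the test:

private static final long GB = 1024L * 1024L * 1024L;

// Hypothetical helper: builds a volume of capacityGB with usedGB of data.
private DiskBalancerVolume createVolume(String path, int capacityGB, int usedGB) {
  DiskBalancerVolume volume = new DiskBalancerVolume();
  volume.setPath(path);
  volume.setStorageType("SSD");       // groups the volume into the "SSD" set
  volume.setCapacity(capacityGB * GB);
  volume.setUsed(usedGB * GB);
  return volume;
}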
Use of org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector in project hadoop by apache.
From the class TestPlanner, method testGreedyPlannerMoveFromSingleDisk.
@Test
public void testGreedyPlannerMoveFromSingleDisk() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // All disks have the same capacity (200 GB), but only volume100 holds
  // data (100 GB used); the other two volumes are empty.
  DiskBalancerVolume volume1 = createVolume("volume100", 200, 100);
  DiskBalancerVolume volume2 = createVolume("volume0-1", 200, 0);
  DiskBalancerVolume volume3 = createVolume("volume0-2", 200, 0);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should see two move steps: one from volume100 to volume0-1
  // and another from volume100 to volume0-2.
  assertEquals(2, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume100", step.getSourceVolume().getPath());
  // Each destination receives roughly a third of the 100 GB (~33.3 GB);
  // the escaped dot and plain character class tighten the original
  // "33.[2|3|4] G" pattern to exactly the intended strings.
  assertTrue(step.getSizeString(step.getBytesToMove()).matches("33\\.[234] G"));
  step = plan.getVolumeSetPlans().get(1);
  assertEquals("volume100", step.getSourceVolume().getPath());
  assertTrue(step.getSizeString(step.getBytesToMove()).matches("33\\.[234] G"));
}
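The ~33.3 G expectation follows from the ideal storage density: 100 GB of data over 600 GB of total capacity is one sixth, so each 200 GB disk should end up holding about 33.3 GB. A self-contained back-of-envelope check of that arithmetic (plain Java, no Hadoop dependencies):

public class MoveSizeCheck {
  public static void main(String[] args) {
    final long GB = 1024L * 1024L * 1024L;
    long totalUsed = 100 * GB;          // only volume100 holds data
    long totalCapacity = 3 * 200 * GB;  // three 200 GB disks
    double idealDensity = (double) totalUsed / totalCapacity; // ~0.1667
    long idealPerDisk = (long) (idealDensity * 200 * GB);     // bytes per disk
    // Each empty disk should receive ~33.33 GB from volume100.
    System.out.printf("ideal per disk: %.2f GB%n", idealPerDisk / (double) GB);
  }
}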