Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testCustomBandwidth.
/**
 * Test Custom bandwidth.
 *
 * @throws Exception
 */
@Test
public void testCustomBandwidth() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  for (Step step : plan.getVolumeSetPlans()) {
    MoveStep tempStep = (MoveStep) step;
    tempStep.setBandwidth(100);
  }
  executeSubmitPlan(plan, balancer);
  DiskBalancerWorkStatus status = balancer.queryWorkStatus();
  assertNotNull(status);
  DiskBalancerWorkStatus.DiskBalancerWorkEntry entry =
      balancer.queryWorkStatus().getCurrentState().get(0);
  assertEquals(100L, entry.getWorkItem().getBandwidth());
}
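The executeSubmitPlan call above is a helper on the same test class rather than a DiskBalancer API, and its body is not shown in these examples. A minimal sketch of what it plausibly does, assuming the plan is serialized to JSON and identified by the SHA-1 hex digest of that JSON (the same scheme testSubmitWithNullPlan uses below) and assuming a hypothetical PLAN_FILE constant for the plan file name:

// Hypothetical sketch of the executeSubmitPlan helper; the real helper lives
// in TestDiskBalancerWithMockMover and may differ in details such as the
// plan file name and plan version.
private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer)
    throws IOException {
  String planJson = plan.toJson();
  String planID = DigestUtils.shaHex(planJson);
  // PLAN_FILE is an assumed constant naming the plan file; version 1 matches
  // the version used in the other submitPlan call in these examples.
  balancer.submitPlan(planID, 1, PLAN_FILE, planJson, false);
}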
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testSubmitWithNullPlan.
@Test
public void testSubmitWithNullPlan() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  String planJson = plan.toJson();
  String planID = DigestUtils.shaHex(planJson);
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.INVALID_PLAN));
  balancer.submitPlan(planID, 1, "no-plan-file.json", null, false);
}
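DiskBalancerResultVerifier works with JUnit's ExpectedException rule (the thrown field) so the test can assert the specific Result code carried by the DiskBalancerException, not just its type. A minimal sketch of how such a matcher could be written, assuming Hamcrest's TypeSafeMatcher and a getResult() accessor on the exception:

// Sketch of a matcher like DiskBalancerResultVerifier; the real class ships
// with the Hadoop disk-balancer tests and may differ in naming and detail.
import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;

public class DiskBalancerResultVerifier
    extends TypeSafeMatcher<DiskBalancerException> {
  private final DiskBalancerException.Result expectedResult;

  public DiskBalancerResultVerifier(
      DiskBalancerException.Result expectedResult) {
    this.expectedResult = expectedResult;
  }

  @Override
  protected boolean matchesSafely(DiskBalancerException exception) {
    // Match when the exception carries the expected result code,
    // e.g. INVALID_PLAN or PLAN_ALREADY_IN_PROGRESS.
    return expectedResult.equals(exception.getResult());
  }

  @Override
  public void describeTo(Description description) {
    description.appendText("expects result: ").appendValue(expectedResult);
  }
}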
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testResubmitDiskBalancerPlan.
/**
 * Test that submitting a second plan fails while the first is in progress.
 *
 * @throws Exception
 */
@Test
public void testResubmitDiskBalancerPlan() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  // Ask the block mover to get stuck in copy block so the first plan
  // remains in progress.
  mockMoverHelper.getBlockMover().setSleep();
  executeSubmitPlan(plan, balancer);
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS));
  executeSubmitPlan(plan, balancer);
  // Not strictly needed, but this is the cleanup step.
  mockMoverHelper.getBlockMover().clearSleep();
}
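The interesting part of this test is the mock block mover: setSleep makes the mover stall inside its copy loop, so the first plan is still in progress when the plan is submitted a second time, which is what triggers PLAN_ALREADY_IN_PROGRESS. A rough, hypothetical sketch of how such a stall flag could be implemented in the mock (the names and signature below are illustrative, not the real test's internals):

// Hypothetical sketch of the stall flag used by the mock block mover;
// the real mock in TestDiskBalancerWithMockMover may be implemented differently.
private volatile boolean shouldSleep = false;

public void setSleep() {
  shouldSleep = true;
}

public void clearSleep() {
  shouldSleep = false;
}

public void copyBlocks(DiskBalancer.VolumePair pair,
    DiskBalancerWorkItem item) {
  while (shouldSleep) {
    try {
      Thread.sleep(100);  // stay "in progress" until clearSleep() is called
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return;
    }
  }
  // ... block copying would happen here in a real mover ...
}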
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestPlanner, method testGreedyPlannerEqualDisksNoMoves.
@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // All disks have the same capacity and hold the same amount of data.
  DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
  DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
  DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // Since every disk holds the same amount of data, no moves should be planned.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
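createVolume is a private helper of TestPlanner rather than a planner API; it builds a DiskBalancerVolume with the given capacity and used space. Since the test later balances the "SSD" volume set, the helper presumably tags the volumes as SSD. A minimal sketch under those assumptions (the GB unit, the storage type, and the exact setters are assumptions):

// Sketch of a createVolume-style helper; sizes are assumed to be in GB and
// the setters shown are illustrative, not the real TestPlanner code.
private static final long GB = 1024L * 1024L * 1024L;

private DiskBalancerVolume createVolume(String path, int capacityGB,
    int usedGB) {
  DiskBalancerVolume volume = new DiskBalancerVolume();
  volume.setPath(path);
  volume.setStorageType("SSD");  // matches the "SSD" volume set looked up above
  volume.setCapacity(capacityGB * GB);
  volume.setUsed(usedGB * GB);
  volume.setUuid(UUID.randomUUID().toString());
  return volume;
}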
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestPlanner, method testGreedyPlannerComputePlan.
@Test
public void testGreedyPlannerComputePlan() throws Exception {
  URI clusterJson = getClass()
      .getResource("/diskBalancer/data-cluster-3node-3disk.json").toURI();
  ClusterConnector jsonConnector =
      ConnectorFactory.getCluster(clusterJson, null);
  DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
  cluster.readClusterInfo();
  Assert.assertEquals(3, cluster.getNodes().size());
  cluster.setNodesToProcess(cluster.getNodes());
  List<NodePlan> plan = cluster.computePlan(10.0f);
  Assert.assertNotNull(plan);
}
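computePlan returns one NodePlan per node selected with setNodesToProcess. A short follow-up, using only accessors already shown in the earlier examples, could walk the result to see which moves were planned:

// Walk the computed plans; getVolumeSetPlans() is the same accessor used in
// the earlier test methods.
for (NodePlan nodePlan : plan) {
  for (Step step : nodePlan.getVolumeSetPlans()) {
    System.out.println(step);  // each Step describes one planned volume move
  }
}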