Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus in project hadoop by apache.
From the class TestDiskBalancerRPC, method testQueryPlanWithoutSubmit.
@Test
public void testQueryPlanWithoutSubmit() throws Exception {
  RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
  DataNode dataNode = rpcTestHelper.getDataNode();
  DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
  Assert.assertTrue(status.getResult() == NO_PLAN);
}
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus in project hadoop by apache.
From the class TestDiskBalancerRPC, method testQueryPlan.
@Test
public void testQueryPlan() throws Exception {
  RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
  DataNode dataNode = rpcTestHelper.getDataNode();
  String planHash = rpcTestHelper.getPlanHash();
  int planVersion = rpcTestHelper.getPlanVersion();
  NodePlan plan = rpcTestHelper.getPlan();
  dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
      plan.toJson(), false);
  DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
  Assert.assertTrue(status.getResult() == PLAN_UNDER_PROGRESS ||
      status.getResult() == PLAN_DONE);
}
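The test above submits a plan and queries the DataNode once. Below is a minimal sketch (not part of the Hadoop sources) of how a caller might keep polling until the plan is no longer running, using only the calls shown in the test; planHash, planVersion, PLAN_FILE and plan are assumed to come from the same helper as above, and the snippet is assumed to run inside a method that declares throws Exception.

dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
    plan.toJson(), false);
DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
// poll until the DataNode reports that the plan is no longer in progress
while (status.getResult() == DiskBalancerWorkStatus.Result.PLAN_UNDER_PROGRESS) {
  Thread.sleep(1000); // assumed polling interval, not prescribed by the API
  status = dataNode.queryDiskBalancerPlan();
}
// at this point the result is typically PLAN_DONE (or PLAN_CANCELLED if cancelled)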
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus in project hadoop by apache.
From the class TestDiskBalancerWithMockMover, method testCancelDiskBalancerPlan.
/**
* Test Cancel Plan.
*
* @throws Exception
*/
@Test
public void testCancelDiskBalancerPlan() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  // ask block mover to delay execution
  mockMoverHelper.getBlockMover().setSleep();
  executeSubmitPlan(plan, balancer);
  String planJson = plan.toJson();
  String planID = DigestUtils.shaHex(planJson);
  balancer.cancelPlan(planID);
  DiskBalancerWorkStatus status = balancer.queryWorkStatus();
  assertEquals(DiskBalancerWorkStatus.Result.PLAN_CANCELLED,
      status.getResult());
  executeSubmitPlan(plan, balancer);
  // Send a Wrong cancellation request.
  char first = planID.charAt(0);
  first++;
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.NO_SUCH_PLAN));
  balancer.cancelPlan(planID.replace(planID.charAt(0), first));
  // Now cancel the real one
  balancer.cancelPlan(planID);
  // unblock mover.
  mockMoverHelper.getBlockMover().clearSleep();
  status = balancer.queryWorkStatus();
  assertEquals(DiskBalancerWorkStatus.Result.PLAN_CANCELLED,
      status.getResult());
}
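The "wrong cancellation request" above is built by bumping the first character of the plan ID; since the plan ID is the SHA-1 hex digest of the plan JSON, the altered string cannot match any submitted plan. Below is a minimal sketch (not from the Hadoop sources) of the same check written without the Rule-based thrown helper; the try/catch structure is an assumption, and it assumes DiskBalancerException exposes its Result via getResult(), which is what DiskBalancerResultVerifier presumably checks.

String planID = DigestUtils.shaHex(plan.toJson()); // plan ID = SHA-1 hex of the plan JSON
char first = planID.charAt(0);
first++;
String wrongID = planID.replace(planID.charAt(0), first); // cannot equal planID
try {
  balancer.cancelPlan(wrongID);
  fail("expected NO_SUCH_PLAN");
} catch (DiskBalancerException e) {
  // assumed accessor; mirrors what the DiskBalancerResultVerifier rule checks
  assertEquals(DiskBalancerException.Result.NO_SUCH_PLAN, e.getResult());
}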
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus in project hadoop by apache.
From the class TestDiskBalancerWithMockMover, method testDiskBalancerEnabled.
/**
* Checks that Enable flag works correctly.
*
* @throws DiskBalancerException
*/
@Test
public void testDiskBalancerEnabled() throws DiskBalancerException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  TestMover blockMover = new TestMover(cluster.getDataNodes().get(0).getFSDataset());
  DiskBalancer balancer = new DiskBalancerBuilder(conf).setMover(blockMover).build();
  DiskBalancerWorkStatus status = balancer.queryWorkStatus();
  assertEquals(NO_PLAN, status.getResult());
}
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus in project hadoop by apache.
From the class TestDiskBalancerWithMockMover, method testCustomBandwidth.
/**
* Test Custom bandwidth.
*
* @throws Exception
*/
@Test
public void testCustomBandwidth() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  for (Step step : plan.getVolumeSetPlans()) {
    MoveStep tempStep = (MoveStep) step;
    tempStep.setBandwidth(100);
  }
  executeSubmitPlan(plan, balancer);
  DiskBalancerWorkStatus status = balancer.queryWorkStatus();
  assertNotNull(status);
  DiskBalancerWorkStatus.DiskBalancerWorkEntry entry =
      balancer.queryWorkStatus().getCurrentState().get(0);
  assertEquals(100L, entry.getWorkItem().getBandwidth());
}
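The last two lines of the test read the bandwidth back from the first work entry. Below is a minimal sketch (not from the Hadoop sources) of walking all current work entries in the same way, using only the accessors exercised above; the MB/s unit in the comment is an assumption based on the value 100 set on the MoveStep.

DiskBalancerWorkStatus status = balancer.queryWorkStatus();
for (DiskBalancerWorkStatus.DiskBalancerWorkEntry entry : status.getCurrentState()) {
  // each entry carries the work item whose bandwidth was set on the MoveStep
  long bandwidth = entry.getWorkItem().getBandwidth();
  System.out.println("bandwidth (assumed MB/s) = " + bandwidth);
}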