Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancer in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testResubmitDiskBalancerPlan.
/**
 * Tests that submitting a second plan fails while one is already executing.
 *
 * @throws Exception
 */
@Test
public void testResubmitDiskBalancerPlan() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();

  // Ask the block mover to get stuck in copy block so the plan stays active.
  mockMoverHelper.getBlockMover().setSleep();
  executeSubmitPlan(plan, balancer);

  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS));
  executeSubmitPlan(plan, balancer);

  // Not strictly needed, but this is the cleanup step.
  mockMoverHelper.getBlockMover().clearSleep();
}
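The executeSubmitPlan helper is private to this test class and is not shown on this page. A minimal sketch consistent with its uses above, assuming it derives the plan ID from the plan's JSON (the same SHA-1 hex digest that testSubmitWithInvalidHash computes) and defaults to plan version 1:

// Sketch of the test helper, not the verbatim Hadoop source; it is assumed
// to hash the plan JSON and forward everything to DiskBalancer#submitPlan.
private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer,
    int version) throws IOException {
  String planJson = plan.toJson();
  String planID = DigestUtils.shaHex(planJson);
  balancer.submitPlan(planID, version, PLAN_FILE, planJson, false);
}

private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer)
    throws IOException {
  executeSubmitPlan(plan, balancer, 1);
}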
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancer in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testSubmitWithInvalidHash.
@Test
public void testSubmitWithInvalidHash() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();

  String planJson = plan.toJson();
  String planID = DigestUtils.shaHex(planJson);
  // Corrupt the digest by bumping its first character. Note that
  // String#replace(char, char) swaps every occurrence of that character,
  // which is fine here: any change makes the hash check fail.
  char repChar = planID.charAt(0);
  repChar++;

  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.INVALID_PLAN_HASH));
  balancer.submitPlan(planID.replace(planID.charAt(0), repChar), 1,
      PLAN_FILE, planJson, false);
}
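For illustration, the hash corruption can be reproduced standalone. DigestUtils comes from Apache Commons Codec; the JSON literal below is just a stand-in for plan.toJson():

import org.apache.commons.codec.digest.DigestUtils;

public class PlanHashDemo {
  public static void main(String[] args) {
    String planJson = "{\"nodeName\":\"datanode1\"}"; // stand-in for plan.toJson()
    String planID = DigestUtils.shaHex(planJson);     // 40-char SHA-1 hex digest
    char repChar = (char) (planID.charAt(0) + 1);
    // Swaps every occurrence of the first character; one changed character
    // is already enough to make the submitted hash mismatch the plan JSON.
    String tampered = planID.replace(planID.charAt(0), repChar);
    System.out.println(planID + " -> " + tampered);
  }
}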
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancer in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testSubmitWithOldInvalidVersion.
@Test
public void testSubmitWithOldInvalidVersion() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();

  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.INVALID_PLAN_VERSION));

  // Plan version is invalid -- there is no version 0.
  executeSubmitPlan(plan, balancer, 0);
}
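The rejection this test expects presumably comes from a range check inside submitPlan. A minimal sketch of such a guard; the constant names here are assumed for illustration, not taken from the Hadoop source:

// Hypothetical guard; DISKBALANCER_MIN_VERSION / DISKBALANCER_MAX_VERSION
// are assumed names for the supported plan-version bounds.
private void verifyPlanVersion(int planVersion) throws DiskBalancerException {
  if (planVersion < DISKBALANCER_MIN_VERSION
      || planVersion > DISKBALANCER_MAX_VERSION) {
    throw new DiskBalancerException("Invalid plan version.",
        DiskBalancerException.Result.INVALID_PLAN_VERSION);
  }
}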
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancer in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testSubmitDiskBalancerPlan.
@Test
public void testSubmitDiskBalancerPlan() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  final DiskBalancer balancer = mockMoverHelper.getBalancer();
  executeSubmitPlan(plan, balancer);

  // Poll every second, for up to 100 seconds, until the plan finishes.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        return balancer.queryWorkStatus().getResult()
            == DiskBalancerWorkStatus.Result.PLAN_DONE;
      } catch (IOException ex) {
        return false;
      }
    }
  }, 1000, 100000);

  // Asserts that submitting the plan triggered exactly one background run.
  assertTrue(mockMoverHelper.getBlockMover().getRunCount() == 1);
}
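On Java 8 and later the anonymous Supplier can be collapsed into a lambda, since the Supplier interface has a single abstract method; the behavior is identical:

GenericTestUtils.waitFor(() -> {
  try {
    return balancer.queryWorkStatus().getResult()
        == DiskBalancerWorkStatus.Result.PLAN_DONE;
  } catch (IOException ex) {
    return false; // not done yet; keep polling
  }
}, 1000, 100000);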
Use of org.apache.hadoop.hdfs.server.datanode.DiskBalancer in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testDiskBalancerDisabled.
/**
 * Checks that we return the right error if the disk balancer is not enabled.
 */
@Test
public void testDiskBalancerDisabled() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, false);
  restartDataNode();

  TestMover blockMover = new TestMover(
      cluster.getDataNodes().get(0).getFSDataset());
  DiskBalancer balancer = new DiskBalancerBuilder(conf)
      .setMover(blockMover).build();

  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.DISK_BALANCER_NOT_ENABLED));
  balancer.queryWorkStatus();
}
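Public DiskBalancer operations, queryWorkStatus included, presumably start with an enabled check before doing any work. A minimal sketch of that guard; the field name isDiskBalancerEnabled is assumed for illustration:

// Hypothetical sketch of the enabled check; isDiskBalancerEnabled is an
// assumed field name, not verified against the Hadoop source.
private void checkDiskBalancerEnabled() throws DiskBalancerException {
  if (!isDiskBalancerEnabled) {
    throw new DiskBalancerException("Disk Balancer is not enabled.",
        DiskBalancerException.Result.DISK_BALANCER_NOT_ENABLED);
  }
}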