Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by Apache.
From the class TestDiskBalancerRPC, the method testCancelEmptyPlan:
@Test
public void testCancelEmptyPlan() throws Exception {
  RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
  DataNode dataNode = rpcTestHelper.getDataNode();
  String planHash = "";
  NodePlan plan = rpcTestHelper.getPlan();
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(Result.NO_SUCH_PLAN));
  dataNode.cancelDiskBalancePlan(planHash);
}
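An empty hash can never match a registered plan, so the cancel request is rejected with NO_SUCH_PLAN before any cancellation work starts. A minimal sketch of the guard this test exercises, assuming a hypothetical checkPlanHash helper (names and structure are illustrative, not the actual Hadoop implementation):

// Hypothetical DataNode-side guard; an empty or unknown hash maps to
// Result.NO_SUCH_PLAN. This is a sketch, not the real Hadoop method.
private void checkPlanHash(String planHash, String currentPlanHash)
    throws DiskBalancerException {
  if (planHash == null || planHash.isEmpty()
      || !planHash.equals(currentPlanHash)) {
    throw new DiskBalancerException("No such plan.",
        DiskBalancerException.Result.NO_SUCH_PLAN);
  }
}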
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by Apache.
From the class TestDiskBalancerWithMockMover, the method testSubmitWithInvalidHash:
@Test
public void testSubmitWithInvalidHash() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  String planJson = plan.toJson();
  String planID = DigestUtils.shaHex(planJson);
  // Corrupt the ID by bumping its first character so the hash can no
  // longer match the plan JSON.
  char repChar = planID.charAt(0);
  repChar++;
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.INVALID_PLAN_HASH));
  balancer.submitPlan(planID.replace(planID.charAt(0), repChar), 1,
      PLAN_FILE, planJson, false);
}
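The plan ID is the SHA-1 hex digest of the plan JSON, so incrementing the first character guarantees a mismatch with whatever digest the receiver recomputes. A standalone illustration of the corruption trick (the JSON literal here is a placeholder):

import org.apache.commons.codec.digest.DigestUtils;

// sha1Hex is the non-deprecated equivalent of the shaHex call above.
String json = "{\"nodeName\":\"example\"}";      // placeholder plan JSON
String id = DigestUtils.sha1Hex(json);           // 40 lowercase hex chars
char bumped = (char) (id.charAt(0) + 1);
// Note: String.replace(char, char) swaps every occurrence of that
// character, not just the first -- either way the digest cannot match.
String badId = id.replace(id.charAt(0), bumped);
assert !badId.equals(id);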
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by Apache.
From the class TestDiskBalancerWithMockMover, the method testSubmitWithOldInvalidVersion:
@Test
public void testSubmitWithOldInvalidVersion() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(
      DiskBalancerException.Result.INVALID_PLAN_VERSION));
  // Plan version is invalid -- there is no version 0.
  executeSubmitPlan(plan, balancer, 0);
}
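Version validation runs before the plan body is processed; anything outside the supported range is rejected with INVALID_PLAN_VERSION. A hedged sketch of such a guard, with hypothetical constants (the real Hadoop bounds are defined elsewhere):

// Illustrative constants and method; the actual minimum/maximum come from
// Hadoop's DiskBalancer code, not from this sketch. Plan versions start at 1.
private static final long MIN_PLAN_VERSION = 1;
private static final long MAX_PLAN_VERSION = 1;

private void verifyPlanVersion(long planVersion) throws DiskBalancerException {
  if (planVersion < MIN_PLAN_VERSION || planVersion > MAX_PLAN_VERSION) {
    // Version 0, as submitted by the test, fails this check.
    throw new DiskBalancerException(
        "Unsupported plan version: " + planVersion,
        DiskBalancerException.Result.INVALID_PLAN_VERSION);
  }
}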
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by Apache.
From the class TestDiskBalancerWithMockMover, the method testSubmitDiskBalancerPlan:
@Test
public void testSubmitDiskBalancerPlan() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  final DiskBalancer balancer = mockMoverHelper.getBalancer();
  executeSubmitPlan(plan, balancer);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        return balancer.queryWorkStatus().getResult()
            == DiskBalancerWorkStatus.Result.PLAN_DONE;
      } catch (IOException ex) {
        return false;
      }
    }
  }, 1000, 100000);
  // Assert that submitting the plan triggered exactly one background run.
  assertEquals(1, mockMoverHelper.getBlockMover().getRunCount());
}
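The two trailing arguments to GenericTestUtils.waitFor are the poll interval (1000 ms) and the overall timeout (100000 ms). Since Supplier<Boolean> is a single-method interface, the anonymous class above collapses to a lambda on Java 8+:

GenericTestUtils.waitFor(() -> {
  try {
    return balancer.queryWorkStatus().getResult()
        == DiskBalancerWorkStatus.Result.PLAN_DONE;
  } catch (IOException ex) {
    return false;   // keep polling until the timeout expires
  }
}, 1000, 100000);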
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by Apache.
From the class TestPlanner, the method testPlannerScale:
@Test
public void testPlannerScale() throws Exception {
  // Real nodes rarely exceed 48 disks; 256 stresses the planner well
  // beyond realistic scale.
  final int diskCount = 256;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet =
      util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
  NodePlan newPlan =
      new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  // Random disks should produce at least one move step.
  assertTrue("No steps generated from random disks, which is very unlikely",
      newPlan.getVolumeSetPlans().size() > 0);
  assertTrue("Expected fewer steps than the disk count",
      newPlan.getVolumeSetPlans().size() < diskCount);
  // Assuming an SLF4J logger, use {} placeholders rather than the
  // printf-style %d the original line passed, which would log the
  // format string literally.
  LOG.info("Number of steps: {}", newPlan.getVolumeSetPlans().size());
}
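The upper-bound assertion rests on the greedy strategy itself: each planned step brings at least one volume to the set's mean utilization, so the step count stays below the volume count. A self-contained sketch of that idea, using hypothetical stand-in types rather than the Hadoop classes (the actual GreedyPlanner differs in detail):

import java.util.Collections;
import java.util.Comparator;
import java.util.List;

final class GreedySketch {
  // Hypothetical stand-in for DiskBalancerVolume.
  static final class Vol {
    long used;
    long capacity;
    double density() { return (double) used / capacity; }
  }

  // Repeatedly move bytes from the densest to the sparsest volume until
  // every volume sits within `threshold` of the mean density. Each step
  // settles one endpoint at the mean, which keeps the step count roughly
  // bounded by the number of volumes -- the property the test asserts.
  static int planSteps(List<Vol> vols, double threshold) {
    long usedTotal = 0, capTotal = 0;
    for (Vol v : vols) { usedTotal += v.used; capTotal += v.capacity; }
    double mean = (double) usedTotal / capTotal;
    Comparator<Vol> byDensity = Comparator.comparingDouble(Vol::density);
    int steps = 0;
    while (true) {
      Vol src = Collections.max(vols, byDensity);
      Vol dst = Collections.min(vols, byDensity);
      if (src.density() - mean <= threshold
          && mean - dst.density() <= threshold) {
        break;   // all volumes within tolerance of the mean
      }
      // Move just enough bytes to bring one endpoint to the mean.
      long move = Math.min(src.used - (long) (mean * src.capacity),
                           (long) (mean * dst.capacity) - dst.used);
      if (move <= 0) {
        break;   // rounding left nothing meaningful to move
      }
      src.used -= move;
      dst.used += move;
      steps++;
    }
    return steps;
  }
}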