Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestPlanner, method testNodePlanSerialize.
@Test
public void testNodePlanSerialize() throws Exception {
  final int diskCount = 12;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(01.0f, node);
  NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  String planString = newPlan.toJson();
  assertNotNull(planString);
  NodePlan copy = NodePlan.parseJson(planString);
  assertNotNull(copy);
  assertEquals(newPlan.getVolumeSetPlans().size(), copy.getVolumeSetPlans().size());
}
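The round trip above stays in memory; in practice the JSON produced by NodePlan#toJson is what ends up in a plan file that is later handed to a DataNode. A minimal sketch of persisting and reloading a plan that way, using only the toJson/parseJson calls exercised in the test; the helper class and file handling are illustrative, not Hadoop code.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;

public final class PlanFileUtil {
  private PlanFileUtil() {
  }

  // Writes the plan JSON to disk using the same toJson() call the test exercises.
  public static void save(NodePlan plan, Path file) throws Exception {
    Files.write(file, plan.toJson().getBytes(StandardCharsets.UTF_8));
  }

  // Reads the JSON back and rebuilds the NodePlan via parseJson().
  public static NodePlan load(Path file) throws Exception {
    String json = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
    return NodePlan.parseJson(json);
  }
}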
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestPlanner, method testGreedyPlannerOneVolumeNoPlanTest.
@Test
public void testGreedyPlannerOneVolumeNoPlanTest() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // With a single volume we should not have any plans for moves.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
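The createVolume helper these tests call is not shown in the snippets. Below is a hedged reconstruction of what it likely looks like, inferred from how the tests read (for example "volume30" means 100 GB capacity with 30 GB used, matching the "10 G" assertion further down); the DiskBalancerTestUtil call and the GB constant are assumptions, not verified Hadoop code.

private static final long GB = 1024L * 1024L * 1024L;

private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
  // createRandomVolume is assumed to fill in the remaining fields (uuid, storage type, etc.).
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolume volume = util.createRandomVolume(StorageType.SSD);
  volume.setPath(path);
  volume.setCapacity(capacityInGB * GB);
  volume.setReserved(0);
  volume.setUsed(usedInGB * GB);
  return volume;
}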
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestPlanner, method testGreedyPlannerTwoVolume.
@Test
public void testGreedyPlannerTwoVolume() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
  node.addVolume(volume10);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(5.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should have only one planned move from
  // volume30 to volume10 of 10 GB size.
  assertEquals(1, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume30", step.getSourceVolume().getPath());
  assertEquals("volume10", step.getDestinationVolume().getPath());
  assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
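The expected single 10 GB move falls out of simple arithmetic: the greedy planner drives each volume toward the volume set's ideal storage density, here (30 + 10) / (100 + 100) = 0.20. The following self-contained sketch shows that calculation in plain JDK code; it is not the planner's actual implementation.

public class TwoVolumeMoveArithmetic {
  private static final long GB = 1024L * 1024L * 1024L;

  public static void main(String[] args) {
    long capacityPerVolume = 100 * GB;
    long usedVolume30 = 30 * GB;
    long usedVolume10 = 10 * GB;

    // Ideal density = total used / total capacity = 40 GB / 200 GB = 0.20.
    double idealDensity =
        (double) (usedVolume30 + usedVolume10) / (2 * capacityPerVolume);

    // volume30 sits above the ideal density, so it is the source of the move.
    long bytesToMove = usedVolume30 - (long) (idealDensity * capacityPerVolume);
    System.out.println(bytesToMove / GB + " GB"); // prints "10 GB"
  }
}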
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class DiskBalancer, method verifyPlan.
/**
 * Verifies that the user-provided plan is valid.
 *
 * @param planID - SHA-1 of the plan.
 * @param planVersion - Version of the plan, for future use.
 * @param plan - Plan string in JSON.
 * @param force - Skip verifying when the plan was generated (the timestamp check).
 * @return a NodePlan object.
 * @throws DiskBalancerException
 */
private NodePlan verifyPlan(String planID, long planVersion, String plan,
    boolean force) throws DiskBalancerException {
  Preconditions.checkState(lock.isHeldByCurrentThread());
  verifyPlanVersion(planVersion);
  NodePlan nodePlan = verifyPlanHash(planID, plan);
  if (!force) {
    verifyTimeStamp(nodePlan);
  }
  verifyNodeUUID(nodePlan);
  return nodePlan;
}
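The planID that verifyPlanHash checks is documented above as the SHA-1 of the plan. A minimal client-side sketch of computing that identifier, assuming it is the lowercase hex digest of the plan JSON string; the helper class name is hypothetical.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public final class PlanIdUtil {
  private PlanIdUtil() {
  }

  // Returns the lowercase hex SHA-1 digest of the plan JSON.
  public static String sha1Hex(String planJson) throws Exception {
    MessageDigest digest = MessageDigest.getInstance("SHA-1");
    byte[] hash = digest.digest(planJson.getBytes(StandardCharsets.UTF_8));
    StringBuilder hex = new StringBuilder(hash.length * 2);
    for (byte b : hash) {
      hex.append(String.format("%02x", b));
    }
    return hex.toString();
  }
}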
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class DiskBalancer, method submitPlan.
/**
 * Takes a client-submitted plan and converts it into a set of work items
 * that can be executed by the blockMover.
 *
 * @param planId - SHA-1 of the plan string.
 * @param planVersion - Version of the plan string, for future use.
 * @param planFileName - Plan file name.
 * @param planData - Plan data in JSON format.
 * @param force - Skip some validations and execute the plan file.
 * @throws DiskBalancerException
 */
public void submitPlan(String planId, long planVersion, String planFileName,
    String planData, boolean force) throws DiskBalancerException {
  lock.lock();
  try {
    checkDiskBalancerEnabled();
    if ((this.future != null) && (!this.future.isDone())) {
      LOG.error("Disk Balancer - Executing another plan, submitPlan failed.");
      throw new DiskBalancerException("Executing another plan",
          DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS);
    }
    NodePlan nodePlan = verifyPlan(planId, planVersion, planData, force);
    createWorkPlan(nodePlan);
    this.planID = planId;
    this.planFile = planFileName;
    this.currentResult = Result.PLAN_UNDER_PROGRESS;
    executePlan();
  } finally {
    lock.unlock();
  }
}
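As the guard above shows, submitting while another plan is still executing raises a DiskBalancerException with Result.PLAN_ALREADY_IN_PROGRESS. A hedged caller-side sketch of handling that case follows; the wiring, the planVersion value, and the assumption that DiskBalancerException exposes its Result via getResult() are illustrative, not verified DataNode code.

public void submitQuietly(DiskBalancer diskBalancer, String planId,
    String planFileName, String planJson) throws DiskBalancerException {
  try {
    // planVersion 1 and force=false are assumed defaults for a normal submission.
    diskBalancer.submitPlan(planId, 1L, planFileName, planJson, false);
  } catch (DiskBalancerException ex) {
    if (ex.getResult() == DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS) {
      // Another plan is already running on this DataNode; report and move on.
      System.err.println("Plan already in progress, retry later: " + ex.getMessage());
    } else {
      throw ex;
    }
  }
}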