Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.
The class PlanCommand, method printToScreen.
/**
 * Prints a quick summary of the plan to screen.
 *
 * @param plans - List of NodePlans.
 */
private static void printToScreen(List<NodePlan> plans) {
  System.out.println("\nPlan :\n");
  System.out.println(StringUtils.repeat("=", 80));
  System.out.println(
      StringUtils.center("Source Disk", 30) +
      StringUtils.center("Dest.Disk", 30) +
      StringUtils.center("Size", 10) +
      StringUtils.center("Type", 10));
  for (NodePlan plan : plans) {
    for (Step step : plan.getVolumeSetPlans()) {
      System.out.println(String.format("%s %s %s %s",
          StringUtils.center(step.getSourceVolume().getPath(), 30),
          StringUtils.center(step.getDestinationVolume().getPath(), 30),
          StringUtils.center(step.getSizeString(step.getBytesToMove()), 10),
          StringUtils.center(step.getDestinationVolume().getStorageType(), 10)));
    }
  }
  System.out.println(StringUtils.repeat("=", 80));
}
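For reference, the summary renders as an 80-character rule, a header of four centered columns (30, 30, 10, and 10 characters wide), and one row per step, roughly like this (spacing approximate, volume paths hypothetical):

Plan :

================================================================================
         Source Disk                    Dest.Disk                Size      Type
        /data/dn/disk0                 /data/dn/disk1            10 G       SSD
================================================================================

Note that the header concatenates its centered fields while each data row joins them with single spaces (the "%s %s %s %s" format), so data rows come out three characters wider than the header.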
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.
The class TestDiskBalancerWithMockMover, method testCustomBandwidth.
/**
 * Test custom bandwidth.
 *
 * @throws Exception
 */
@Test
public void testCustomBandwidth() throws Exception {
  MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
  NodePlan plan = mockMoverHelper.getPlan();
  DiskBalancer balancer = mockMoverHelper.getBalancer();
  for (Step step : plan.getVolumeSetPlans()) {
    MoveStep tempStep = (MoveStep) step;
    tempStep.setBandwidth(100);
  }
  executeSubmitPlan(plan, balancer);
  DiskBalancerWorkStatus status = balancer.queryWorkStatus();
  assertNotNull(status);
  DiskBalancerWorkStatus.DiskBalancerWorkEntry entry =
      balancer.queryWorkStatus().getCurrentState().get(0);
  assertEquals(100L, entry.getWorkItem().getBandwidth());
}
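The loop casts each Step straight to MoveStep, which holds here because the mock plan is built entirely from MoveStep instances (the concrete step type that exposes setBandwidth). Against an arbitrary plan, a guarded variant of the same loop avoids a possible ClassCastException; a minimal sketch:

// Defensive variant of the loop above: only mutate steps that are
// actually MoveStep instances instead of casting unconditionally.
for (Step step : plan.getVolumeSetPlans()) {
  if (step instanceof MoveStep) {
    ((MoveStep) step).setBandwidth(100);
  }
}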
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.
The class TestPlanner, method testGreedyPlannerTwoVolume.
@Test
public void testGreedyPlannerTwoVolume() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
  node.addVolume(volume10);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(5.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should have only one planned move, from volume30 to volume10,
  // of 10 GB size.
  assertEquals(1, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume30", step.getSourceVolume().getPath());
  assertEquals("volume10", step.getDestinationVolume().getPath());
  assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
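The test encodes the planner's core idea: compute the volume set's ideal storage density (total used over total capacity) and greedily move data from the densest volume to the sparsest until every volume is within the threshold (the 5.0f passed to GreedyPlanner). A self-contained sketch of that idea, with illustrative names rather than Hadoop's actual GreedyPlanner internals:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

/** Illustrative greedy equalizer; not Hadoop's actual GreedyPlanner. */
public class GreedyPlannerSketch {

  static final class Vol {
    final String path;
    final long capacityGb;
    long usedGb;
    Vol(String path, long capacityGb, long usedGb) {
      this.path = path;
      this.capacityGb = capacityGb;
      this.usedGb = usedGb;
    }
    double density() {
      return (double) usedGb / capacityGb;
    }
  }

  record Move(String source, String dest, long sizeGb) {}

  /**
   * Greedily moves data from the densest volume to the sparsest one until
   * the densest volume is within thresholdPercent of the ideal density.
   */
  static List<Move> plan(List<Vol> vols, double thresholdPercent) {
    List<Move> moves = new ArrayList<>();
    long cap = vols.stream().mapToLong(v -> v.capacityGb).sum();
    long used = vols.stream().mapToLong(v -> v.usedGb).sum();
    double ideal = (double) used / cap;  // ideal used/capacity ratio
    double slack = thresholdPercent / 100.0;
    while (true) {
      Vol max = vols.stream().max(Comparator.comparingDouble(Vol::density)).get();
      Vol min = vols.stream().min(Comparator.comparingDouble(Vol::density)).get();
      if (max.density() - ideal <= slack) {
        break;  // densest volume is already close enough to the ideal
      }
      // Move enough to bring the source down to the ideal density, but never
      // enough to push the destination past it.
      long fromSource = Math.round((max.density() - ideal) * max.capacityGb);
      long toDest = Math.round((ideal - min.density()) * min.capacityGb);
      long size = Math.min(fromSource, toDest);
      if (size <= 0) {
        break;
      }
      max.usedGb -= size;
      min.usedGb += size;
      moves.add(new Move(max.path, min.path, size));
    }
    return moves;
  }
}

For the two volumes above (each 100 GB, with 30 GB and 10 GB used) the ideal density is 40/200 = 0.2, so the sketch produces exactly one 10 GB move from volume30 to volume10, matching the test's assertions.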
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.
The class DiskBalancer, method createWorkPlan.
/**
 * Convert a node plan to DiskBalancerWorkItem that Datanode can execute.
 *
 * @param plan - Node Plan
 * @throws DiskBalancerException if a volume referenced by a step cannot be found
 */
private void createWorkPlan(NodePlan plan) throws DiskBalancerException {
  Preconditions.checkState(lock.isHeldByCurrentThread());
  // Clean up any residual work in the map.
  workMap.clear();
  Map<String, String> storageIDToVolBasePathMap =
      getStorageIDToVolumeBasePathMap();
  for (Step step : plan.getVolumeSetPlans()) {
    String sourceVolUuid = step.getSourceVolume().getUuid();
    String destVolUuid = step.getDestinationVolume().getUuid();
    String sourceVolBasePath = storageIDToVolBasePathMap.get(sourceVolUuid);
    if (sourceVolBasePath == null) {
      final String errMsg = "Disk Balancer - Unable to find volume: "
          + step.getSourceVolume().getPath() + ". SubmitPlan failed.";
      LOG.error(errMsg);
      throw new DiskBalancerException(errMsg,
          DiskBalancerException.Result.INVALID_VOLUME);
    }
    String destVolBasePath = storageIDToVolBasePathMap.get(destVolUuid);
    if (destVolBasePath == null) {
      final String errMsg = "Disk Balancer - Unable to find volume: "
          + step.getDestinationVolume().getPath() + ". SubmitPlan failed.";
      LOG.error(errMsg);
      throw new DiskBalancerException(errMsg,
          DiskBalancerException.Result.INVALID_VOLUME);
    }
    VolumePair volumePair = new VolumePair(sourceVolUuid,
        sourceVolBasePath, destVolUuid, destVolBasePath);
    createWorkPlan(volumePair, step);
  }
}
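Since the two null checks differ only in which volume they report, a small extracted helper would keep the loop focused on building pairs. A hypothetical refactoring (requireBasePath is not part of the Hadoop code; it assumes the same LOG and exception types as the method above):

// Hypothetical helper mirroring the inline error handling in createWorkPlan.
private String requireBasePath(Map<String, String> idToBasePath,
    String volUuid, String volPath) throws DiskBalancerException {
  String basePath = idToBasePath.get(volUuid);
  if (basePath == null) {
    final String errMsg = "Disk Balancer - Unable to find volume: "
        + volPath + ". SubmitPlan failed.";
    LOG.error(errMsg);
    throw new DiskBalancerException(errMsg,
        DiskBalancerException.Result.INVALID_VOLUME);
  }
  return basePath;
}

Each iteration of the loop then reduces to two requireBasePath calls followed by the VolumePair construction.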
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.
The class TestPlanner, method testGreedyPlannerEqualizeData.
/**
 * In this test we pass 3 volumes with 30, 20 and 10 GB of data used. We
 * expect the planner to leave each volume at 20 GB of used data.
 * <p/>
 * That is, the plan should say: move 10 GB from volume30 to volume10.
 */
@Test
public void testGreedyPlannerEqualizeData() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  DiskBalancerVolume volume20 = createVolume("volume20", 100, 20);
  DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
  node.addVolume(volume10);
  node.addVolume(volume20);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(5.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should have only one planned move, from volume30 to volume10,
  // of 10 GB size.
  assertEquals(1, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume30", step.getSourceVolume().getPath());
  assertEquals("volume10", step.getDestinationVolume().getPath());
  assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
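The expected single move follows from the equalization arithmetic:

// Illustrative arithmetic behind the assertions:
long totalUsedGb = 30 + 20 + 10;          // 60 GB across three 100 GB volumes
long idealPerVolumeGb = totalUsedGb / 3;  // 20 GB per volume
// volume30 sits 10 GB above the ideal and volume10 sits 10 GB below it,
// while volume20 is already at the ideal; hence exactly one 10 GB move.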