
Example 1 with Step

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.

The class PlanCommand, method printToScreen.

/**
   * Prints a quick summary of the plan to screen.
   *
   * @param plans - List of NodePlans.
   */
private static void printToScreen(List<NodePlan> plans) {
    System.out.println("\nPlan :\n");
    System.out.println(StringUtils.repeat("=", 80));
    System.out.println(StringUtils.center("Source Disk", 30) + StringUtils.center("Dest.Disk", 30) + StringUtils.center("Size", 10) + StringUtils.center("Type", 10));
    for (NodePlan plan : plans) {
        for (Step step : plan.getVolumeSetPlans()) {
            System.out.println(String.format("%s %s %s %s", StringUtils.center(step.getSourceVolume().getPath(), 30), StringUtils.center(step.getDestinationVolume().getPath(), 30), StringUtils.center(step.getSizeString(step.getBytesToMove()), 10), StringUtils.center(step.getDestinationVolume().getStorageType(), 10)));
        }
    }
    System.out.println(StringUtils.repeat("=", 80));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step)
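The same two nested loops can be reused for quick roll-ups of a plan. The helper below is only an illustrative sketch, not part of PlanCommand (the name totalBytesToMove is made up); it relies solely on NodePlan.getVolumeSetPlans() and Step.getBytesToMove(), both used in the example above, and assumes the same imports (List, NodePlan, Step).

/**
   * Sums the bytes that all steps across the given plans intend to move.
   *
   * @param plans - List of NodePlans.
   */
private static long totalBytesToMove(List<NodePlan> plans) {
    long total = 0L;
    for (NodePlan plan : plans) {
        for (Step step : plan.getVolumeSetPlans()) {
            // Each step reports the number of bytes it will move between volumes.
            total += step.getBytesToMove();
        }
    }
    return total;
}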

Example 2 with Step

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.

The class TestDiskBalancerWithMockMover, method testCustomBandwidth.

/**
   * Test Custom bandwidth.
   *
   * @throws Exception
   */
@Test
public void testCustomBandwidth() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();
    for (Step step : plan.getVolumeSetPlans()) {
        MoveStep tempStep = (MoveStep) step;
        tempStep.setBandwidth(100);
    }
    executeSubmitPlan(plan, balancer);
    DiskBalancerWorkStatus status = balancer.queryWorkStatus();
    assertNotNull(status);
    DiskBalancerWorkStatus.DiskBalancerWorkEntry entry = balancer.queryWorkStatus().getCurrentState().get(0);
    assertEquals(100L, entry.getWorkItem().getBandwidth());
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancer(org.apache.hadoop.hdfs.server.datanode.DiskBalancer) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) MoveStep(org.apache.hadoop.hdfs.server.diskbalancer.planner.MoveStep) DiskBalancerWorkStatus(org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus) Test(org.junit.Test)
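Step itself only exposes getters, so per-step tuning such as the bandwidth cap above requires a cast to MoveStep. The sketch below factors the loop from the test into a helper; the name setPlanBandwidth is made up, and it uses only the calls already shown in this example (getVolumeSetPlans and MoveStep.setBandwidth). The meaning of the bandwidth value follows whatever setBandwidth expects; the test simply passes 100.

/** Applies the same bandwidth value to every step of the plan before submission. */
private static void setPlanBandwidth(NodePlan plan, long bandwidth) {
    for (Step step : plan.getVolumeSetPlans()) {
        // The mutable fields live on MoveStep, not on the Step interface.
        ((MoveStep) step).setBandwidth(bandwidth);
    }
}

Called as setPlanBandwidth(plan, 100) right before executeSubmitPlan(plan, balancer), it reproduces the loop in the test above.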

Example 3 with Step

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.

The class TestPlanner, method testGreedyPlannerTwoVolume.

@Test
public void testGreedyPlannerTwoVolume() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
    node.addVolume(volume10);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(5.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should have only one planned move: 10 GB
    // from volume30 to volume10.
    assertEquals(1, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume30", step.getSourceVolume().getPath());
    assertEquals("volume10", step.getDestinationVolume().getPath());
    assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
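The test depends on a createVolume(path, capacityInGB, usedInGB) helper that is not shown on this page. The sketch below is a plausible reconstruction, not the actual TestPlanner code: it assumes DiskBalancerVolume is a plain bean with setPath, setUuid, setStorageType, setCapacity and setUsed setters, that sizes are tracked in bytes, and that the storage type has to be "SSD" so the volumes land in the "SSD" volume set the test queries.

private static final long GB = 1024L * 1024L * 1024L;

/** Hypothetical reconstruction of the createVolume helper used by these tests. */
private static DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath(path);
    // The assertions identify volumes by path, so the path doubles as a readable id here.
    volume.setUuid(path);
    // Assumed: the planner groups volumes into volume sets keyed by storage type.
    volume.setStorageType("SSD");
    volume.setCapacity(capacityInGB * GB);
    volume.setUsed(usedInGB * GB);
    return volume;
}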

Example 4 with Step

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.

The class DiskBalancer, method createWorkPlan.

/**
   * Convert a node plan to DiskBalancerWorkItem that Datanode can execute.
   *
   * @param plan - Node Plan
   */
private void createWorkPlan(NodePlan plan) throws DiskBalancerException {
    Preconditions.checkState(lock.isHeldByCurrentThread());
    // Cleanup any residual work in the map.
    workMap.clear();
    Map<String, String> storageIDToVolBasePathMap = getStorageIDToVolumeBasePathMap();
    for (Step step : plan.getVolumeSetPlans()) {
        String sourceVolUuid = step.getSourceVolume().getUuid();
        String destVolUuid = step.getDestinationVolume().getUuid();
        String sourceVolBasePath = storageIDToVolBasePathMap.get(sourceVolUuid);
        if (sourceVolBasePath == null) {
            final String errMsg = "Disk Balancer - Unable to find volume: " + step.getSourceVolume().getPath() + ". SubmitPlan failed.";
            LOG.error(errMsg);
            throw new DiskBalancerException(errMsg, DiskBalancerException.Result.INVALID_VOLUME);
        }
        String destVolBasePath = storageIDToVolBasePathMap.get(destVolUuid);
        if (destVolBasePath == null) {
            final String errMsg = "Disk Balancer - Unable to find volume: " + step.getDestinationVolume().getPath() + ". SubmitPlan failed.";
            LOG.error(errMsg);
            throw new DiskBalancerException(errMsg, DiskBalancerException.Result.INVALID_VOLUME);
        }
        VolumePair volumePair = new VolumePair(sourceVolUuid, sourceVolBasePath, destVolUuid, destVolBasePath);
        createWorkPlan(volumePair, step);
    }
}
Also used : Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerException(org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
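Both lookups above follow the same fail-fast pattern: resolve the volume's base path by its storage UUID, and abort the plan submission with INVALID_VOLUME when the mapping is missing. The helper below merely factors out that pattern for illustration; it is not part of DiskBalancer and reuses only the calls visible in the method above.

/** Illustrative helper: resolves a volume's base path or fails the plan submission. */
private String resolveVolumeBasePath(Map<String, String> storageIDToVolBasePathMap,
    String volUuid, String volPath) throws DiskBalancerException {
    String basePath = storageIDToVolBasePathMap.get(volUuid);
    if (basePath == null) {
        // Same error handling as in createWorkPlan: log, then reject the plan.
        final String errMsg = "Disk Balancer - Unable to find volume: " + volPath + ". SubmitPlan failed.";
        LOG.error(errMsg);
        throw new DiskBalancerException(errMsg, DiskBalancerException.Result.INVALID_VOLUME);
    }
    return basePath;
}

With such a helper, the body of the loop in createWorkPlan reduces to two lookups followed by the VolumePair construction.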

Example 5 with Step

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.Step in project hadoop by apache.

The class TestPlanner, method testGreedyPlannerEqualizeData.

/**
   * In this test we pass 3 volumes with 30, 20 and 10 GB of data used. We
   * expect the planner to equalize them at 20 GB of used data per volume.
   * <p/>
   * That is the plan should say move 10 GB from volume30 to volume10.
   */
@Test
public void testGreedyPlannerEqualizeData() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
    DiskBalancerVolume volume20 = createVolume("volume20", 100, 20);
    DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
    node.addVolume(volume10);
    node.addVolume(volume20);
    node.addVolume(volume30);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(5.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should have only one planned move: 10 GB
    // from volume30 to volume10.
    assertEquals(1, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume30", step.getSourceVolume().getPath());
    assertEquals("volume10", step.getDestinationVolume().getPath());
    assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner(org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
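The expected plan follows from simple arithmetic on the numbers in the test: the three volumes hold 30 + 20 + 10 = 60 GB of data, so equalizing across three 100 GB volumes means 20 GB of used data per volume. volume20 is already at that target, volume30 is 10 GB over, and volume10 is 10 GB under, so a single 10 GB move from volume30 to volume10 is sufficient, and the 10 GB imbalance is large enough relative to the 5.0f argument passed to GreedyPlanner for a move to be planned at all. That single step is exactly what the assertions check for.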

Aggregations

Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 9 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 8 usages
Test (org.junit.Test): 6 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 5 usages
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 5 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 5 usages
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 5 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 5 usages
DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer): 1 usage
DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus): 1 usage
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 1 usage
MoveStep (org.apache.hadoop.hdfs.server.diskbalancer.planner.MoveStep): 1 usage