Example 11 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

The class TestDiskBalancerWithMockMover, method testCustomBandwidth.

/**
   * Tests that a custom bandwidth set on each move step is reflected in
   * the reported work item.
   *
   * @throws Exception
   */
@Test
public void testCustomBandwidth() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();
    // Override the bandwidth on every move step in the plan.
    for (Step step : plan.getVolumeSetPlans()) {
        MoveStep tempStep = (MoveStep) step;
        tempStep.setBandwidth(100);
    }
    executeSubmitPlan(plan, balancer);
    DiskBalancerWorkStatus status = balancer.queryWorkStatus();
    assertNotNull(status);
    DiskBalancerWorkStatus.DiskBalancerWorkEntry entry = status.getCurrentState().get(0);
    assertEquals(100L, entry.getWorkItem().getBandwidth());
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer) Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step) MoveStep (org.apache.hadoop.hdfs.server.diskbalancer.planner.MoveStep) DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus) Test (org.junit.Test)
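
The executeSubmitPlan helper is private to the test class and not included in this snippet. Judging from the direct submitPlan call in Example 12 below, it plausibly looks like the following sketch; the "plan.json" file name is an assumption, and the required imports are java.io.IOException and org.apache.commons.codec.digest.DigestUtils.

// Plausible reconstruction of the executeSubmitPlan helper (not shown in
// this snippet). It mirrors the submitPlan call in Example 12: serialize
// the plan to JSON, derive the plan ID as a SHA-1 hex digest, and submit.
// The "plan.json" file name is an assumed value, not taken from the source.
private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer)
    throws IOException {
    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);
    balancer.submitPlan(planID, 1, "plan.json", planJson, false);
}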

Example 12 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

The class TestDiskBalancerWithMockMover, method testSubmitWithNullPlan.

@Test
public void testSubmitWithNullPlan() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();
    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);
    // A null plan body must be rejected as an invalid plan.
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException.Result.INVALID_PLAN));
    balancer.submitPlan(planID, 1, "no-plan-file.json", null, false);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer) Test (org.junit.Test)
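
The thrown field used above is JUnit 4's ExpectedException rule; the test class presumably declares it with the standard idiom shown below.

import org.junit.Rule;
import org.junit.rules.ExpectedException;

// Standard JUnit 4 idiom assumed by the tests above: the rule is created
// disarmed, and each test arms it with thrown.expect(...) before the call
// that is expected to fail.
@Rule
public ExpectedException thrown = ExpectedException.none();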

Example 13 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

The class TestDiskBalancerWithMockMover, method testResubmitDiskBalancerPlan.

/**
   * Tests that submitting a second plan while one is in progress fails.
   *
   * @throws Exception
   */
@Test
public void testResubmitDiskBalancerPlan() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();
    // Ask the block mover to get stuck copying a block so the first plan stays in progress.
    mockMoverHelper.getBlockMover().setSleep();
    executeSubmitPlan(plan, balancer);
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException.Result.PLAN_ALREADY_IN_PROGRESS));
    executeSubmitPlan(plan, balancer);
    // Not strictly needed, but clear the sleep flag as cleanup.
    mockMoverHelper.getBlockMover().clearSleep();
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer) Test (org.junit.Test)
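
DiskBalancerResultVerifier is handed to thrown.expect(...), so it has to be a Hamcrest matcher over DiskBalancerException. A minimal sketch of its likely shape, assuming DiskBalancerException exposes its Result code via getResult():

import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;

// Sketch of a matcher that checks the Result code carried by a
// DiskBalancerException; this is how the expect(...) calls above can
// distinguish INVALID_PLAN from PLAN_ALREADY_IN_PROGRESS.
public class DiskBalancerResultVerifier
        extends TypeSafeMatcher<DiskBalancerException> {

    private final DiskBalancerException.Result expectedResult;

    public DiskBalancerResultVerifier(DiskBalancerException.Result expectedResult) {
        this.expectedResult = expectedResult;
    }

    @Override
    protected boolean matchesSafely(DiskBalancerException exception) {
        // Match only when the exception carries the expected result code.
        return expectedResult.equals(exception.getResult());
    }

    @Override
    public void describeTo(Description description) {
        description.appendText("expected result: ").appendValue(expectedResult);
    }
}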

Example 14 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

The class TestPlanner, method testGreedyPlannerEqualDisksNoMoves.

@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // All disks hold the same amount of data.
    DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
    DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
    DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // Since every disk holds the same amount of data, no moves should
    // be planned.
    assertEquals(0, plan.getVolumeSetPlans().size());
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test (org.junit.Test)
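
The createVolume helper is also not part of this snippet. Given the node.getVolumeSets().get("SSD") lookup above, it plausibly builds an SSD-typed DiskBalancerVolume with the given capacity and used space. A sketch under those assumptions; the GB unit and the exact setters are inferred from the DiskBalancerVolume data model, not copied from the source:

// Hypothetical stand-in for TestPlanner's createVolume helper (the real one
// is not shown here). Arguments are read as capacity and used space in GB,
// which is an assumption; the storage type must be "SSD" so the volume lands
// in the volume set the test balances.
private DiskBalancerVolume createVolume(String path, long capacityGB, long usedGB) {
    final long GB = 1024L * 1024L * 1024L;
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath(path);
    volume.setUuid(java.util.UUID.randomUUID().toString());
    volume.setStorageType("SSD");
    volume.setCapacity(capacityGB * GB);
    volume.setUsed(usedGB * GB);
    return volume;
}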

Example 15 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

The class TestPlanner, method testGreedyPlannerComputePlan.

@Test
public void testGreedyPlannerComputePlan() throws Exception {
    URI clusterJson = getClass().getResource("/diskBalancer/data-cluster-3node-3disk.json").toURI();
    ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson, null);
    DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
    cluster.readClusterInfo();
    Assert.assertEquals(3, cluster.getNodes().size());
    cluster.setNodesToProcess(cluster.getNodes());
    List<NodePlan> plan = cluster.computePlan(10.0f);
    Assert.assertNotNull(plan);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector) DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) URI (java.net.URI) Test (org.junit.Test)
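
To inspect what computePlan produced, the returned plans can be walked the same way testCustomBandwidth walks getVolumeSetPlans() in Example 11. A short sketch; the Step accessors (getSourceVolume, getDestinationVolume, getBytesToMove) are assumptions about the planner API:

// Print every planned move in every node plan. The Step accessors used
// here are inferred, not shown in the snippets above.
for (NodePlan nodePlan : plan) {
    for (Step step : nodePlan.getVolumeSetPlans()) {
        System.out.printf("%s -> %s : %d bytes%n",
            step.getSourceVolume().getPath(),
            step.getDestinationVolume().getPath(),
            step.getBytesToMove());
    }
}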

Aggregations

NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 41
Test (org.junit.Test): 33
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 21
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 13
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 11
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 10
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 9
DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer): 8
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 8
Configuration (org.apache.hadoop.conf.Configuration): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus): 3
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 3
IOException (java.io.IOException): 2
URI (java.net.URI): 2
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 2
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 2
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 2