Example 21 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class TestDiskBalancerRPC, method testCancelEmptyPlan:

@Test
public void testCancelEmptyPlan() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    String planHash = "";
    NodePlan plan = rpcTestHelper.getPlan();
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(Result.NO_SUCH_PLAN));
    // An empty plan hash matches no submitted plan, so the cancel is rejected.
    dataNode.cancelDiskBalancePlan(planHash);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
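
For contrast, the success path derives the plan hash from the plan's JSON before cancelling. A minimal sketch, assuming the DataNode submit/cancel API and the PLAN_FILE constant used elsewhere in these tests:

String planJson = plan.toJson();
// The plan ID is the SHA-1 hex digest of the plan JSON.
String planHash = DigestUtils.shaHex(planJson);
// Submit first so the hash refers to a known plan, then cancel it.
dataNode.submitDiskBalancerPlan(planHash, 1, PLAN_FILE, planJson, false);
dataNode.cancelDiskBalancePlan(planHash);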

Example 22 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class TestDiskBalancerWithMockMover, method testSubmitWithInvalidHash:

@Test
public void testSubmitWithInvalidHash() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();
    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);
    // Shift the first hex digit to a different character to corrupt the hash.
    char repChar = planID.charAt(0);
    repChar++;
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException.Result.INVALID_PLAN_HASH));
    balancer.submitPlan(planID.replace(planID.charAt(0), repChar), 1, PLAN_FILE, planJson, false);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer), Test (org.junit.Test)
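
Note that String.replace(char, char) replaces every occurrence of the first hex digit, not just the leading one; the corrupted ID is still guaranteed to differ from the original, which is all the INVALID_PLAN_HASH check needs. An illustrative fragment:

char first = planID.charAt(0);
char repChar = (char) (first + 1);
String badID = planID.replace(first, repChar);
// badID differs from planID at every position where `first` occurred.
assert !badID.equals(planID);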

Example 23 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class TestDiskBalancerWithMockMover, method testSubmitWithOldInvalidVersion:

@Test
public void testSubmitWithOldInvalidVersion() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    DiskBalancer balancer = mockMoverHelper.getBalancer();
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException.Result.INVALID_PLAN_VERSION));
    // Plan version is invalid -- there is no version 0.
    executeSubmitPlan(plan, balancer, 0);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer), Test (org.junit.Test)
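
This example and the next call an executeSubmitPlan helper whose body is not shown. A plausible sketch, assuming it simply hashes the plan JSON and forwards it to the balancer (the exact body lives in TestDiskBalancerWithMockMover):

private void executeSubmitPlan(NodePlan plan, DiskBalancer balancer, int version) throws IOException {
    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);
    balancer.submitPlan(planID, version, PLAN_FILE, planJson, false);
}

The two-argument form used in the next example presumably defaults the version to 1, the current plan version.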

Example 24 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class TestDiskBalancerWithMockMover, method testSubmitDiskBalancerPlan:

@Test
public void testSubmitDiskBalancerPlan() throws Exception {
    MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
    NodePlan plan = mockMoverHelper.getPlan();
    final DiskBalancer balancer = mockMoverHelper.getBalancer();
    executeSubmitPlan(plan, balancer);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                return balancer.queryWorkStatus().getResult() == DiskBalancerWorkStatus.Result.PLAN_DONE;
            } catch (IOException ex) {
                return false;
            }
        }
    }, 1000, 100000);
    // Asserts that submit plan caused an execution in the background.
    assertTrue(mockMoverHelper.getBlockMover().getRunCount() == 1);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer), IOException (java.io.IOException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Test (org.junit.Test)
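
On Java 8 and later, the anonymous Supplier can be collapsed into a lambda; an equivalent sketch:

GenericTestUtils.waitFor(() -> {
    try {
        return balancer.queryWorkStatus().getResult() == DiskBalancerWorkStatus.Result.PLAN_DONE;
    } catch (IOException ex) {
        return false;
    }
}, 1000, 100000);   // poll every 1000 ms, time out after 100000 ms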

Example 25 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class TestPlanner, method testPlannerScale:

@Test
public void testPlannerScale() throws Exception {
    // It is rare to see more than 48 disks on a real node; 256 deliberately
    // stresses the planner well beyond that.
    final int diskCount = 256;
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    int diskNum = 0;
    for (DiskBalancerVolume vol : vSet.getVolumes()) {
        vol.setPath("volume" + diskNum++);
        node.addVolume(vol);
    }
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
    NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
    // Assuming that our random disks generated at least one step.
    assertTrue("No steps generated from random disks, which is very unlikely", newPlan.getVolumeSetPlans().size() > 0);
    assertTrue("Expected fewer steps than the disk count", newPlan.getVolumeSetPlans().size() < diskCount);
    LOG.info("Number of steps: {}", newPlan.getVolumeSetPlans().size());
}
Also used: DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
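
Once a plan exists, its individual moves can be inspected through the Step interface. A minimal sketch, assuming the accessors on org.apache.hadoop.hdfs.server.diskbalancer.planner.Step:

for (Step step : newPlan.getVolumeSetPlans()) {
    // Each step names a source volume, a destination volume, and a byte count.
    LOG.info("Move {} bytes from {} to {}", step.getBytesToMove(),
        step.getSourceVolume().getPath(), step.getDestinationVolume().getPath());
}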

Aggregations

NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 41 uses
Test (org.junit.Test): 33 uses
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 21 uses
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 13 uses
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 11 uses
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11 uses
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 10 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 9 uses
DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer): 8 uses
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 8 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 uses
DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus): 3 uses
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 3 uses
IOException (java.io.IOException): 2 uses
URI (java.net.URI): 2 uses
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 2 uses
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 2 uses
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 2 uses