
Example 31 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From class TestPlanner, method testGreedyPlannerNoVolumeTest.

@Test
public void testGreedyPlannerNoVolumeTest() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
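    // computePlan takes the balancing threshold in percent (10.0f here); with
    // no nodes registered it should still return a non-null (presumably empty)
    // list of plans, which is all this test asserts.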
    List<NodePlan> planList = cluster.computePlan(10.0f);
    assertNotNull(planList);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), Test (org.junit.Test)

Example 32 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From class TestPlanner, method testGreedyPlannerMoveFromSingleDisk.

@Test
public void testGreedyPlannerMoveFromSingleDisk() throws Exception {
    NullConnector nullConnector = new NullConnector();
    DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // All volumes have the same capacity (200 GB), but only volume100 holds data
    DiskBalancerVolume volume1 = createVolume("volume100", 200, 100);
    DiskBalancerVolume volume2 = createVolume("volume0-1", 200, 0);
    DiskBalancerVolume volume3 = createVolume("volume0-2", 200, 0);
    node.addVolume(volume1);
    node.addVolume(volume2);
    node.addVolume(volume3);
    nullConnector.addNode(node);
    cluster.readClusterInfo();
    Assert.assertEquals(1, cluster.getNodes().size());
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
    // We should see 2 move plans. One from volume100 to volume0-1
    // and another from volume100 to volume0-2
    assertEquals(2, plan.getVolumeSetPlans().size());
    Step step = plan.getVolumeSetPlans().get(0);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("33\\.[234] G"));
    step = plan.getVolumeSetPlans().get(1);
    assertEquals("volume100", step.getSourceVolume().getPath());
    assertTrue(step.getSizeString(step.getBytesToMove()).matches("33\\.[234] G"));
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner), Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
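
The createVolume helper above belongs to TestPlanner and is not shown in this snippet. A minimal sketch of what it plausibly looks like, assuming DiskBalancerVolume exposes plain setters for path, capacity, used space, and storage type; the GB scaling is inferred from the "33.x G" size assertions, so the names and units here are assumptions rather than the verified Hadoop source:

private static final long GB = 1024L * 1024L * 1024L;

// Hypothetical reconstruction of TestPlanner#createVolume; the real helper's
// setters and unit handling may differ.
private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    // Logical path, e.g. "volume100", reported later via step.getSourceVolume().getPath()
    volume.setPath(path);
    volume.setCapacity(capacityInGB * GB);
    volume.setUsed(usedInGB * GB);
    volume.setUuid(UUID.randomUUID().toString());
    // Grouped into the "SSD" volume set queried via node.getVolumeSets().get("SSD")
    volume.setStorageType("SSD");
    return volume;
}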

Example 33 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From class TestDiskBalancer, method testDiskBalancerEndToEnd.

/**
   * This test simulates a real DataNode working with DiskBalancer.
   * <p>
   * Here is the overview of this test:
   * <p>
   * 1. Write a bunch of blocks and move them to one disk to create imbalance.
   * 2. Rewrite the capacity of the disks in the DiskBalancer model so that the
   * planner will produce a move plan.
   * 3. Execute the move plan and wait until the plan is done.
   * 4. Verify that all volumes have blocks now.
   *
   * @throws Exception
   */
@Test
public void testDiskBalancerEndToEnd() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int blockCount = 100;
    final int blockSize = 1024;
    final int diskCount = 2;
    final int dataNodeCount = 1;
    final int dataNodeIndex = 0;
    final int sourceDiskIndex = 0;
    final long cap = blockSize * 2L * blockCount;
    MiniDFSCluster cluster = new ClusterBuilder().setBlockCount(blockCount).setBlockSize(blockSize).setDiskCount(diskCount).setNumDatanodes(dataNodeCount).setConf(conf).setCapacities(new long[] { cap, cap }).build();
    try {
        DataMover dataMover = new DataMover(cluster, dataNodeIndex, sourceDiskIndex, conf, blockSize, blockCount);
        dataMover.moveDataToSourceDisk();
        NodePlan plan = dataMover.generatePlan();
        dataMover.executePlan(plan);
        dataMover.verifyPlanExectionDone();
        dataMover.verifyAllVolumesHaveData();
        dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
    } finally {
        cluster.shutdown();
    }
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Test (org.junit.Test)
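
ClusterBuilder and DataMover are helpers defined inside TestDiskBalancer and omitted from this excerpt. The waiting step (verifyPlanExectionDone) presumably boils down to polling the datanode until the submitted plan reports completion; a rough sketch of that idea, assuming DataNode#queryDiskBalancerPlan and the DiskBalancerWorkStatus.Result value PLAN_DONE behave as their names suggest:

// Sketch only: poll until the datanode reports the current plan as done.
// GenericTestUtils is org.apache.hadoop.test.GenericTestUtils.
private void waitForPlanCompletion(DataNode dataNode) throws Exception {
    GenericTestUtils.waitFor(() -> {
        try {
            DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
            return status.getResult() == DiskBalancerWorkStatus.Result.PLAN_DONE;
        } catch (IOException e) {
            // Treat transient query failures as "not done yet"
            return false;
        }
    // check every second, time out after 100 seconds
    }, 1000, 100000);
}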

Example 34 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From class TestDiskBalancer, method testDiskBalancerWhenRemovingVolumes.

/**
   * Test disk balancer behavior when one of the disks involved
   * in balancing operation is removed after submitting the plan.
   * @throws Exception
   */
@Test
public void testDiskBalancerWhenRemovingVolumes() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int blockCount = 100;
    final int blockSize = 1024;
    final int diskCount = 2;
    final int dataNodeCount = 1;
    final int dataNodeIndex = 0;
    final int sourceDiskIndex = 0;
    final long cap = blockSize * 2L * blockCount;
    MiniDFSCluster cluster = new ClusterBuilder().setBlockCount(blockCount).setBlockSize(blockSize).setDiskCount(diskCount).setNumDatanodes(dataNodeCount).setConf(conf).setCapacities(new long[] { cap, cap }).build();
    try {
        DataMover dataMover = new DataMover(cluster, dataNodeIndex, sourceDiskIndex, conf, blockSize, blockCount);
        dataMover.moveDataToSourceDisk();
        NodePlan plan = dataMover.generatePlan();
        dataMover.executePlanDuringDiskRemove(plan);
        dataMover.verifyAllVolumesHaveData();
        dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
    } catch (Exception e) {
        Assert.fail("Unexpected exception: " + e);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), TimeoutException (java.util.concurrent.TimeoutException), IOException (java.io.IOException), ReconfigurationException (org.apache.hadoop.conf.ReconfigurationException), Test (org.junit.Test)
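
executePlanDuringDiskRemove is another TestDiskBalancer helper. Judging by the ReconfigurationException import, it hot-removes a volume mid-execution by reconfiguring the datanode's data directories. A hedged sketch of just that removal mechanism, assuming DataNode inherits the public reconfigureProperty method from ReconfigurableBase; the real helper's submit-then-remove sequencing is more involved:

// Sketch only: shrinking dfs.datanode.data.dir to a single directory while a
// plan is executing hot-removes the other volume.
private void removeOneDataDir(DataNode dataNode, String remainingDir)
    throws ReconfigurationException {
    dataNode.reconfigureProperty(
        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, remainingDir);
}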

Example 35 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From class TestDiskBalancerRPC, method testSubmitPlanWithInvalidHash.

@Test
public void testSubmitPlanWithInvalidHash() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    String planHash = rpcTestHelper.getPlanHash();
    char[] hashArray = planHash.toCharArray();
    hashArray[0]++;
    planHash = String.valueOf(hashArray);
    int planVersion = rpcTestHelper.getPlanVersion();
    NodePlan plan = rpcTestHelper.getPlan();
    thrown.expect(DiskBalancerException.class);
    thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN_HASH));
    dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE, plan.toJson(), false);
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)
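
RpcTestHelper is a test-local fixture that builds a plan and computes its hash; the snippet corrupts the first character of that hash to provoke INVALID_PLAN_HASH. For contrast, a happy-path submission might look like the sketch below, assuming the hash is a SHA-1 hex digest of the serialized plan; the actual digest used by RpcTestHelper is an assumption:

// Hypothetical valid submission: hash the plan JSON and hand both to the
// datanode. DigestUtils is org.apache.commons.codec.digest.DigestUtils.
String planJson = plan.toJson();
String validHash = DigestUtils.sha1Hex(planJson);
dataNode.submitDiskBalancerPlan(validHash, planVersion, PLAN_FILE, planJson, false);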

Aggregations

NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 41 usages
Test (org.junit.Test): 33 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 21 usages
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 13 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 11 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11 usages
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 10 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 9 usages
DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer): 8 usages
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 8 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 usages
DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus): 3 usages
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 3 usages
IOException (java.io.IOException): 2 usages
URI (java.net.URI): 2 usages
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 2 usages
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 2 usages
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 2 usages