Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project Hadoop by Apache.
The class TestPlanner, method testGreedyPlannerNoVolumeTest.
@Test
public void testGreedyPlannerNoVolumeTest() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  List<NodePlan> planList = cluster.computePlan(10.0f);
  assertNotNull(planList);
}
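The 10.0f argument passed to computePlan is the balancing threshold, expressed as a percentage. Because the NullConnector has no DataNodes registered, the returned list should not only be non-null but presumably empty as well; a hedged follow-up assertion (not part of the original test) would be:

// Assumption, not in the original test: with no nodes in the cluster the
// planner has nothing to balance, so the returned plan list should be empty.
assertEquals(0, planList.size());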
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project Hadoop by Apache.
The class TestPlanner, method testGreedyPlannerMoveFromSingleDisk.
@Test
public void testGreedyPlannerMoveFromSingleDisk() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // All three disks have the same capacity (200 GB); only volume100 holds
  // data (100 GB used).
  DiskBalancerVolume volume1 = createVolume("volume100", 200, 100);
  DiskBalancerVolume volume2 = createVolume("volume0-1", 200, 0);
  DiskBalancerVolume volume3 = createVolume("volume0-2", 200, 0);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should see two move plans: one from volume100 to volume0-1
  // and another from volume100 to volume0-2.
  assertEquals(2, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume100", step.getSourceVolume().getPath());
  assertTrue(step.getSizeString(step.getBytesToMove()).matches("33.[2|3|4] G"));
  step = plan.getVolumeSetPlans().get(1);
  assertEquals("volume100", step.getSourceVolume().getPath());
  assertTrue(step.getSizeString(step.getBytesToMove()).matches("33.[2|3|4] G"));
}
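The snippet calls a createVolume(path, capacityInGB, usedInGB) helper that is defined elsewhere in TestPlanner and not shown here. A minimal sketch of such a helper, assuming sizes are given in GB and the volumes are tagged as SSD storage so that node.getVolumeSets().get("SSD") finds them, could look like this (a hypothetical illustration, not the actual TestPlanner code):

// Hypothetical helper (assumption): builds a DiskBalancerVolume with the
// given path, capacity and used space in GB, tagged as SSD storage so it
// lands in the "SSD" volume set balanced by the test above.
private static final long GB = 1024L * 1024L * 1024L;

private DiskBalancerVolume createVolume(String path, int capacityInGB,
    int usedInGB) {
  DiskBalancerVolume volume = new DiskBalancerVolume();
  volume.setPath(path);
  volume.setUuid(UUID.randomUUID().toString());
  volume.setStorageType("SSD");
  volume.setCapacity(capacityInGB * GB);
  volume.setUsed(usedInGB * GB);
  return volume;
}

With volumes shaped like this, the 100 GB of data on volume100 is spread evenly across three 200 GB disks, which is why the test expects two moves of roughly 33.3 GB each.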
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project Hadoop by Apache.
The class TestDiskBalancer, method testDiskBalancerEndToEnd.
/**
 * This test simulates a real DataNode working with DiskBalancer.
 * <p>
 * Here is the overview of this test:
 * <p>
 * 1. Write a bunch of blocks and move them to one disk to create imbalance.
 * 2. Rewrite the capacity of the disks in the DiskBalancer model so that
 *    the planner will produce a move plan.
 * 3. Execute the move plan and wait until the plan is done.
 * 4. Verify that the source disk has blocks now.
 *
 * @throws Exception
 */
@Test
public void testDiskBalancerEndToEnd() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int blockCount = 100;
  final int blockSize = 1024;
  final int diskCount = 2;
  final int dataNodeCount = 1;
  final int dataNodeIndex = 0;
  final int sourceDiskIndex = 0;
  final long cap = blockSize * 2L * blockCount;
  MiniDFSCluster cluster = new ClusterBuilder()
      .setBlockCount(blockCount)
      .setBlockSize(blockSize)
      .setDiskCount(diskCount)
      .setNumDatanodes(dataNodeCount)
      .setConf(conf)
      .setCapacities(new long[] {cap, cap})
      .build();
  try {
    DataMover dataMover = new DataMover(cluster, dataNodeIndex,
        sourceDiskIndex, conf, blockSize, blockCount);
    dataMover.moveDataToSourceDisk();
    NodePlan plan = dataMover.generatePlan();
    dataMover.executePlan(plan);
    dataMover.verifyPlanExectionDone();
    dataMover.verifyAllVolumesHaveData();
    dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
  } finally {
    cluster.shutdown();
  }
}
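ClusterBuilder and DataMover are helper classes defined inside TestDiskBalancer, so the snippet above is not self-contained. Independently of those helpers, a generated NodePlan can be inspected through the same Step accessors exercised by the planner tests earlier in this page; a small sketch (assuming a plan obtained as above) is:

// Sketch (assumption): log each proposed move in a NodePlan using the Step
// accessors shown in the planner tests above.
for (Step step : plan.getVolumeSetPlans()) {
  System.out.println(step.getSourceVolume().getPath() + " -> "
      + step.getDestinationVolume().getPath() + " : "
      + step.getSizeString(step.getBytesToMove()));
}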
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project Hadoop by Apache.
The class TestDiskBalancer, method testDiskBalancerWhenRemovingVolumes.
/**
 * Tests disk balancer behavior when one of the disks involved in the
 * balancing operation is removed after the plan has been submitted.
 * @throws Exception
 */
@Test
public void testDiskBalancerWhenRemovingVolumes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int blockCount = 100;
  final int blockSize = 1024;
  final int diskCount = 2;
  final int dataNodeCount = 1;
  final int dataNodeIndex = 0;
  final int sourceDiskIndex = 0;
  final long cap = blockSize * 2L * blockCount;
  MiniDFSCluster cluster = new ClusterBuilder()
      .setBlockCount(blockCount)
      .setBlockSize(blockSize)
      .setDiskCount(diskCount)
      .setNumDatanodes(dataNodeCount)
      .setConf(conf)
      .setCapacities(new long[] {cap, cap})
      .build();
  try {
    DataMover dataMover = new DataMover(cluster, dataNodeIndex,
        sourceDiskIndex, conf, blockSize, blockCount);
    dataMover.moveDataToSourceDisk();
    NodePlan plan = dataMover.generatePlan();
    dataMover.executePlanDuringDiskRemove(plan);
    dataMover.verifyAllVolumesHaveData();
    dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
  } catch (Exception e) {
    Assert.fail("Unexpected exception: " + e);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project Hadoop by Apache.
The class TestDiskBalancerRPC, method testSubmitPlanWithInvalidHash.
@Test
public void testSubmitPlanWithInvalidHash() throws Exception {
  RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
  DataNode dataNode = rpcTestHelper.getDataNode();
  String planHash = rpcTestHelper.getPlanHash();
  char[] hashArray = planHash.toCharArray();
  hashArray[0]++;
  planHash = String.valueOf(hashArray);
  int planVersion = rpcTestHelper.getPlanVersion();
  NodePlan plan = rpcTestHelper.getPlan();
  thrown.expect(DiskBalancerException.class);
  thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN_HASH));
  dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
      plan.toJson(), false);
}
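Incrementing the first character of the hash makes it no longer match the plan being submitted, so submitDiskBalancerPlan is expected to fail with Result.INVALID_PLAN_HASH. The thrown field used here is a JUnit 4 ExpectedException rule, and PLAN_FILE is a constant; both are declared elsewhere in TestDiskBalancerRPC and omitted from this snippet. The rule is conventionally declared as follows (the standard org.junit.rules.ExpectedException pattern, shown as a sketch):

// Standard JUnit 4 rule backing the thrown.expect(...) calls above.
@Rule
public ExpectedException thrown = ExpectedException.none();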