Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache: class TestPlanner, method testPlannerScale.
@Test
public void testPlannerScale() throws Exception {
  // It is rare to see more than 48 disks; 256 stresses the planner well beyond a realistic node.
  final int diskCount = 256;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
  NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  // Assuming that our random disks generated at least one step
  assertTrue("No Steps Generated from random disks, very unlikely",
      newPlan.getVolumeSetPlans().size() > 0);
  assertTrue("Steps Generated less than disk count - false",
      newPlan.getVolumeSetPlans().size() < diskCount);
  LOG.info("Number of steps are : %d%n", newPlan.getVolumeSetPlans().size());
}
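The snippets on this page assume the usual TestPlanner scaffolding. The sketch below reconstructs it from the identifiers used above; the import paths follow the hadoop-hdfs disk balancer module layout (connectors, datamodel and planner subpackages) and the SLF4J logger field are inferences, not a verbatim copy of the file header.

  // Assumed scaffolding for the TestPlanner snippets on this page; package names
  // and the logger type are inferred from the code shown, not copied from the file.
  import static org.junit.Assert.assertEquals;
  import static org.junit.Assert.assertTrue;

  import java.net.URI;
  import java.util.UUID;

  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
  import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
  import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
  import org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector;
  import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
  import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
  import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
  import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
  import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
  import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
  import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
  import org.junit.Assert;
  import org.junit.Test;
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class TestPlanner {
    static final Logger LOG = LoggerFactory.getLogger(TestPlanner.class);
    // ... test methods shown on this page ...
  }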
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache: class TestPlanner, method testGreedyPlannerLargeDisksWithData.
@Test
public void testGreedyPlannerLargeDisksWithData() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  // Two groups of disk sizes (1968 GB and 1563 GB) with varying amounts of used data.
  DiskBalancerVolume volume1 = createVolume("volume1", 1968, 88);
  DiskBalancerVolume volume2 = createVolume("volume2", 1968, 88);
  DiskBalancerVolume volume3 = createVolume("volume3", 1968, 111);
  DiskBalancerVolume volume4 = createVolume("volume4", 1968, 111);
  DiskBalancerVolume volume5 = createVolume("volume5", 1968, 30);
  DiskBalancerVolume volume6 = createVolume("volume6", 1563, 30);
  DiskBalancerVolume volume7 = createVolume("volume7", 1563, 30);
  DiskBalancerVolume volume8 = createVolume("volume8", 1563, 30);
  DiskBalancerVolume volume9 = createVolume("volume9", 1563, 210);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  node.addVolume(volume4);
  node.addVolume(volume5);
  node.addVolume(volume6);
  node.addVolume(volume7);
  node.addVolume(volume8);
  node.addVolume(volume9);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(1.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  assertTrue(plan.getVolumeSetPlans().size() > 2);
}
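The createVolume(path, capacityInGB, usedInGB) helper these tests call is not shown on this page. The sketch below is a plausible shape for it, assuming DiskBalancerTestUtil exposes a GB byte constant and a createRandomVolume(StorageType) factory; the exact implementation lives in TestPlanner.java.

  // Plausible shape of the createVolume helper used above; the GB constant and
  // createRandomVolume(StorageType) are assumptions about DiskBalancerTestUtil.
  private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolume volume = util.createRandomVolume(StorageType.SSD);
    volume.setPath(path);
    volume.setCapacity(capacityInGB * DiskBalancerTestUtil.GB);
    volume.setReserved(0);
    volume.setUsed(usedInGB * DiskBalancerTestUtil.GB);
    return volume;
  }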
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache: class TestPlanner, method testGreedyPlannerEqualizeData.
/**
 * In this test we pass three volumes with 30, 20 and 10 GB of data used. We
 * expect the planner to leave 20 GB on each volume.
 * <p/>
 * That is, the plan should say: move 10 GB from volume30 to volume10.
 */
@Test
public void testGreedyPlannerEqualizeData() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  DiskBalancerVolume volume20 = createVolume("volume20", 100, 20);
  DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
  node.addVolume(volume10);
  node.addVolume(volume20);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(5.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should have only one planned move from
  // volume30 to volume10 of 10 GB size.
  assertEquals(1, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume30", step.getSourceVolume().getPath());
  assertEquals("volume10", step.getDestinationVolume().getPath());
  assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
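The expected plan follows from simple arithmetic: the node holds 30 + 20 + 10 = 60 GB across three 100 GB volumes, so the even distribution is 20 GB each, reachable with exactly one 10 GB move from volume30 to volume10. A small sketch of that arithmetic (variable names are illustrative, not part of the test):

  // Worked numbers behind the single-step expectation above.
  int totalUsedGB = 30 + 20 + 10;                       // 60 GB used across the node
  int idealUsedPerVolumeGB = totalUsedGB / 3;           // 20 GB per 100 GB volume
  int surplusOnVolume30GB = 30 - idealUsedPerVolumeGB;  // 10 GB to shed
  int deficitOnVolume10GB = idealUsedPerVolumeGB - 10;  // 10 GB to absorb
  // surplus == deficit == 10, hence the single "10 G" step from volume30 to volume10.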
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache: class TestPlanner, method testGreedyPlannerBalanceVolumeSet.
@Test
public void testGreedyPlannerBalanceVolumeSet() throws Exception {
  URI clusterJson = getClass().getResource("/diskBalancer/data-cluster-3node-3disk.json").toURI();
  ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson, null);
  DiskBalancerCluster cluster = new DiskBalancerCluster(jsonConnector);
  cluster.readClusterInfo();
  Assert.assertEquals(3, cluster.getNodes().size());
  cluster.setNodesToProcess(cluster.getNodes());
  DiskBalancerDataNode node = cluster.getNodes().get(0);
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
}
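This test only exercises the planner against the JSON-defined cluster and asserts nothing about the resulting steps. If you want to see what the planner decided, a hedged sketch that could be appended to the test body is shown below; it reuses only the Step accessors that appear in the other tests on this page.

  // Optional inspection of the generated plan; String.format keeps the sketch
  // independent of whichever logging API the LOG field is bound to.
  for (Step step : plan.getVolumeSetPlans()) {
    LOG.info(String.format("Move %s from %s to %s",
        step.getSizeString(step.getBytesToMove()),
        step.getSourceVolume().getPath(),
        step.getDestinationVolume().getPath()));
  }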
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache: class TestPlanner, method testGreedyPlannerPlanWithDifferentDiskSizes.
@Test
public void testGreedyPlannerPlanWithDifferentDiskSizes() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume1 = createVolume("volume100", 1000, 100);
  DiskBalancerVolume volume2 = createVolume("volume0-1", 500, 0);
  DiskBalancerVolume volume3 = createVolume("volume0-2", 250, 0);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
  NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  assertEquals(2, newPlan.getVolumeSetPlans().size());
  for (Step step : newPlan.getVolumeSetPlans()) {
    if (step.getDestinationVolume().getPath().equals("volume0-1")) {
      assertEquals("volume100", step.getSourceVolume().getPath());
      assertEquals("28.5 G", step.getSizeString(step.getBytesToMove()));
    }
    if (step.getDestinationVolume().getPath().equals("volume0-2")) {
      assertEquals("volume100", step.getSourceVolume().getPath());
      assertEquals("14.3 G", step.getSizeString(step.getBytesToMove()));
    }
  }
  Step step = newPlan.getVolumeSetPlans().get(0);
  assertEquals(0.05714f, step.getIdealStorage(), 0.001f);
}
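The asserted numbers come straight from the ideal-storage ratio: 100 GB of used data spread over 1750 GB of total capacity. A small arithmetic sketch (illustrative variable names, not part of the test):

  // Worked numbers behind the assertions above.
  double totalUsedGB = 100.0;                           // only volume100 holds data
  double totalCapacityGB = 1000.0 + 500.0 + 250.0;      // 1750 GB on the node
  double idealStorage = totalUsedGB / totalCapacityGB;  // 0.05714..., as asserted via getIdealStorage()
  double targetOnVolume01GB = 500.0 * idealStorage;     // roughly 28.6 GB, matching the "28.5 G" step after rounding in the size string
  double targetOnVolume02GB = 250.0 * idealStorage;     // roughly 14.3 GB, matching the "14.3 G" step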