Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
From the class TestDiskBalancer, method testDiskBalancerNameNodeConnectivity.
@Test
public void testDiskBalancerNameNodeConnectivity() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int numDatanodes = 2;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  try {
    cluster.waitActive();
    ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
    DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
    diskBalancerCluster.readClusterInfo();
    assertEquals(numDatanodes, diskBalancerCluster.getNodes().size());
    // The DiskBalancer view of a datanode should mirror the real datanode.
    DataNode dnNode = cluster.getDataNodes().get(0);
    DiskBalancerDataNode dbDnNode = diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
    assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
    assertEquals(dnNode.getDatanodeId().getIpAddr(), dbDnNode.getDataNodeIP());
    assertEquals(dnNode.getDatanodeId().getHostName(), dbDnNode.getDataNodeName());
    try (FsDatasetSpi.FsVolumeReferences ref = dnNode.getFSDataset().getFsVolumeReferences()) {
      assertEquals(ref.size(), dbDnNode.getVolumeCount());
    }
  } finally {
    cluster.shutdown();
  }
}
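The same connector API works outside a MiniDFSCluster test: point ConnectorFactory at a live namenode URI and read the topology. A minimal sketch, assuming a namenode at hdfs://localhost:9000 (a placeholder address) and that getNodes() returns an iterable collection, as its use with size() above suggests:

// Sketch: enumerate DiskBalancerDataNodes from a running namenode.
// The URI is a placeholder; substitute your own namenode address.
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
ClusterConnector connector =
    ConnectorFactory.getCluster(java.net.URI.create("hdfs://localhost:9000"), conf);
DiskBalancerCluster balancerCluster = new DiskBalancerCluster(connector);
balancerCluster.readClusterInfo();
for (DiskBalancerDataNode dbNode : balancerCluster.getNodes()) {
  System.out.println(dbNode.getDataNodeName() + " (" + dbNode.getDataNodeIP()
      + ") reports " + dbNode.getVolumeCount() + " volume(s)");
}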
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
From the class TestPlanner, method testGreedyPlannerEqualDisksNoMoves.
@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  // All disks have the same capacity and hold the same amount of data.
  DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
  DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
  DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // Since all disks hold the same amount of data, no moves should be planned.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
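The createVolume helper that these TestPlanner snippets call is not shown on this page. A plausible reconstruction, treating the DiskBalancerTestUtil.createRandomVolume factory and GB constant as assumptions inferred from the serialization snippet below:

// Hypothetical reconstruction of the createVolume helper used above.
// createRandomVolume(StorageType) and the GB constant are assumptions,
// not confirmed by the snippets on this page.
private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolume volume = util.createRandomVolume(StorageType.SSD);
  volume.setPath(path);
  volume.setCapacity(capacityInGB * DiskBalancerTestUtil.GB);
  volume.setReserved(0);
  volume.setUsed(usedInGB * DiskBalancerTestUtil.GB);
  return volume;
}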
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
From the class TestPlanner, method testNodePlanSerialize.
@Test
public void testNodePlanSerialize() throws Exception {
  final int diskCount = 12;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
  NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  // A plan must survive a round trip through its JSON representation.
  String planString = newPlan.toJson();
  assertNotNull(planString);
  NodePlan copy = NodePlan.parseJson(planString);
  assertNotNull(copy);
  assertEquals(newPlan.getVolumeSetPlans().size(), copy.getVolumeSetPlans().size());
}
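Because toJson() and parseJson() round-trip a plan through a plain string, persisting a plan is ordinary file IO. A minimal sketch using java.nio (fully qualified to avoid a clash with org.apache.hadoop.fs.Path; the file path is a placeholder):

// Sketch: save a plan to disk and restore it. The path is a placeholder.
java.nio.file.Path planFile = java.nio.file.Paths.get("/tmp/node-plan.json");
java.nio.file.Files.write(planFile,
    newPlan.toJson().getBytes(java.nio.charset.StandardCharsets.UTF_8));
String loaded = new String(java.nio.file.Files.readAllBytes(planFile),
    java.nio.charset.StandardCharsets.UTF_8);
NodePlan restored = NodePlan.parseJson(loaded);
assertEquals(newPlan.getVolumeSetPlans().size(), restored.getVolumeSetPlans().size());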
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
From the class TestPlanner, method testGreedyPlannerOneVolumeNoPlanTest.
@Test
public void testGreedyPlannerOneVolumeNoPlanTest() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // With a single volume we should not have any plans for moves.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
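The first argument to the GreedyPlanner constructor is the balancing threshold as a percentage. A short sketch making the single-volume point explicit; it assumes, plausibly, that the empty result does not depend on the threshold when there is no second volume to receive data:

// Even an aggressive threshold cannot produce moves for a lone volume.
GreedyPlanner aggressive = new GreedyPlanner(1.0f, node);
NodePlan emptyPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
aggressive.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), emptyPlan);
assertEquals(0, emptyPlan.getVolumeSetPlans().size());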
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
From the class TestPlanner, method testGreedyPlannerTwoVolume.
@Test
public void testGreedyPlannerTwoVolume() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  DiskBalancerVolume volume10 = createVolume("volume10", 100, 10);
  node.addVolume(volume10);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(5.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // We should have exactly one planned move of 10 GB,
  // from volume30 to volume10.
  assertEquals(1, plan.getVolumeSetPlans().size());
  Step step = plan.getVolumeSetPlans().get(0);
  assertEquals("volume30", step.getSourceVolume().getPath());
  assertEquals("volume10", step.getDestinationVolume().getPath());
  assertEquals("10 G", step.getSizeString(step.getBytesToMove()));
}
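Each Step exposes its source volume, destination volume, and byte count, so a computed plan can be summarized with the same getters the assertions above use. A minimal sketch:

// Print a human-readable summary of a computed plan.
for (Step s : plan.getVolumeSetPlans()) {
  System.out.println(s.getSourceVolume().getPath() + " -> "
      + s.getDestinationVolume().getPath() + " : "
      + s.getSizeString(s.getBytesToMove()));
}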