use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class DBNameNodeConnector method getVolumeInfoFromStorageReports.
/**
 * Reads the relevant fields from each storage volume and populates the
 * DiskBalancer node.
 *
 * @param node    - Disk Balancer Node
 * @param reports - Array of StorageReport
 */
private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node,
    StorageReport[] reports) throws Exception {
  Preconditions.checkNotNull(node);
  Preconditions.checkNotNull(reports);
  for (StorageReport report : reports) {
    DatanodeStorage storage = report.getStorage();
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setCapacity(report.getCapacity());
    volume.setFailed(report.isFailed());
    volume.setUsed(report.getDfsUsed());
    // TODO: Should we do block-pool-level balancing at all? Does it make
    // sense? The Balancer does do that. Right now we only deal with volumes
    // and not block pools.
    volume.setUuid(storage.getStorageID());
    // Skip this volume for the disk balancer if it is read-only (we will not
    // be able to delete from it) or if it has already failed.
    volume.setSkip((storage.getState() == DatanodeStorage.State.READ_ONLY_SHARED)
        || report.isFailed());
    volume.setStorageType(storage.getStorageType().name());
    volume.setIsTransient(storage.getStorageType().isTransient());
    node.addVolume(volume);
  }
}
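For reference, the same field mapping can be exercised by hand, for example in a unit test. The sketch below uses only the setters shown above; the concrete values and the example UUID are illustrative, not taken from the Hadoop source.

// Minimal sketch: populate a DiskBalancerVolume the same way the connector does.
// All values are made up for illustration.
DiskBalancerVolume volume = new DiskBalancerVolume();
volume.setCapacity(500L * 1024 * 1024 * 1024);   // 500 GB raw capacity
volume.setUsed(200L * 1024 * 1024 * 1024);       // 200 GB of DFS data
volume.setFailed(false);
volume.setUuid("DS-example-uuid");               // normally storage.getStorageID()
volume.setStorageType(StorageType.DISK.name());
volume.setIsTransient(false);
// Read-only or failed volumes would be marked as skipped, mirroring the logic above.
volume.setSkip(false);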
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class TestDataModels method testVolumeSerialize.
@Test
public void testVolumeSerialize() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolume volume = util.createRandomVolume(StorageType.DISK);
  String originalString = volume.toJson();
  DiskBalancerVolume parsedVolume = DiskBalancerVolume.parseJson(originalString);
  String parsedString = parsedVolume.toJson();
  Assert.assertEquals(originalString, parsedString);
}
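Besides comparing whole JSON strings, the round trip can also be checked field by field. This is a sketch, assuming the getter counterparts of the setters used in these examples (getCapacity(), getUsed(), getUuid()) behave as plain bean accessors; it is not part of the original test.

// Field-level assertions make a mismatch easier to diagnose than a string diff.
DiskBalancerVolume parsed = DiskBalancerVolume.parseJson(volume.toJson());
Assert.assertEquals(volume.getCapacity(), parsed.getCapacity());
Assert.assertEquals(volume.getUsed(), parsed.getUsed());
Assert.assertEquals(volume.getUuid(), parsed.getUuid());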
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class TestPlanner method testGreedyPlannerEqualDisksNoMoves.
@Test
public void testGreedyPlannerEqualDisksNoMoves() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // All disks hold the same amount of data.
  DiskBalancerVolume volume1 = createVolume("volume1", 100, 30);
  DiskBalancerVolume volume2 = createVolume("volume2", 100, 30);
  DiskBalancerVolume volume3 = createVolume("volume3", 100, 30);
  node.addVolume(volume1);
  node.addVolume(volume2);
  node.addVolume(volume3);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // Since every disk holds the same amount of data, no moves should be planned.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
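The test relies on a createVolume helper defined in TestPlanner. A rough sketch of what such a helper can look like is shown below; the setters are real DiskBalancerVolume methods, but the GB constant and the exact construction are assumptions, not the actual Hadoop helper.

// Sketch of a createVolume helper: an SSD volume with capacity and used space in GB.
private static final long GB = 1024L * 1024 * 1024;

private DiskBalancerVolume createVolume(String path, int capacityInGB, int usedInGB) {
  DiskBalancerVolume volume = new DiskBalancerVolume();
  volume.setPath(path);
  volume.setCapacity(capacityInGB * GB);
  volume.setUsed(usedInGB * GB);
  // The tests look up the volume set by "SSD", so the sketch tags volumes accordingly.
  volume.setStorageType(StorageType.SSD.name());
  volume.setUuid(UUID.randomUUID().toString());
  return volume;
}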
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class TestPlanner method testNodePlanSerialize.
@Test
public void testNodePlanSerialize() throws Exception {
  final int diskCount = 12;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet =
      util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
  NodePlan newPlan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  String planString = newPlan.toJson();
  assertNotNull(planString);
  NodePlan copy = NodePlan.parseJson(planString);
  assertNotNull(copy);
  assertEquals(newPlan.getVolumeSetPlans().size(),
      copy.getVolumeSetPlans().size());
}
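A natural extra check, not in the original test and only meaningful if NodePlan serialization is deterministic, is to re-serialize the parsed copy and compare it against the original JSON:

// Round-trip the copy again; equal strings imply the plan survived parsing intact.
assertEquals(planString, copy.toJson());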
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class TestPlanner method testGreedyPlannerOneVolumeNoPlanTest.
@Test
public void testGreedyPlannerOneVolumeNoPlanTest() throws Exception {
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  DiskBalancerVolume volume30 = createVolume("volume30", 100, 30);
  node.addVolume(volume30);
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  Assert.assertEquals(1, cluster.getNodes().size());
  GreedyPlanner planner = new GreedyPlanner(10.0f, node);
  NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
  // With a single volume we should not have any plans for moves.
  assertEquals(0, plan.getVolumeSetPlans().size());
}
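For contrast, a node with two noticeably unbalanced volumes should make the greedy planner schedule at least one move. The sketch below is illustrative only: it reuses the createVolume helper discussed earlier, and the exact number of planned steps is left open since it depends on the planner's threshold logic.

// Hypothetical follow-on check: equally sized disks at 90% and 10% used,
// well past a 10% threshold, so at least one move should be planned.
NullConnector connector = new NullConnector();
DiskBalancerCluster skewedCluster = new DiskBalancerCluster(connector);
DiskBalancerDataNode skewedNode =
    new DiskBalancerDataNode(UUID.randomUUID().toString());
skewedNode.addVolume(createVolume("volumeFull", 100, 90));
skewedNode.addVolume(createVolume("volumeEmpty", 100, 10));
connector.addNode(skewedNode);
skewedCluster.readClusterInfo();
GreedyPlanner skewedPlanner = new GreedyPlanner(10.0f, skewedNode);
NodePlan skewedPlan =
    new NodePlan(skewedNode.getDataNodeName(), skewedNode.getDataNodePort());
skewedPlanner.balanceVolumeSet(skewedNode,
    skewedNode.getVolumeSets().get("SSD"), skewedPlan);
Assert.assertTrue(skewedPlan.getVolumeSetPlans().size() > 0);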