Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
From the class DiskBalancerTestUtil, method createRandomVolumeSet:
/**
 * Creates a volume set populated with random volumes.
 *
 * @param type      - Storage Type
 * @param diskCount - How many disks you need.
 * @return volumeSet
 * @throws Exception
 */
public DiskBalancerVolumeSet createRandomVolumeSet(StorageType type,
    int diskCount) throws Exception {
  Preconditions.checkState(diskCount > 0);
  DiskBalancerVolumeSet volumeSet =
      new DiskBalancerVolumeSet(type.isTransient());
  for (int x = 0; x < diskCount; x++) {
    volumeSet.addVolume(createRandomVolume(type));
  }
  assert (volumeSet.getVolumeCount() == diskCount);
  return volumeSet;
}
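The helper can also be exercised directly, outside JUnit. A minimal sketch, assuming the same imports as the tests above and using only accessors that already appear in this listing (getVolumes, getUuid, getStorageType):

// Build a small random DISK volume set and list its volumes.
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerVolumeSet diskSet = util.createRandomVolumeSet(StorageType.DISK, 4);
for (DiskBalancerVolume vol : diskSet.getVolumes()) {
  System.out.println(vol.getUuid() + " -> " + vol.getStorageType());
}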
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
From the class TestDataModels, method testCreateRandomVolumeSet:
@Test
public void testCreateRandomVolumeSet() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet = util.createRandomVolumeSet(StorageType.SSD, 10);
  Assert.assertEquals(10, vSet.getVolumeCount());
  Assert.assertEquals(StorageType.SSD.toString(),
      vSet.getVolumes().get(0).getStorageType());
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
From the class TestPlanner, method testNodePlanSerialize:
@Test
public void testNodePlanSerialize() throws Exception {
  final int diskCount = 12;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet =
      util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
  NodePlan newPlan =
      new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  String planString = newPlan.toJson();
  assertNotNull(planString);
  NodePlan copy = NodePlan.parseJson(planString);
  assertNotNull(copy);
  assertEquals(newPlan.getVolumeSetPlans().size(),
      copy.getVolumeSetPlans().size());
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
From the class TestPlanner, method testPlannerScale:
@Test
public void testPlannerScale() throws Exception {
  // Far more than the ~48 disks typically seen on a real node; exercises planner scaling.
  final int diskCount = 256;
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerVolumeSet vSet =
      util.createRandomVolumeSet(StorageType.SSD, diskCount);
  NullConnector nullConnector = new NullConnector();
  DiskBalancerCluster cluster = new DiskBalancerCluster(nullConnector);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  int diskNum = 0;
  for (DiskBalancerVolume vol : vSet.getVolumes()) {
    vol.setPath("volume" + diskNum++);
    node.addVolume(vol);
  }
  nullConnector.addNode(node);
  cluster.readClusterInfo();
  GreedyPlanner newPlanner = new GreedyPlanner(1.0f, node);
  NodePlan newPlan =
      new NodePlan(node.getDataNodeName(), node.getDataNodePort());
  newPlanner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), newPlan);
  // Assuming that our random disks generated at least one step
  assertTrue("No Steps Generated from random disks, very unlikely",
      newPlan.getVolumeSetPlans().size() > 0);
  assertTrue("Steps Generated less than disk count - false",
      newPlan.getVolumeSetPlans().size() < diskCount);
  LOG.info("Number of steps are : {}", newPlan.getVolumeSetPlans().size());
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
From the class Command, method populatePathNames:
/**
 * Reads the physical path of each disk we are balancing. This information
 * makes the disk balancer output human friendly; it is not used in the
 * balancing itself.
 *
 * @param node - Disk Balancer Node.
 * @throws IOException if the datanode cannot be reached over RPC.
 */
protected void populatePathNames(DiskBalancerDataNode node) throws IOException {
  // No RPC call is needed when the cluster is read from a local file.
  if (getClusterURI().getScheme().startsWith("file")) {
    return;
  }
  String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
  ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
  String volumeNameJson = dnClient.getDiskBalancerSetting(
      DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
  @SuppressWarnings("unchecked")
  Map<String, String> volumeMap = READER.readValue(volumeNameJson);
  for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
    for (DiskBalancerVolume vol : set.getVolumes()) {
      if (volumeMap.containsKey(vol.getUuid())) {
        vol.setPath(volumeMap.get(vol.getUuid()));
      }
    }
  }
}
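For reference, the value parsed above is a flat JSON map from volume UUID to physical path. A minimal standalone sketch of reading such a payload with a Jackson ObjectReader, in the spirit of the READER used by populatePathNames; the class name and the sample payload are illustrative assumptions, not output captured from a real datanode:

import java.io.IOException;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

public class VolumeNameMapSketch {
  // Reader typed for a Map, mirroring how READER is used in populatePathNames.
  private static final ObjectReader READER =
      new ObjectMapper().readerFor(Map.class);

  public static void main(String[] args) throws IOException {
    // Illustrative payload: volume UUID -> physical path on the datanode.
    String volumeNameJson =
        "{\"b8f7b6e2-1111-4e9a-8c33-aaaaaaaaaaaa\":\"/data/disk1\","
            + "\"b8f7b6e2-2222-4e9a-8c33-bbbbbbbbbbbb\":\"/data/disk2\"}";

    Map<String, String> volumeMap = READER.readValue(volumeNameJson);
    volumeMap.forEach((uuid, path) -> System.out.println(uuid + " -> " + path));
  }
}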