use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class GreedyPlanner method removeSkipVolumes.
// Removes all volumes that are part of the volumeSet but have the skip flag set.
private void removeSkipVolumes(DiskBalancerVolumeSet currentSet) {
  List<DiskBalancerVolume> volumeList = currentSet.getVolumes();
  Iterator<DiskBalancerVolume> volumeIterator = volumeList.iterator();
  while (volumeIterator.hasNext()) {
    DiskBalancerVolume vol = volumeIterator.next();
    if (vol.isSkip() || vol.isFailed()) {
      currentSet.removeVolume(vol);
    }
  }
  currentSet.computeVolumeDataDensity();
  printQueue(currentSet.getSortedQueue());
}
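removeSkipVolumes is private, so callers influence it only by flagging volumes before planning. A minimal sketch, assuming DiskBalancerVolume exposes setSkip(boolean) and setFailed(boolean) to match the isSkip()/isFailed() getters checked above:

DiskBalancerVolume dataDisk = new DiskBalancerVolume();
dataDisk.setPath("/data/disk1");          // illustrative path
dataDisk.setSkip(true);                   // operator asked to leave this disk alone

DiskBalancerVolume badDisk = new DiskBalancerVolume();
badDisk.setPath("/data/disk2");           // illustrative path
badDisk.setFailed(true);                  // failed disks are never move sources or targets

// Any volume set holding these volumes sheds them on the next
// removeSkipVolumes(...) pass inside balanceVolumeSet(...).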
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class GreedyPlanner method balanceVolumeSet.
/**
* Computes Steps to make a DiskBalancerVolumeSet Balanced.
*
* @param node - DiskBalancerDataNode
* @param vSet - DiskBalancerVolumeSet
* @param plan - NodePlan
*/
public void balanceVolumeSet(DiskBalancerDataNode node,
    DiskBalancerVolumeSet vSet, NodePlan plan) throws Exception {
  Preconditions.checkNotNull(vSet);
  Preconditions.checkNotNull(plan);
  Preconditions.checkNotNull(node);
  DiskBalancerVolumeSet currentSet = new DiskBalancerVolumeSet(vSet);
  while (currentSet.isBalancingNeeded(this.threshold)) {
    removeSkipVolumes(currentSet);
    DiskBalancerVolume lowVolume = currentSet.getSortedQueue().first();
    DiskBalancerVolume highVolume = currentSet.getSortedQueue().last();
    Step nextStep = null;
    // Create a move request only if neither disk is marked to be skipped.
    if (!lowVolume.isSkip() && !highVolume.isSkip()) {
      nextStep = computeMove(currentSet, lowVolume, highVolume);
    } else {
      LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}",
          lowVolume.getPath(), highVolume.getPath());
    }
    applyStep(nextStep, currentSet, lowVolume, highVolume);
    if (nextStep != null) {
      LOG.debug("Step : {} ", nextStep.toString());
      plan.addStep(nextStep);
    }
  }
  String message = String.format("Disk Volume set %s Type : %s plan completed.",
      currentSet.getSetID(), currentSet.getVolumes().get(0).getStorageType());
  plan.setNodeName(node.getDataNodeName());
  plan.setNodeUUID(node.getDataNodeUUID());
  plan.setTimeStamp(Time.now());
  plan.setPort(node.getDataNodePort());
  LOG.info(message);
}
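A small driver sketch for the planner; it assumes the GreedyPlanner(threshold, node) and NodePlan(name, port) constructors, the storage-type-keyed map returned by getVolumeSets(), and NodePlan.getVolumeSetPlans(), so treat the exact signatures as unverified:

DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node =
    util.createRandomDataNode(new StorageType[] {StorageType.SSD}, 4);

GreedyPlanner planner = new GreedyPlanner(10.0f, node);   // 10 percent threshold
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());

// getVolumeSets() is keyed by storage type; the "SSD" key is assumed here.
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);

for (Step step : plan.getVolumeSetPlans()) {
  System.out.println(step);   // proposed volume-to-volume moves
}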
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class DiskBalancerTestUtil method createRandomVolume.
/**
 * Creates a random volume for testing purposes.
 *
 * @param type - StorageType
 * @return DiskBalancerVolume
 */
public DiskBalancerVolume createRandomVolume(StorageType type) {
  DiskBalancerVolume volume = new DiskBalancerVolume();
  volume.setPath("/tmp/disk/" + getRandomName(10));
  volume.setStorageType(type.toString());
  volume.setTransient(type.isTransient());
  volume.setCapacity(getRandomCapacity());
  volume.setReserved(getRandomReserved(volume.getCapacity()));
  volume.setUsed(getRandomDfsUsed(volume.getCapacity(), volume.getReserved()));
  volume.setUuid(UUID.randomUUID().toString());
  return volume;
}
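A short usage sketch; the generated values are random, so tests that need exact numbers overwrite them afterwards, as the TestDataModels snippet further down does:

DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerVolume ssd = util.createRandomVolume(StorageType.SSD);

// Override the randomized sizes when a test depends on exact figures.
ssd.setCapacity(DiskBalancerTestUtil.TB);
ssd.setReserved(100 * DiskBalancerTestUtil.GB);
ssd.setUsed(200 * DiskBalancerTestUtil.GB);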
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class DiskBalancerTestUtil method createRandomDataNode.
/**
 * Creates a random DataNode for testing.
 *
 * @param diskTypes - Storage types needed in the node
 * @param diskCount - Disk count - that many disks of each type are created
 * @return DiskBalancerDataNode
 * @throws Exception
 */
public DiskBalancerDataNode createRandomDataNode(StorageType[] diskTypes,
    int diskCount) throws Exception {
  Preconditions.checkState(diskTypes.length > 0);
  Preconditions.checkState(diskCount > 0);
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  for (StorageType t : diskTypes) {
    DiskBalancerVolumeSet vSet = createRandomVolumeSet(t, diskCount);
    for (DiskBalancerVolume v : vSet.getVolumes()) {
      node.addVolume(v);
    }
  }
  return node;
}
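A usage sketch of the helper; the disk count and storage types are illustrative:

DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node = util.createRandomDataNode(
    new StorageType[] {StorageType.DISK, StorageType.SSD}, 4);

// Volumes are grouped into one DiskBalancerVolumeSet per storage type.
System.out.println(node.getVolumeSets().size());   // expected: 2 sets, 4 volumes each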
use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.
the class TestDataModels method testNoBalancingNeededEvenDataSpread.
@Test
public void testNoBalancingNeededEvenDataSpread() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerDataNode node =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // Create two disks that hold exactly the same data; isBalancingNeeded
  // should report that no balancing is required.
  DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
  v1.setCapacity(DiskBalancerTestUtil.TB);
  v1.setReserved(100 * DiskBalancerTestUtil.GB);
  v1.setUsed(500 * DiskBalancerTestUtil.GB);
  DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
  v2.setCapacity(DiskBalancerTestUtil.TB);
  v2.setReserved(100 * DiskBalancerTestUtil.GB);
  v2.setUsed(500 * DiskBalancerTestUtil.GB);
  node.addVolume(v1);
  node.addVolume(v2);
  for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
    Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
  }
}
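For contrast, a hedged counterpart sketch: skewing the used space across two otherwise identical volumes should make the same check report that balancing is needed (threshold and sizes are illustrative):

DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode skewedNode =
    new DiskBalancerDataNode(UUID.randomUUID().toString());

DiskBalancerVolume empty = util.createRandomVolume(StorageType.SSD);
empty.setCapacity(DiskBalancerTestUtil.TB);
empty.setReserved(100 * DiskBalancerTestUtil.GB);
empty.setUsed(0);

DiskBalancerVolume full = util.createRandomVolume(StorageType.SSD);
full.setCapacity(DiskBalancerTestUtil.TB);
full.setReserved(100 * DiskBalancerTestUtil.GB);
full.setUsed(800 * DiskBalancerTestUtil.GB);

skewedNode.addVolume(empty);
skewedNode.addVolume(full);
for (DiskBalancerVolumeSet vsets : skewedNode.getVolumeSets().values()) {
  Assert.assertTrue(vsets.isBalancingNeeded(10.0f));
}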