Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
The class ReportCommand, method recordNodeReport.
/**
 * Puts the node report lines into the given string builder.
 */
private void recordNodeReport(StrBuilder result, DiskBalancerDataNode dbdn,
    final String nodeFormat, final String volumeFormat) throws Exception {
  final String trueStr = "True";
  final String falseStr = "False";
  // Resolve the storage paths for the datanode's volumes.
  populatePathNames(dbdn);
  result.appendln(String.format(nodeFormat,
      dbdn.getDataNodeName(), dbdn.getDataNodeIP(), dbdn.getDataNodePort(),
      dbdn.getDataNodeUUID(), dbdn.getVolumeCount(),
      dbdn.getNodeDataDensity()));
  List<String> volumeList = Lists.newArrayList();
  for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
    for (DiskBalancerVolume vol : vset.getVolumes()) {
      volumeList.add(String.format(volumeFormat,
          vol.getStorageType(), vol.getPath(),
          vol.getUsedRatio(), vol.getUsed(), vol.getCapacity(),
          vol.getFreeRatio(), vol.getFreeSpace(), vol.getCapacity(),
          vol.isFailed() ? trueStr : falseStr,
          vol.isReadOnly() ? trueStr : falseStr,
          vol.isSkip() ? trueStr : falseStr,
          vol.isTransient() ? trueStr : falseStr));
    }
  }
  // Sort volume lines for stable, readable output.
  Collections.sort(volumeList);
  result.appendln(StringUtils.join(volumeList.toArray(),
      System.lineSeparator()));
}
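For orientation, a caller might wire this method up roughly as below. The two format strings are hypothetical stand-ins (the real ReportCommand defines its own column layout); they only need to accept six and twelve arguments respectively, matching the String.format calls above.

// Hypothetical format strings; ReportCommand builds its own layout.
final String nodeFormat =
    "%s(%s):%d [UUID:%s] volumes=%d density=%.5f";
final String volumeFormat =
    "[%s: %s] used=%.2f (%d/%d) free=%.2f (%d/%d)"
        + " failed=%s readOnly=%s skip=%s transient=%s";
StrBuilder report = new StrBuilder();
recordNodeReport(report, dbdn, nodeFormat, volumeFormat);
System.out.println(report.toString());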
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
The class GreedyPlanner, method plan.
/**
 * Computes a node plan for the given node.
 *
 * @param node - the datanode to plan for.
 * @return NodePlan
 * @throws Exception
 */
@Override
public NodePlan plan(DiskBalancerDataNode node) throws Exception {
  long startTime = Time.monotonicNow();
  NodePlan plan = new NodePlan(node.getDataNodeName(),
      node.getDataNodePort());
  LOG.info("Starting plan for Node : {}:{}",
      node.getDataNodeName(), node.getDataNodePort());
  // Keep balancing each volume set until the node as a whole is
  // within the configured threshold.
  while (node.isBalancingNeeded(this.threshold)) {
    for (DiskBalancerVolumeSet vSet : node.getVolumeSets().values()) {
      balanceVolumeSet(node, vSet, plan);
    }
  }
  long endTime = Time.monotonicNow();
  String message = String.format("Compute Plan for Node : %s:%d took %d ms",
      node.getDataNodeName(), node.getDataNodePort(), endTime - startTime);
  LOG.info(message);
  return plan;
}
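A minimal usage sketch, assuming the two-argument GreedyPlanner constructor (threshold percentage plus node) and an already-populated dataNode; the 10% threshold is illustrative:

// Illustrative driver; the threshold value and dataNode are assumptions.
float thresholdPercent = 10.0f;
GreedyPlanner planner = new GreedyPlanner(thresholdPercent, dataNode);
NodePlan plan = planner.plan(dataNode);
// The resulting plan holds the ordered move steps for this datanode.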
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.
The class GreedyPlanner, method balanceVolumeSet.
/**
 * Computes the steps needed to make a DiskBalancerVolumeSet balanced.
 *
 * @param node - the datanode that owns this volume set.
 * @param vSet - DiskBalancerVolumeSet to balance.
 * @param plan - NodePlan that collects the computed steps.
 */
public void balanceVolumeSet(DiskBalancerDataNode node,
    DiskBalancerVolumeSet vSet, NodePlan plan) throws Exception {
  Preconditions.checkNotNull(vSet);
  Preconditions.checkNotNull(plan);
  Preconditions.checkNotNull(node);
  DiskBalancerVolumeSet currentSet = new DiskBalancerVolumeSet(vSet);
  while (currentSet.isBalancingNeeded(this.threshold)) {
    removeSkipVolumes(currentSet);
    DiskBalancerVolume lowVolume = currentSet.getSortedQueue().first();
    DiskBalancerVolume highVolume = currentSet.getSortedQueue().last();
    // Create a move request from the most-used to the least-used
    // volume, unless either volume is marked to be skipped.
    Step nextStep = null;
    if (!lowVolume.isSkip() && !highVolume.isSkip()) {
      nextStep = computeMove(currentSet, lowVolume, highVolume);
    } else {
      LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}",
          lowVolume.getPath(), highVolume.getPath());
    }
    applyStep(nextStep, currentSet, lowVolume, highVolume);
    if (nextStep != null) {
      LOG.debug("Step : {} ", nextStep.toString());
      plan.addStep(nextStep);
    }
  }
  String message = String.format(
      "Disk Volume set %s Type : %s plan completed.",
      currentSet.getSetID(),
      currentSet.getVolumes().get(0).getStorageType());
  plan.setNodeName(node.getDataNodeName());
  plan.setNodeUUID(node.getDataNodeUUID());
  plan.setTimeStamp(Time.now());
  plan.setPort(node.getDataNodePort());
  LOG.info(message);
}
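The greedy core, repeatedly pairing the fullest and emptiest volumes and moving bytes between them until the set is within threshold, can be sketched in isolation. The Volume class and the move arithmetic below are simplified stand-ins for the Hadoop types, not the actual implementation:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

/** Toy model of the greedy pairing loop; not the Hadoop types. */
public final class GreedySketch {

  static final class Volume {
    final String path;
    long used;
    final long capacity;

    Volume(String path, long used, long capacity) {
      this.path = path;
      this.used = used;
      this.capacity = capacity;
    }

    double usedRatio() {
      return (double) used / capacity;
    }
  }

  /** Moves bytes from the fullest to the emptiest volume until the
   *  spread of used ratios falls within the threshold. */
  static void balance(List<Volume> volumes, double threshold) {
    Comparator<Volume> byUsage =
        Comparator.comparingDouble(Volume::usedRatio);
    while (true) {
      Volume low = volumes.stream().min(byUsage).orElseThrow();
      Volume high = volumes.stream().max(byUsage).orElseThrow();
      if (high.usedRatio() - low.usedRatio() <= threshold) {
        break; // Balanced within the threshold.
      }
      // Move enough bytes to pull the pair toward their average ratio,
      // capped by what the emptier volume can absorb.
      double avg = (high.usedRatio() + low.usedRatio()) / 2;
      long bytesToMove = Math.min(
          high.used - (long) (avg * high.capacity),
          (long) (avg * low.capacity) - low.used);
      if (bytesToMove <= 0) {
        break; // Rounding leaves nothing meaningful to move.
      }
      high.used -= bytesToMove;
      low.used += bytesToMove;
      System.out.printf("Move %d bytes: %s -> %s%n",
          bytesToMove, high.path, low.path);
    }
  }

  public static void main(String[] args) {
    List<Volume> vols = new ArrayList<>(List.of(
        new Volume("/data/disk1", 900, 1000),
        new Volume("/data/disk2", 100, 1000),
        new Volume("/data/disk3", 500, 1000)));
    balance(vols, 0.10); // 10% spread threshold, analogous to this.threshold.
  }
}

Running main prints a single move of 400 bytes from /data/disk1 to /data/disk2, after which all three volumes sit at a 0.5 used ratio and the loop terminates.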