Example 11 with DiskBalancerVolumeSet

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

the class ReportCommand method recordNodeReport.

/**
   * Appends the node report lines to the string buffer.
   */
private void recordNodeReport(StrBuilder result, DiskBalancerDataNode dbdn,
      final String nodeFormat, final String volumeFormat) throws Exception {
    final String trueStr = "True";
    final String falseStr = "False";
    // get storage path of datanode
    populatePathNames(dbdn);
    result.appendln(String.format(nodeFormat,
        dbdn.getDataNodeName(),
        dbdn.getDataNodeIP(),
        dbdn.getDataNodePort(),
        dbdn.getDataNodeUUID(),
        dbdn.getVolumeCount(),
        dbdn.getNodeDataDensity()));
    List<String> volumeList = Lists.newArrayList();
    for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
        for (DiskBalancerVolume vol : vset.getVolumes()) {
            volumeList.add(String.format(volumeFormat,
                vol.getStorageType(),
                vol.getPath(),
                vol.getUsedRatio(),
                vol.getUsed(),
                vol.getCapacity(),
                vol.getFreeRatio(),
                vol.getFreeSpace(),
                vol.getCapacity(),
                vol.isFailed() ? trueStr : falseStr,
                vol.isReadOnly() ? trueStr : falseStr,
                vol.isSkip() ? trueStr : falseStr,
                vol.isTransient() ? trueStr : falseStr));
        }
    }
    Collections.sort(volumeList);
    result.appendln(StringUtils.join(volumeList.toArray(), System.lineSeparator()));
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
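
The nodeFormat and volumeFormat strings are supplied by the caller. A minimal sketch of format strings whose placeholders line up with the argument order above (illustrative stand-ins, not the actual ReportCommand constants):

// Hypothetical node line: (name, IP, port, UUID, volume count, data density).
final String nodeFormat =
    "%s[%s:%d] - <%s>: %d volumes with node data density %.2f.";
// Hypothetical volume line: (type, path, usedRatio, used, capacity,
// freeRatio, free, capacity, isFailed, isReadOnly, isSkip, isTransient).
final String volumeFormat =
    "[%s: volume-%s] - %.2f used: %d/%d, %.2f free: %d/%d, "
        + "isFailed: %s, isReadOnly: %s, isSkip: %s, isTransient: %s.";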

Example 12 with DiskBalancerVolumeSet

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

the class GreedyPlanner method plan.

/**
   * Computes a node plan for the given node.
   *
   * @return NodePlan
   * @throws Exception
   */
@Override
public NodePlan plan(DiskBalancerDataNode node) throws Exception {
    long startTime = Time.monotonicNow();
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    LOG.info("Starting plan for Node : {}:{}", node.getDataNodeName(), node.getDataNodePort());
    while (node.isBalancingNeeded(this.threshold)) {
        for (DiskBalancerVolumeSet vSet : node.getVolumeSets().values()) {
            balanceVolumeSet(node, vSet, plan);
        }
    }
    long endTime = Time.monotonicNow();
    String message = String.format("Compute Plan for Node : %s:%d took %d ms ", node.getDataNodeName(), node.getDataNodePort(), endTime - startTime);
    LOG.info(message);
    return plan;
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet)
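
A minimal sketch of calling the planner (illustrative wiring, assuming the GreedyPlanner(float, DiskBalancerDataNode) constructor and the NodePlan#getVolumeSetPlans accessor; node is a previously populated DiskBalancerDataNode):

// Hypothetical usage: compute a plan with a 10% threshold.
GreedyPlanner planner = new GreedyPlanner(10.0f, node);
NodePlan plan = planner.plan(node);
// Each Step describes one volume-to-volume move chosen by the greedy loop.
for (Step step : plan.getVolumeSetPlans()) {
    System.out.println(step);
}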

Example 13 with DiskBalancerVolumeSet

use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

the class GreedyPlanner method balanceVolumeSet.

/**
   * Computes the steps needed to make a DiskBalancerVolumeSet balanced.
   *
   * @param node - DiskBalancerDataNode
   * @param vSet - DiskBalancerVolumeSet
   * @param plan - NodePlan
   */
public void balanceVolumeSet(DiskBalancerDataNode node,
      DiskBalancerVolumeSet vSet, NodePlan plan) throws Exception {
    Preconditions.checkNotNull(vSet);
    Preconditions.checkNotNull(plan);
    Preconditions.checkNotNull(node);
    DiskBalancerVolumeSet currentSet = new DiskBalancerVolumeSet(vSet);
    while (currentSet.isBalancingNeeded(this.threshold)) {
        removeSkipVolumes(currentSet);
        DiskBalancerVolume lowVolume = currentSet.getSortedQueue().first();
        DiskBalancerVolume highVolume = currentSet.getSortedQueue().last();
        Step nextStep = null;
        // Then we create a move request.
        if (!lowVolume.isSkip() && !highVolume.isSkip()) {
            nextStep = computeMove(currentSet, lowVolume, highVolume);
        } else {
            LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}", lowVolume.getPath(), highVolume.getPath());
        }
        applyStep(nextStep, currentSet, lowVolume, highVolume);
        if (nextStep != null) {
            LOG.debug("Step : {} ", nextStep.toString());
            plan.addStep(nextStep);
        }
    }
    String message = String.format("Disk Volume set %s Type : %s plan completed.", currentSet.getSetID(), currentSet.getVolumes().get(0).getStorageType());
    plan.setNodeName(node.getDataNodeName());
    plan.setNodeUUID(node.getDataNodeUUID());
    plan.setTimeStamp(Time.now());
    plan.setPort(node.getDataNodePort());
    LOG.info(message);
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
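
The loop above always pairs the least-used and most-used volumes in the set. A self-contained sketch of that greedy pairing on plain numbers (not Hadoop code; the threshold here is simplified to a deviation from the mean used ratio):

import java.util.Arrays;

public class GreedyBalanceSketch {
    public static void main(String[] args) {
        // Hypothetical used ratios (percent) for three volumes in one set.
        double[] used = { 90, 40, 10 };
        // Simplified threshold: allowed deviation from the mean used ratio.
        double threshold = 10;
        double mean = Arrays.stream(used).average().orElse(0);
        while (true) {
            // Pair the least-used (low) and most-used (high) volumes.
            int low = 0, high = 0;
            for (int i = 0; i < used.length; i++) {
                if (used[i] < used[low]) low = i;
                if (used[i] > used[high]) high = i;
            }
            if (used[high] - mean <= threshold && mean - used[low] <= threshold) {
                break; // every volume is within the threshold of the mean
            }
            // Move just enough to bring one of the pair exactly to the mean;
            // each pass therefore settles at least one volume for good.
            double move = Math.min(used[high] - mean, mean - used[low]);
            used[high] -= move;
            used[low] += move;
            System.out.printf("move %.1f%% from volume %d to volume %d -> %s%n",
                move, high, low, Arrays.toString(used));
        }
    }
}

Each pass pins at least one of the two extremes exactly to the mean, so the loop terminates after at most one pass per volume.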

Aggregations

DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) 13
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) 10
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) 7
Test (org.junit.Test) 7
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) 2
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) 2
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) 2
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) 2
StorageType (org.apache.hadoop.fs.StorageType) 1
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) 1