Example 6 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

The class GreedyPlanner, method plan().

/**
   * Computes a node plan for the given node.
   *
   * @param node - the DiskBalancerDataNode to plan for
   * @return NodePlan for the node
   * @throws Exception
   */
@Override
public NodePlan plan(DiskBalancerDataNode node) throws Exception {
    long startTime = Time.monotonicNow();
    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
    LOG.info("Starting plan for Node : {}:{}", node.getDataNodeName(), node.getDataNodePort());
    while (node.isBalancingNeeded(this.threshold)) {
        for (DiskBalancerVolumeSet vSet : node.getVolumeSets().values()) {
            balanceVolumeSet(node, vSet, plan);
        }
    }
    long endTime = Time.monotonicNow();
    String message = String.format("Compute Plan for Node : %s:%d took %d ms ", node.getDataNodeName(), node.getDataNodePort(), endTime - startTime);
    LOG.info(message);
    return plan;
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet)
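
A minimal sketch of driving this planner end to end. The GreedyPlanner(threshold, node) constructor and the NodePlan#getVolumeSetPlans() accessor are assumptions here, not shown above; the createRandomDataNode helper is the one from Example 8.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;

public class GreedyPlannerSketch {
    public static void main(String[] args) throws Exception {
        // Build a synthetic node with ten DISK volumes (helper from Example 8).
        DiskBalancerDataNode node = new DiskBalancerTestUtil()
            .createRandomDataNode(new StorageType[] { StorageType.DISK }, 10);
        // Assumed constructor: a 10.0f threshold means a volume must deviate
        // more than 10% from the ideal spread before data is moved.
        GreedyPlanner planner = new GreedyPlanner(10.0f, node);
        NodePlan plan = planner.plan(node);
        // Assumed accessor; each Step describes one volume-to-volume move.
        System.out.println(plan.getVolumeSetPlans().size() + " steps planned");
    }
}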

Example 7 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

The class GreedyPlanner, method balanceVolumeSet().

/**
   * Computes the steps needed to make a DiskBalancerVolumeSet balanced.
   *
   * @param node - DiskBalancerDataNode the volume set belongs to
   * @param vSet - DiskBalancerVolumeSet to balance
   * @param plan - NodePlan to add the computed steps to
   * @throws Exception
   */
public void balanceVolumeSet(DiskBalancerDataNode node, DiskBalancerVolumeSet vSet, NodePlan plan) throws Exception {
    Preconditions.checkNotNull(vSet);
    Preconditions.checkNotNull(plan);
    Preconditions.checkNotNull(node);
    DiskBalancerVolumeSet currentSet = new DiskBalancerVolumeSet(vSet);
    while (currentSet.isBalancingNeeded(this.threshold)) {
        removeSkipVolumes(currentSet);
        DiskBalancerVolume lowVolume = currentSet.getSortedQueue().first();
        DiskBalancerVolume highVolume = currentSet.getSortedQueue().last();
        Step nextStep = null;
        // If neither volume is being skipped, create a move request.
        if (!lowVolume.isSkip() && !highVolume.isSkip()) {
            nextStep = computeMove(currentSet, lowVolume, highVolume);
        } else {
            LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}", lowVolume.getPath(), highVolume.getPath());
        }
        applyStep(nextStep, currentSet, lowVolume, highVolume);
        if (nextStep != null) {
            LOG.debug("Step : {} ", nextStep.toString());
            plan.addStep(nextStep);
        }
    }
    String message = String.format("Disk Volume set %s Type : %s plan completed.", currentSet.getSetID(), currentSet.getVolumes().get(0).getStorageType());
    plan.setNodeName(node.getDataNodeName());
    plan.setNodeUUID(node.getDataNodeUUID());
    plan.setTimeStamp(Time.now());
    plan.setPort(node.getDataNodePort());
    LOG.info(message);
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
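
To make the loop's intent concrete, here is a standalone sketch of the same greedy idea, free of Hadoop types: repeatedly pair the emptiest and fullest volumes and move just enough data to pull the fullest one within the threshold. All names and numbers are illustrative.

public class GreedyPairingSketch {
    public static void main(String[] args) {
        double[] usedGb = { 100, 900, 500, 300 }; // data on each volume, in GB
        double threshold = 0.10;                  // 10%, as in the planner
        double ideal = 0;
        for (double u : usedGb) {
            ideal += u;
        }
        ideal /= usedGb.length;                   // target for an even spread
        while (true) {
            int lo = 0, hi = 0;                   // emptiest and fullest volumes
            for (int i = 0; i < usedGb.length; i++) {
                if (usedGb[i] < usedGb[lo]) lo = i;
                if (usedGb[i] > usedGb[hi]) hi = i;
            }
            // Done once the fullest volume is within threshold of the ideal.
            if (usedGb[hi] - ideal <= ideal * threshold) {
                break;
            }
            // One "Step": move bytes from the fullest to the emptiest volume,
            // without overshooting the ideal on either side.
            double move = Math.min(usedGb[hi] - ideal, ideal - usedGb[lo]);
            usedGb[hi] -= move;
            usedGb[lo] += move;
            System.out.printf("move %.0f GB from v%d to v%d%n", move, hi, lo);
        }
    }
}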

Example 8 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

The class DiskBalancerTestUtil, method createRandomDataNode().

/**
   * Creates a random DataNode.
   *
   * @param diskTypes - Storage types needed in the node
   * @param diskCount - number of disks of each type to create
   * @return DiskBalancerDataNode
   * @throws Exception
   */
public DiskBalancerDataNode createRandomDataNode(StorageType[] diskTypes, int diskCount) throws Exception {
    Preconditions.checkState(diskTypes.length > 0);
    Preconditions.checkState(diskCount > 0);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    for (StorageType t : diskTypes) {
        DiskBalancerVolumeSet vSet = createRandomVolumeSet(t, diskCount);
        for (DiskBalancerVolume v : vSet.getVolumes()) {
            node.addVolume(v);
        }
    }
    return node;
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) StorageType(org.apache.hadoop.fs.StorageType) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
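
A hypothetical call of the helper above: two storage types at three disks each should yield one volume set per type and six volumes in total. getVolumeSets() appears in the examples above; getVolumeCount() is assumed to exist on DiskBalancerDataNode.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;

public class CreateRandomDataNodeSketch {
    public static void main(String[] args) throws Exception {
        DiskBalancerDataNode node = new DiskBalancerTestUtil()
            .createRandomDataNode(
                new StorageType[] { StorageType.DISK, StorageType.SSD }, 3);
        // One DiskBalancerVolumeSet per storage type, keyed in getVolumeSets().
        System.out.println("volume sets: " + node.getVolumeSets().size()); // 2
        // Assumed accessor: total volumes across all sets.
        System.out.println("volumes: " + node.getVolumeCount());           // 6
    }
}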

Example 9 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

The class TestDataModels, method testNoBalancingNeededEvenDataSpread().

@Test
public void testNoBalancingNeededEvenDataSpread() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two disks that hold exactly the same amount of data;
    // isBalancingNeeded should report that no balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(500 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
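
The arithmetic behind this assertion, paraphrased rather than quoted from DiskBalancerVolumeSet: each volume's data density is the ideal used ratio minus its own used ratio, and the threshold argument is read as a percentage. With two identical volumes both densities are zero, so no balancing is needed.

public class DensitySketch {
    public static void main(String[] args) {
        final double GB = 1, TB = 1024 * GB;      // work in GB for readability
        double[] capacity = { TB, TB };
        double[] reserved = { 100 * GB, 100 * GB };
        double[] used = { 500 * GB, 500 * GB };
        double totalUsed = 0, totalUsable = 0;
        for (int i = 0; i < used.length; i++) {
            totalUsed += used[i];
            totalUsable += capacity[i] - reserved[i];
        }
        double idealUsed = totalUsed / totalUsable; // ideal used ratio
        boolean needed = false;
        for (int i = 0; i < used.length; i++) {
            double density = idealUsed - used[i] / (capacity[i] - reserved[i]);
            // The 10.0f threshold in the test is a percentage, hence / 100.
            if (Math.abs(density) > 10.0 / 100.0) {
                needed = true;
            }
        }
        System.out.println("balancing needed: " + needed); // false: even spread
    }
}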

Example 10 with DiskBalancerVolumeSet

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet in project hadoop by apache.

The class TestDataModels, method testNoBalancingNeededTransientDisks().

@Test
public void testNoBalancingNeededTransientDisks() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two disks that hold different amounts of data but are
    // transient; isBalancingNeeded should report that no balancing is needed.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.RAM_DISK);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(1 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.RAM_DISK);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
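
Why the imbalance in this test is ignored: transient volumes such as RAM_DISK are excluded from the density check, along with failed and explicitly skipped ones. A hedged paraphrase of that guard, with an illustrative method name:

// Paraphrase of the eligibility check in DiskBalancerVolumeSet; the method
// name and its standalone form here are illustrative, not the real API.
public class TransientSkipSketch {
    static boolean countsTowardBalancing(boolean failed, boolean isTransient,
                                         boolean skip) {
        // RAM_DISK volumes are transient, so they never trigger a plan.
        return !failed && !isTransient && !skip;
    }

    public static void main(String[] args) {
        System.out.println(countsTowardBalancing(false, true, false)); // false
    }
}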

Aggregations

DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet) - 13 usages
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume) - 10 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) - 7 usages
Test (org.junit.Test) - 7 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector) - 2 usages
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) - 2 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner) - 2 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) - 2 usages
StorageType (org.apache.hadoop.fs.StorageType) - 1 usage
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) - 1 usage