
Example 16 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class GreedyPlanner, method removeSkipVolumes.

// Removes all volumes from the volume set whose skip or failed flag is set.
private void removeSkipVolumes(DiskBalancerVolumeSet currentSet) {
    List<DiskBalancerVolume> volumeList = currentSet.getVolumes();
    Iterator<DiskBalancerVolume> volumeIterator = volumeList.iterator();
    while (volumeIterator.hasNext()) {
        DiskBalancerVolume vol = volumeIterator.next();
        if (vol.isSkip() || vol.isFailed()) {
            currentSet.removeVolume(vol);
        }
    }
    currentSet.computeVolumeDataDensity();
    printQueue(currentSet.getSortedQueue());
}
Also used : DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
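
As a quick orientation, the fragment below sketches the same skip/failed filtering in isolation, using only the DiskBalancerVolume accessors seen above plus a setSkip(boolean) setter (assumed to match the isSkip() getter). FilterSketch and its local list are hypothetical scaffolding, not part of Hadoop; note that it removes through the Iterator, while the planner above delegates removal to DiskBalancerVolumeSet.removeVolume().

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;

// Hypothetical driver, not part of Hadoop: demonstrates dropping
// skipped or failed volumes from a working list.
public class FilterSketch {
    public static void main(String[] args) {
        List<DiskBalancerVolume> volumes = new LinkedList<>();

        DiskBalancerVolume keep = new DiskBalancerVolume();
        keep.setPath("/tmp/disk/keep");

        DiskBalancerVolume skipped = new DiskBalancerVolume();
        skipped.setPath("/tmp/disk/skipped");
        // Mark the volume so a planner would ignore it (setter assumed).
        skipped.setSkip(true);

        volumes.add(keep);
        volumes.add(skipped);

        // Iterator.remove() lets us delete during traversal without a
        // ConcurrentModificationException.
        Iterator<DiskBalancerVolume> it = volumes.iterator();
        while (it.hasNext()) {
            DiskBalancerVolume vol = it.next();
            if (vol.isSkip() || vol.isFailed()) {
                it.remove();
            }
        }
        // Prints 1: only the non-skipped volume remains.
        System.out.println("Remaining volumes: " + volumes.size());
    }
}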

Example 17 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class GreedyPlanner, method balanceVolumeSet.

/**
   * Computes the steps required to make a DiskBalancerVolumeSet balanced.
   *
   * @param node - DiskBalancerDataNode that owns the volume set
   * @param vSet - DiskBalancerVolumeSet to balance
   * @param plan - NodePlan that accumulates the computed move steps
   * @throws Exception if planning fails
   */
public void balanceVolumeSet(DiskBalancerDataNode node, DiskBalancerVolumeSet vSet, NodePlan plan) throws Exception {
    Preconditions.checkNotNull(vSet);
    Preconditions.checkNotNull(plan);
    Preconditions.checkNotNull(node);
    DiskBalancerVolumeSet currentSet = new DiskBalancerVolumeSet(vSet);
    while (currentSet.isBalancingNeeded(this.threshold)) {
        removeSkipVolumes(currentSet);
        DiskBalancerVolume lowVolume = currentSet.getSortedQueue().first();
        DiskBalancerVolume highVolume = currentSet.getSortedQueue().last();
        Step nextStep = null;
// If neither endpoint volume is marked as skipped, create a move request.
        if (!lowVolume.isSkip() && !highVolume.isSkip()) {
            nextStep = computeMove(currentSet, lowVolume, highVolume);
        } else {
            LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}", lowVolume.getPath(), highVolume.getPath());
        }
        applyStep(nextStep, currentSet, lowVolume, highVolume);
        if (nextStep != null) {
            LOG.debug("Step : {} ", nextStep.toString());
            plan.addStep(nextStep);
        }
    }
    String message = String.format("Disk Volume set %s Type : %s plan completed.", currentSet.getSetID(), currentSet.getVolumes().get(0).getStorageType());
    plan.setNodeName(node.getDataNodeName());
    plan.setNodeUUID(node.getDataNodeUUID());
    plan.setTimeStamp(Time.now());
    plan.setPort(node.getDataNodePort());
    LOG.info(message);
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
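
For context, balanceVolumeSet is normally driven through a GreedyPlanner. The hedged sketch below assumes the GreedyPlanner(threshold, node) and NodePlan(name, port) constructors and the NodePlan.getVolumeSetPlans() / Step.getBytesToMove() accessors found in Hadoop's planner code; the test name itself is made up for illustration.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;

@Test
public void testGreedyPlannerSketch() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    // Four DISK volumes with random capacities and usage.
    DiskBalancerDataNode node = util.createRandomDataNode(
        new StorageType[] {StorageType.DISK}, 4);

    // 10% threshold: volume sets within this density band are left alone.
    GreedyPlanner planner = new GreedyPlanner(10.0f, node);
    NodePlan plan = new NodePlan(node.getDataNodeName(),
        node.getDataNodePort());

    for (DiskBalancerVolumeSet vSet : node.getVolumeSets().values()) {
        planner.balanceVolumeSet(node, vSet, plan);
    }

    // Each planned step should move a positive number of bytes from the
    // most-used volume toward the least-used one.
    for (Step step : plan.getVolumeSetPlans()) {
        Assert.assertTrue(step.getBytesToMove() > 0);
    }
}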

Example 18 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class DiskBalancerTestUtil, method createRandomVolume.

/**
   * Creates a random volume for testing purposes.
   *
   * @param type - StorageType of the volume to create
   * @return DiskBalancerVolume
   */
public DiskBalancerVolume createRandomVolume(StorageType type) {
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setPath("/tmp/disk/" + getRandomName(10));
    volume.setStorageType(type.toString());
    volume.setTransient(type.isTransient());
    volume.setCapacity(getRandomCapacity());
    volume.setReserved(getRandomReserved(volume.getCapacity()));
    volume.setUsed(getRandomDfsUsed(volume.getCapacity(), volume.getReserved()));
    volume.setUuid(UUID.randomUUID().toString());
    return volume;
}
Also used : DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
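
A sanity check follows directly from how the random values are layered: reserved space is derived from capacity, and used space from what remains. The test method below is an illustrative sketch with a made-up name, not an existing Hadoop test.

@Test
public void testRandomVolumeInvariants() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerVolume vol = util.createRandomVolume(StorageType.DISK);

    // Reserved space is carved out of capacity, and used space out of
    // the remainder, so these bounds should hold for any random volume.
    Assert.assertTrue(vol.getCapacity() > 0);
    Assert.assertTrue(vol.getReserved() < vol.getCapacity());
    Assert.assertTrue(vol.getUsed() <= vol.getCapacity() - vol.getReserved());
}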

Example 19 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class DiskBalancerTestUtil, method createRandomDataNode.

/**
   * Creates a random DataNode for testing purposes.
   *
   * @param diskTypes - storage types needed on the node
   * @param diskCount - number of disks of each type to create
   * @return DiskBalancerDataNode
   * @throws Exception
   */
public DiskBalancerDataNode createRandomDataNode(StorageType[] diskTypes, int diskCount) throws Exception {
    Preconditions.checkState(diskTypes.length > 0);
    Preconditions.checkState(diskCount > 0);
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    for (StorageType t : diskTypes) {
        DiskBalancerVolumeSet vSet = createRandomVolumeSet(t, diskCount);
        for (DiskBalancerVolume v : vSet.getVolumes()) {
            node.addVolume(v);
        }
    }
    return node;
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), StorageType(org.apache.hadoop.fs.StorageType), DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
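
Since volumes are grouped into one volume set per storage type (as the iteration over node.getVolumeSets().values() in the next example suggests), a node built this way should expose one set per requested type, each holding diskCount volumes. The test name below is hypothetical.

@Test
public void testRandomDataNodeShape() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = util.createRandomDataNode(
        new StorageType[] {StorageType.DISK, StorageType.SSD}, 3);

    // One volume set per storage type, three volumes in each.
    Assert.assertEquals(2, node.getVolumeSets().size());
    for (DiskBalancerVolumeSet vSet : node.getVolumeSets().values()) {
        Assert.assertEquals(3, vSet.getVolumes().size());
    }
}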

Example 20 with DiskBalancerVolume

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume in project hadoop by apache.

From the class TestDataModels, method testNoBalancingNeededEvenDataSpread.

@Test
public void testNoBalancingNeededEvenDataSpread() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = new DiskBalancerDataNode(UUID.randomUUID().toString());
    // Create two disks with exactly the same data; isBalancingNeeded
    // should report that no balancing is required.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(500 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(500 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vsets : node.getVolumeSets().values()) {
        Assert.assertFalse(vsets.isBalancingNeeded(10.0f));
    }
}
Also used : DiskBalancerVolumeSet(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test(org.junit.Test)
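
The complementary case, an uneven spread that should trigger balancing, can be written in the same shape. The method name and usage figures are illustrative; with one disk at 850 GB used and the other at 100 GB, the density gap comfortably exceeds a 10 percent threshold.

@Test
public void testBalancingNeededUnevenDataSpread() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node =
        new DiskBalancerDataNode(UUID.randomUUID().toString());
    // One nearly full disk and one nearly empty disk of the same size:
    // the data-density gap should exceed a 10 percent threshold.
    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.SSD);
    v1.setCapacity(DiskBalancerTestUtil.TB);
    v1.setReserved(100 * DiskBalancerTestUtil.GB);
    v1.setUsed(850 * DiskBalancerTestUtil.GB);
    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.SSD);
    v2.setCapacity(DiskBalancerTestUtil.TB);
    v2.setReserved(100 * DiskBalancerTestUtil.GB);
    v2.setUsed(100 * DiskBalancerTestUtil.GB);
    node.addVolume(v1);
    node.addVolume(v2);
    for (DiskBalancerVolumeSet vSet : node.getVolumeSets().values()) {
        Assert.assertTrue(vSet.isBalancingNeeded(10.0f));
    }
}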

Aggregations

DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 25 usages
Test (org.junit.Test): 17 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 16 usages
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 10 usages
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 10 usages
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 10 usages
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 10 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 10 usages
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5 usages
LinkedList (java.util.LinkedList): 1 usage
StorageType (org.apache.hadoop.fs.StorageType): 1 usage
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1 usage
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1 usage
DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport): 1 usage
StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport): 1 usage