Search in sources:

Example 1 with DatanodeUsageInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo in the Apache Ozone project.

The following example shows the method findTargetForContainerMove from the class AbstractFindTargetGreedy.

/**
 * Find a {@link ContainerMoveSelection} consisting of a target and
 * container to move for a source datanode. Favours more under-utilized nodes.
 * @param source Datanode to find a target for
 * @param candidateContainers Set of candidate containers satisfying
 *                            selection criteria
 *                            {@link ContainerBalancerSelectionCriteria}
 * (DatanodeDetails, Long) method returns true if the size specified in the
 * second argument can enter the specified DatanodeDetails node
 * @return Found target and container
 */
@Override
public ContainerMoveSelection findTargetForContainerMove(DatanodeDetails source, Set<ContainerID> candidateContainers) {
    // Order potential targets so that preferred (more under-utilized)
    // nodes are tried first.
    sortTargetForSource(source);
    for (DatanodeUsageInfo usageInfo : potentialTargets) {
        DatanodeDetails target = usageInfo.getDatanodeDetails();
        for (ContainerID candidate : candidateContainers) {
            Set<ContainerReplica> replicaSet;
            ContainerInfo info;
            try {
                replicaSet = containerManager.getContainerReplicas(candidate);
                info = containerManager.getContainer(candidate);
            } catch (ContainerNotFoundException e) {
                // Container disappeared between selection and lookup; skip it.
                logger.warn("Could not get Container {} from Container Manager for " + "obtaining replicas in Container Balancer.", candidate, e);
                continue;
            }
            // A valid move requires that the target does not already hold a
            // replica, the placement policy stays satisfied, and the target
            // has enough free space for the container.
            boolean targetAlreadyHasReplica = replicaSet.stream().anyMatch(replica -> replica.getDatanodeDetails().equals(target));
            if (!targetAlreadyHasReplica && containerMoveSatisfiesPlacementPolicy(candidate, replicaSet, source, target) && canSizeEnterTarget(target, info.getUsedBytes())) {
                return new ContainerMoveSelection(target, candidate);
            }
        }
    }
    logger.info("Container Balancer could not find a target for " + "source datanode {}", source.getUuidString());
    return null;
}
Also used : ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) ContainerReplica(org.apache.hadoop.hdds.scm.container.ContainerReplica) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) DatanodeUsageInfo(org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo) ContainerNotFoundException(org.apache.hadoop.hdds.scm.container.ContainerNotFoundException)

Example 2 with DatanodeUsageInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo in the Apache Ozone project.

The following example shows the method calculateAvgUtilization from the class ContainerBalancer.

/**
 * Calculates the average utilization for the specified nodes.
 * Utilization is (capacity - remaining) divided by capacity.
 * As a side effect, caches the aggregated capacity, used, and remaining
 * values of the given nodes in clusterCapacity, clusterUsed, and
 * clusterRemaining.
 *
 * @param nodes List of DatanodeUsageInfo to find the average utilization for
 * @return Average utilization value, or 0 if the node list is empty or the
 *         aggregated capacity is zero
 */
double calculateAvgUtilization(List<DatanodeUsageInfo> nodes) {
    if (nodes.isEmpty()) {
        LOG.warn("No nodes to calculate average utilization for in " + "ContainerBalancer.");
        return 0;
    }
    // Aggregate capacity/used/remaining across all nodes.
    SCMNodeStat aggregatedStats = new SCMNodeStat(0, 0, 0);
    for (DatanodeUsageInfo node : nodes) {
        aggregatedStats.add(node.getScmNodeStat());
    }
    clusterCapacity = aggregatedStats.getCapacity().get();
    clusterUsed = aggregatedStats.getScmUsed().get();
    clusterRemaining = aggregatedStats.getRemaining().get();
    if (clusterCapacity == 0) {
        // Guard against division by zero: without this check the method
        // would return NaN when every node reports zero capacity.
        LOG.warn("Aggregated capacity of nodes is 0 in ContainerBalancer.");
        return 0;
    }
    return (clusterCapacity - clusterRemaining) / (double) clusterCapacity;
}
Also used : SCMNodeStat(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat) DatanodeUsageInfo(org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo)

Example 3 with DatanodeUsageInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo in the Apache Ozone project.

The following example shows the method testFindTargetGreedyByUsage from the class TestFindTargetStrategy.

/**
 * Checks whether FindTargetGreedyByUsage always choose target
 * for a given source by Usage.
 */
@Test
public void testFindTargetGreedyByUsage() {
    FindTargetGreedyByUsageInfo findTargetStrategyByUsageInfo = new FindTargetGreedyByUsageInfo(null, null, null);
    List<DatanodeUsageInfo> overUtilizedDatanodes = new ArrayList<>();
    // create three datanodes with different usageinfo
    DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat(100, 0, 40));
    DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat(100, 0, 60));
    DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat(100, 0, 80));
    // insert in ascending order
    overUtilizedDatanodes.add(dui1);
    overUtilizedDatanodes.add(dui2);
    overUtilizedDatanodes.add(dui3);
    findTargetStrategyByUsageInfo.reInitialize(overUtilizedDatanodes, null, null);
    // no need to set the datanode usage for source.
    findTargetStrategyByUsageInfo.sortTargetForSource(MockDatanodeDetails.randomDatanodeDetails());
    Collection<DatanodeUsageInfo> potentialTargets = findTargetStrategyByUsageInfo.getPotentialTargets();
    Object[] sortedPotentialTargetArray = potentialTargets.toArray();
    // JUnit's assertEquals contract is (expected, actual); the arguments
    // were previously reversed, which produces misleading failure messages.
    Assert.assertEquals(3, sortedPotentialTargetArray.length);
    // make sure after sorting target for source, the potentialTargets is
    // sorted in descending order of usage
    Assert.assertEquals(dui3.getDatanodeDetails(), ((DatanodeUsageInfo) sortedPotentialTargetArray[0]).getDatanodeDetails());
    Assert.assertEquals(dui2.getDatanodeDetails(), ((DatanodeUsageInfo) sortedPotentialTargetArray[1]).getDatanodeDetails());
    Assert.assertEquals(dui1.getDatanodeDetails(), ((DatanodeUsageInfo) sortedPotentialTargetArray[2]).getDatanodeDetails());
}
Also used : ArrayList(java.util.ArrayList) DatanodeUsageInfo(org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo) SCMNodeStat(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat) Test(org.junit.Test)

Example 4 with DatanodeUsageInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo in the Apache Ozone project.

The following example shows the method generateData from the class TestContainerBalancer.

/**
 * Create some datanodes and containers for each node.
 * Datanode i receives i containers, so node 0 has none; container sizes
 * cycle through multiples 1..5 of the base size.
 */
private void generateData() {
    this.numberOfNodes = 10;
    generateUtilizations(numberOfNodes);
    nodesInCluster = new ArrayList<>(nodeUtilizations.size());
    // create datanodes and add containers to them
    for (int nodeIndex = 0; nodeIndex < numberOfNodes; nodeIndex++) {
        DatanodeUsageInfo usageInfo = new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat());
        Set<ContainerID> ownedContainers = new HashSet<>();
        // create containers with varying used space
        for (int j = 0; j < nodeIndex; j++) {
            // size multiple cycles 1, 2, 3, 4, 5, 1, 2, ...
            int sizeMultiple = j % 5 + 1;
            ContainerInfo container = createContainer((long) nodeIndex * nodeIndex + j, sizeMultiple);
            cidToInfoMap.put(container.containerID(), container);
            ownedContainers.add(container.containerID());
            // create initial replica for this container and add it
            Set<ContainerReplica> initialReplicas = new HashSet<>();
            initialReplicas.add(createReplica(container.containerID(), usageInfo.getDatanodeDetails(), container.getUsedBytes()));
            cidToReplicasMap.put(container.containerID(), initialReplicas);
        }
        nodesInCluster.add(usageInfo);
        datanodeToContainersMap.put(usageInfo, ownedContainers);
    }
}
Also used : ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) ContainerReplica(org.apache.hadoop.hdds.scm.container.ContainerReplica) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) DatanodeUsageInfo(org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo) SCMNodeStat(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat) HashSet(java.util.HashSet)

Example 5 with DatanodeUsageInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo in the Apache Ozone project.

The following example shows the method getMostOrLeastUsedDatanodes from the class MockNodeManager.

/**
 * Gets a sorted list of most or least used DatanodeUsageInfo containing
 * healthy, in-service nodes. If the specified mostUsed is true, the returned
 * list is in descending order of usage. Otherwise, the returned list is in
 * ascending order of usage.
 *
 * @param mostUsed true if most used, false if least used
 * @return List of DatanodeUsageInfo
 */
@Override
public List<DatanodeUsageInfo> getMostOrLeastUsedDatanodes(boolean mostUsed) {
    List<DatanodeDetails> healthyInServiceNodes = getNodes(NodeOperationalState.IN_SERVICE, HEALTHY);
    if (healthyInServiceNodes == null) {
        // No matching nodes: return an empty, mutable list.
        return new ArrayList<>();
    }
    // NOTE(review): reversed() is applied for mostUsed, which suggests
    // getMostUtilized() orders least-used first despite its name — confirm
    // against DatanodeUsageInfo.
    Comparator<DatanodeUsageInfo> order = mostUsed
        ? DatanodeUsageInfo.getMostUtilized().reversed()
        : DatanodeUsageInfo.getMostUtilized();
    return healthyInServiceNodes.stream()
        .map(node -> new DatanodeUsageInfo(node, nodeMetricMap.get(node)))
        .sorted(order)
        .collect(Collectors.toList());
}
Also used : MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ArrayList(java.util.ArrayList) DatanodeUsageInfo(org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo)

Aggregations

DatanodeUsageInfo (org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo)9 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)5 SCMNodeStat (org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat)4 ArrayList (java.util.ArrayList)3 HashSet (java.util.HashSet)2 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)2 ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID)2 ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo)2 ContainerReplica (org.apache.hadoop.hdds.scm.container.ContainerReplica)2 Test (org.junit.Test)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 ContainerNotFoundException (org.apache.hadoop.hdds.scm.container.ContainerNotFoundException)1 NetworkTopology (org.apache.hadoop.hdds.scm.net.NetworkTopology)1 NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl)1 NodeSchema (org.apache.hadoop.hdds.scm.net.NodeSchema)1 NodeSchemaManager (org.apache.hadoop.hdds.scm.net.NodeSchemaManager)1