Example 81 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class FiCaSchedulerApp method findNodeToUnreserve.

@VisibleForTesting
public RMContainer findNodeToUnreserve(Resource clusterResource, FiCaSchedulerNode node, SchedulerRequestKey schedulerKey, Resource minimumUnreservedResource) {
    try {
        readLock.lock();
        // need to unreserve some other container first
        NodeId idToUnreserve = getNodeIdToUnreserve(schedulerKey, minimumUnreservedResource, rc, clusterResource);
        if (idToUnreserve == null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("checked to see if could unreserve for app but nothing " + "reserved that matches for this app");
            }
            return null;
        }
        FiCaSchedulerNode nodeToUnreserve = ((CapacityScheduler) scheduler).getNode(idToUnreserve);
        if (nodeToUnreserve == null) {
            LOG.error("node to unreserve doesn't exist, nodeid: " + idToUnreserve);
            return null;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("unreserving for app: " + getApplicationId() + " on nodeId: " + idToUnreserve + " in order to replace reserved application and place it on node: " + node.getNodeID() + " needing: " + minimumUnreservedResource);
        }
        // headroom
        Resources.addTo(getHeadroom(), nodeToUnreserve.getReservedContainer().getReservedResource());
        return nodeToUnreserve.getReservedContainer();
    } finally {
        readLock.unlock();
    }
}
Also used : NodeId(org.apache.hadoop.yarn.api.records.NodeId) CapacityScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
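
Besides returning the reserved container, the method's only side effect is on the application's headroom: the resource reserved on the node being unreserved is added back so it counts as available again. A minimal sketch of that arithmetic, using only the public Resource and Resources APIs; the class name and the resource values below are made up for illustration.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class HeadroomSketch {
    public static void main(String[] args) {
        // Hypothetical values; real headroom comes from the scheduler.
        Resource headroom = Resource.newInstance(2048, 2);
        Resource reservedOnOtherNode = Resource.newInstance(1024, 1);
        // Mirrors Resources.addTo(getHeadroom(), ...getReservedResource()) above:
        // the released reservation is counted as available again.
        Resources.addTo(headroom, reservedOnOtherNode);
        // headroom is now 3072 MB and 3 vcores.
        System.out.println(headroom);
    }
}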

Example 82 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class NodeQueueLoadMonitor method sortNodes.

private List<NodeId> sortNodes() {
    ReentrantReadWriteLock.ReadLock readLock = clusterNodesLock.readLock();
    readLock.lock();
    try {
        ArrayList aList = new ArrayList<>(this.clusterNodes.values());
        List<NodeId> retList = new ArrayList<>();
        Object[] nodes = aList.toArray();
        // Collections.sort would do something similar by calling Arrays.sort
        // internally but would finally iterate through the input list (aList)
        // to reset the value of each element. Since we don't really care about
        // 'aList', we can use the iteration to create the list of nodeIds which
        // is what we ultimately care about.
        Arrays.sort(nodes, (Comparator) comparator);
        for (int j = 0; j < nodes.length; j++) {
            retList.add(((ClusterNode) nodes[j]).nodeId);
        }
        return retList;
    } finally {
        readLock.unlock();
    }
}
Also used : ArrayList(java.util.ArrayList) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock)
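
The inline comment explains why sortNodes() sorts an Object[] copy rather than calling Collections.sort: the extra pass Collections.sort makes to write the sorted elements back into the list is wasted work when only the node ids are needed. For contrast, a hedged sketch of the conventional variant, with a hypothetical Node class standing in for the package-private ClusterNode.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SortNodesSketch {
    static class Node {
        final String id;
        final int queueLength;
        Node(String id, int queueLength) { this.id = id; this.queueLength = queueLength; }
    }

    // Conventional variant: List.sort (like Collections.sort) writes the sorted
    // order back into the copy, then a second pass extracts the ids.
    static List<String> sortIds(List<Node> nodes) {
        List<Node> copy = new ArrayList<>(nodes);
        copy.sort(Comparator.comparingInt((Node n) -> n.queueLength));
        List<String> ids = new ArrayList<>();
        for (Node n : copy) {
            ids.add(n.id);
        }
        return ids;
    }

    public static void main(String[] args) {
        List<Node> nodes = new ArrayList<>();
        nodes.add(new Node("n1", 5));
        nodes.add(new Node("n2", 1));
        nodes.add(new Node("n3", 3));
        System.out.println(sortIds(nodes)); // [n2, n3, n1]
    }
}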

Example 83 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class FSAppAttempt method createContainer.

/**
   * Create and return a container object reflecting an allocation for the
   * given application on the given node with the given capability and
   * priority.
   *
   * @param node Node
   * @param capability Capability
   * @param schedulerKey Scheduler Key
   * @return Container
   */
private Container createContainer(FSSchedulerNode node, Resource capability, SchedulerRequestKey schedulerKey) {
    NodeId nodeId = node.getRMNode().getNodeID();
    ContainerId containerId = BuilderUtils.newContainerId(getApplicationAttemptId(), getNewContainerId());
    // Create the container
    return BuilderUtils.newContainer(containerId, nodeId, node.getRMNode().getHttpAddress(),
        capability, schedulerKey.getPriority(), null, schedulerKey.getAllocationRequestId());
}
Also used : ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId)
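
BuilderUtils is a server-side helper; the same record objects can also be assembled through the public newInstance factories on the record classes. The sketch below is illustrative only: the host, port, ids and sizes are made up, and it omits the allocationRequestId argument that BuilderUtils.newContainer passes through.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;

public class ContainerRecordSketch {
    public static void main(String[] args) {
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        NodeId nodeId = NodeId.newInstance("nm-host.example.com", 45454);
        ContainerId containerId = ContainerId.newContainerId(attemptId, 1L);
        // Public-API counterpart of the BuilderUtils.newContainer(...) call above
        // (no allocationRequestId parameter here).
        Container container = Container.newInstance(containerId, nodeId,
            "nm-host.example.com:8042", Resource.newInstance(1024, 1),
            Priority.newInstance(0), /* containerToken */ null);
        System.out.println(container);
    }
}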

Example 84 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class FifoScheduler method assignContainer.

private int assignContainer(FiCaSchedulerNode node, FifoAppAttempt application, SchedulerRequestKey schedulerKey, int assignableContainers, Resource capability, NodeType type) {
    LOG.debug("assignContainers:" + " node=" + node.getRMNode().getNodeAddress() + " application=" + application.getApplicationId().getId() + " priority=" + schedulerKey.getPriority().getPriority() + " assignableContainers=" + assignableContainers + " capability=" + capability + " type=" + type);
    // TODO: A buggy application with this zero would crash the scheduler.
    int availableContainers = (int) (node.getUnallocatedResource().getMemorySize() / capability.getMemorySize());
    int assignedContainers = Math.min(assignableContainers, availableContainers);
    if (assignedContainers > 0) {
        for (int i = 0; i < assignedContainers; ++i) {
            NodeId nodeId = node.getRMNode().getNodeID();
            ContainerId containerId = BuilderUtils.newContainerId(
                application.getApplicationAttemptId(), application.getNewContainerId());
            // Create the container
            Container container = BuilderUtils.newContainer(containerId, nodeId,
                node.getRMNode().getHttpAddress(), capability, schedulerKey.getPriority(),
                null, schedulerKey.getAllocationRequestId());
            // Allocate!
            // Inform the application
            RMContainer rmContainer = application.allocate(type, node, schedulerKey, container);
            // Inform the node
            node.allocateContainer(rmContainer);
            // Update usage for this container
            increaseUsedResources(rmContainer);
        }
    }
    return assignedContainers;
}
Also used : Container(org.apache.hadoop.yarn.api.records.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)
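
The TODO above points at the weak spot in this loop's setup: availableContainers is a plain memory division, so a buggy application requesting a zero-memory capability would trigger a divide-by-zero. A minimal sketch of that calculation with a guard added; the names are illustrative, not from the Hadoop source.

public class AvailableContainersSketch {
    // Guarded version of:
    //   node.getUnallocatedResource().getMemorySize() / capability.getMemorySize()
    static int availableContainers(long unallocatedMemory, long requestedMemory) {
        if (requestedMemory <= 0) {
            return 0; // a zero-memory request would otherwise throw ArithmeticException
        }
        return (int) (unallocatedMemory / requestedMemory);
    }

    public static void main(String[] args) {
        System.out.println(availableContainers(8192, 1024)); // 8
        System.out.println(availableContainers(8192, 0));    // 0 instead of a crash
    }
}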

Example 85 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class NMTokenSecretManagerInRM method createAndGetNMToken.

public NMToken createAndGetNMToken(String applicationSubmitter, ApplicationAttemptId appAttemptId, Container container) {
    try {
        this.writeLock.lock();
        HashSet<NodeId> nodeSet = this.appAttemptToNodeKeyMap.get(appAttemptId);
        NMToken nmToken = null;
        if (nodeSet != null) {
            if (!nodeSet.contains(container.getNodeId())) {
                LOG.info("Sending NMToken for nodeId : " + container.getNodeId() + " for container : " + container.getId());
                Token token = createNMToken(container.getId().getApplicationAttemptId(), container.getNodeId(), applicationSubmitter);
                nmToken = NMToken.newInstance(container.getNodeId(), token);
                nodeSet.add(container.getNodeId());
            }
        }
        return nmToken;
    } finally {
        this.writeLock.unlock();
    }
}
Also used : NMToken(org.apache.hadoop.yarn.api.records.NMToken) NodeId(org.apache.hadoop.yarn.api.records.NodeId) Token(org.apache.hadoop.yarn.api.records.Token)
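
The part worth noting is the per-attempt HashSet of NodeId: a token is created only the first time a container for the attempt lands on a node, and the node is then recorded so later containers on the same node get null instead of a fresh token. A Hadoop-free sketch of that bookkeeping, with all names hypothetical.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class TokenOncePerNodeSketch {
    private final Map<String, Set<String>> attemptToNodes = new HashMap<>();

    // Returns a token the first time (attemptId, nodeId) is seen, null afterwards,
    // and null for attempts that were never registered (mirrors the nodeSet == null case).
    String maybeIssueToken(String attemptId, String nodeId) {
        Set<String> nodes = attemptToNodes.get(attemptId);
        if (nodes == null) {
            return null;
        }
        if (nodes.add(nodeId)) {
            return "token-for-" + nodeId;
        }
        return null;
    }

    public static void main(String[] args) {
        TokenOncePerNodeSketch mgr = new TokenOncePerNodeSketch();
        mgr.attemptToNodes.put("attempt_1", new HashSet<>());
        System.out.println(mgr.maybeIssueToken("attempt_1", "node1:45454")); // token-for-node1:45454
        System.out.println(mgr.maybeIssueToken("attempt_1", "node1:45454")); // null (already sent)
    }
}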

Aggregations

NodeId (org.apache.hadoop.yarn.api.records.NodeId): 257
Test (org.junit.Test): 137
Resource (org.apache.hadoop.yarn.api.records.Resource): 89
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 74
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 59
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 46
Container (org.apache.hadoop.yarn.api.records.Container): 44
ArrayList (java.util.ArrayList): 43
Configuration (org.apache.hadoop.conf.Configuration): 40
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 40
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 40
HashSet (java.util.HashSet): 39
Set (java.util.Set): 36
HashMap (java.util.HashMap): 35
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 34
Priority (org.apache.hadoop.yarn.api.records.Priority): 32
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 31
IOException (java.io.IOException): 29
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 29
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 28