
Example 21 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class FiCaSchedulerApp, method getTotalPendingRequestsPerPartition.

public Map<String, Resource> getTotalPendingRequestsPerPartition() {
    try {
        readLock.lock();
        Map<String, Resource> ret = new HashMap<>();
        for (SchedulerRequestKey schedulerKey : appSchedulingInfo.getSchedulerKeys()) {
            SchedulingPlacementSet<FiCaSchedulerNode> ps = appSchedulingInfo.getSchedulingPlacementSet(schedulerKey);
            String nodePartition = ps.getPrimaryRequestedNodePartition();
            Resource res = ret.get(nodePartition);
            if (null == res) {
                res = Resources.createResource(0);
                ret.put(nodePartition, res);
            }
            PendingAsk ask = ps.getPendingAsk(ResourceRequest.ANY);
            if (ask.getCount() > 0) {
                Resources.addTo(res, Resources.multiply(ask.getPerAllocationResource(), ask.getCount()));
            }
        }
        return ret;
    } finally {
        readLock.unlock();
    }
}
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) Resource(org.apache.hadoop.yarn.api.records.Resource) PendingAsk(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey)
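
The method above groups pending asks by node partition and sums them into one Resource per partition. The snippet below is a minimal, self-contained sketch of that aggregation pattern in plain Java; the Ask record and the memory-only accounting are simplifications introduced here for illustration, not the Hadoop PendingAsk/Resource API.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PendingPerPartitionSketch {

    // Simplified stand-in for PendingAsk: target partition, memory per allocation, and count.
    record Ask(String partition, long memoryMb, int count) {}

    // Sum total pending memory per partition, mirroring the merge logic above.
    static Map<String, Long> totalPendingPerPartition(List<Ask> asks) {
        Map<String, Long> ret = new HashMap<>();
        for (Ask ask : asks) {
            if (ask.count() <= 0) {
                continue; // nothing outstanding for this key
            }
            // merge() plays the role of Resources.addTo + Resources.multiply
            ret.merge(ask.partition(), ask.memoryMb() * ask.count(), Long::sum);
        }
        return ret;
    }

    public static void main(String[] args) {
        List<Ask> asks = List.of(
            new Ask("", 1024, 3),      // default partition: 3 x 1 GB
            new Ask("gpu", 4096, 2),   // "gpu" partition: 2 x 4 GB
            new Ask("", 2048, 0));     // satisfied ask, ignored
        // e.g. {=3072, gpu=8192} (HashMap iteration order not guaranteed)
        System.out.println(totalPendingPerPartition(asks));
    }
}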

Example 22 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class TestLeafQueue, method testLocalityConstraints.

@Test
public void testLocalityConstraints() throws Exception {
    // Manipulate queue 'a'
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // User
    String user_0 = "user_0";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_1, user_0);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    // Setup some nodes and racks
    String host_0_0 = "127.0.0.1";
    String rack_0 = "rack_0";
    String host_0_1 = "127.0.0.2";
    FiCaSchedulerNode node_0_1 = TestUtils.getMockNode(host_0_1, rack_0, 0, 8 * GB);
    String host_1_0 = "127.0.0.3";
    String rack_1 = "rack_1";
    FiCaSchedulerNode node_1_0 = TestUtils.getMockNode(host_1_0, rack_1, 0, 8 * GB);
    String host_1_1 = "127.0.0.4";
    FiCaSchedulerNode node_1_1 = TestUtils.getMockNode(host_1_1, rack_1, 0, 8 * GB);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0_1.getNodeID(), node_0_1, node_1_0.getNodeID(), node_1_0, node_1_1.getNodeID(), node_1_1);
    final int numNodes = 4;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 1);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests
    // resourceName: <priority, memory, #containers, relaxLocality>
    // host_0_0: < 1, 1GB, 1, true >
    // host_0_1: < null >
    // rack_0:   < null >                     <----
    // host_1_0: < 1, 1GB, 1, true >
    // host_1_1: < null >
    // rack_1:   < 1, 1GB, 1, false >         <----
    // ANY:      < 1, 1GB, 1, false >         <----
    // Availability:
    // host_0_0: 8G
    // host_0_1: 8G
    // host_1_0: 8G
    // host_1_1: 8G
    // Blacklist: <host_0_0>
    Priority priority = TestUtils.createMockPriority(1);
    SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
    List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
    app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, false, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
            1 * GB, 1, // only one
            false, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    app_0.updateBlacklist(Collections.singletonList(host_0_0), null);
    app_0_requests_0.clear();
    //
    // Start testing...
    //
    // node_0_1  
    // Shouldn't allocate since RR(rack_0) = null && RR(ANY) = relax: false
    CSAssignment assignment = a.assignContainers(clusterResource, node_0_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyNoContainerAllocated(assignment);
    // should be 0
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    // resourceName: <priority, memory, #containers, relaxLocality>
    // host_0_0: < 1, 1GB, 1, true >
    // host_0_1: < null >
    // rack_0:   < null >                     <----
    // host_1_0: < 1, 1GB, 1, true >
    // host_1_1: < null >
    // rack_1:   < 1, 1GB, 1, false >         <----
    // ANY:      < 1, 1GB, 1, false >         <----
    // Availability:
    // host_0_0: 8G
    // host_0_1: 8G
    // host_1_0: 8G
    // host_1_1: 8G
    // Blacklist: <host_0_0>
    // node_1_1  
    // Shouldn't allocate since RR(rack_1) = relax: false
    assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyNoContainerAllocated(assignment);
    // should be 0
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    // Allow rack-locality for rack_1, but blacklist node_1_1
    app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    app_0.updateBlacklist(Collections.singletonList(host_1_1), null);
    app_0_requests_0.clear();
    // resourceName: <priority, memory, #containers, relaxLocality>
    // host_0_0: < 1, 1GB, 1, true >
    // host_0_1: < null >
    // rack_0:   < null >                     
    // host_1_0: < 1, 1GB, 1, true >
    // host_1_1: < null >
    // rack_1:   < 1, 1GB, 1, true >         
    // ANY:      < 1, 1GB, 1, false >         
    // Availability:
    // host_0_0: 8G
    // host_0_1: 8G
    // host_1_0: 8G
    // host_1_1: 8G
    // Blacklist: < host_0_0 , host_1_1 >       <----
    // node_1_1  
    // Shouldn't allocate since node_1_1 is blacklisted
    assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyNoContainerAllocated(assignment);
    // should be 0
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    // Now, remove node_1_1 from blacklist, but add rack_1 to blacklist
    app_0.updateResourceRequests(app_0_requests_0);
    app_0.updateBlacklist(Collections.singletonList(rack_1), Collections.singletonList(host_1_1));
    app_0_requests_0.clear();
    // resourceName: <priority, memory, #containers, relaxLocality>
    // host_0_0: < 1, 1GB, 1, true >
    // host_0_1: < null >
    // rack_0:   < null >                     
    // host_1_0: < 1, 1GB, 1, true >
    // host_1_1: < null >
    // rack_1:   < 1, 1GB, 1, true >         
    // ANY:      < 1, 1GB, 1, false >         
    // Availability:
    // host_0_0: 8G
    // host_0_1: 8G
    // host_1_0: 8G
    // host_1_1: 8G
    // Blacklist: < host_0_0 , rack_1 >       <----
    // node_1_1  
    // Shouldn't allocate since rack_1 is blacklisted
    assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyNoContainerAllocated(assignment);
    // should be 0
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    // Now remove rack_1 from blacklist
    app_0.updateResourceRequests(app_0_requests_0);
    app_0.updateBlacklist(null, Collections.singletonList(rack_1));
    app_0_requests_0.clear();
    // resourceName: <priority, memory, #containers, relaxLocality>
    // host_0_0: < 1, 1GB, 1, true >
    // host_0_1: < null >
    // rack_0:   < null >                     
    // host_1_0: < 1, 1GB, 1, true >
    // host_1_1: < null >
    // rack_1:   < 1, 1GB, 1, true >         
    // ANY:      < 1, 1GB, 1, false >         
    // Availability:
    // host_0_0: 8G
    // host_0_1: 8G
    // host_1_0: 8G
    // host_1_1: 8G
    // Blacklist: < host_0_0 >       <----
    // Now, should allocate since RR(rack_1) = relax: true
    assignment = a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyNoContainerAllocated(assignment);
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
    // Now sanity-check node_local
    app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, false, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
            1 * GB, 1, // only one
            false, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    app_0_requests_0.clear();
    // resourceName: <priority, memory, #containers, relaxLocality>
    // host_0_0: < 1, 1GB, 1, true >
    // host_0_1: < null >
    // rack_0:   < null >                     
    // host_1_0: < 1, 1GB, 1, true >
    // host_1_1: < null >
    // rack_1:   < 1, 1GB, 1, false >          <----
    // ANY:      < 1, 1GB, 1, false >          <----
    // Availability:
    // host_0_0: 8G
    // host_0_1: 8G
    // host_1_0: 8G
    // host_1_1: 7G
    assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ArrayList(java.util.ArrayList) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) Test(org.junit.Test)
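
The test sets up a lot of machinery, but the rule it exercises is compact: a node is eligible only when neither it nor its rack is blacklisted, and either the node itself was requested or the rack-level request allows locality to be relaxed. The sketch below hand-rolls that eligibility check over a flat request table keyed by resource name; it is an illustration of the rule the assertions encode, not the CapacityScheduler code path, and all names here are invented for the example.

import java.util.Map;
import java.util.Set;

public class LocalityConstraintSketch {

    // relaxLocality flag per resource name (host, rack, or "*"), as in the test comments.
    record Request(int containers, boolean relaxLocality) {}

    static boolean canAllocateOn(String host, String rack,
                                 Map<String, Request> requests,
                                 Set<String> blacklist) {
        if (blacklist.contains(host) || blacklist.contains(rack)) {
            return false;                       // node or its rack is blacklisted
        }
        Request nodeReq = requests.get(host);
        if (nodeReq != null && nodeReq.containers() > 0) {
            return true;                        // node-local request present
        }
        // Falling back to the rack requires a rack request that relaxes locality.
        Request rackReq = requests.get(rack);
        return rackReq != null && rackReq.containers() > 0 && rackReq.relaxLocality();
    }

    public static void main(String[] args) {
        Map<String, Request> requests = Map.of(
            "host_1_0", new Request(1, true),
            "rack_1",   new Request(1, false),   // relaxLocality = false
            "*",        new Request(1, false));
        // node_1_1 is only reachable via rack_1, which does not relax locality.
        System.out.println(canAllocateOn("host_1_1", "rack_1", requests, Set.of("host_0_0"))); // false
        System.out.println(canAllocateOn("host_1_0", "rack_1", requests, Set.of("host_0_0"))); // true
    }
}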

Example 23 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class FifoScheduler, method assignContainers.

/**
   * Heart of the scheduler...
   * 
   * @param node node on which resources are available to be allocated
   */
private void assignContainers(FiCaSchedulerNode node) {
    LOG.debug("assignContainers:" + " node=" + node.getRMNode().getNodeAddress() + " #applications=" + applications.size());
    // Try to assign containers to applications in fifo order
    for (Map.Entry<ApplicationId, SchedulerApplication<FifoAppAttempt>> e : applications.entrySet()) {
        FifoAppAttempt application = e.getValue().getCurrentAppAttempt();
        if (application == null) {
            continue;
        }
        LOG.debug("pre-assignContainers");
        application.showRequests();
        synchronized (application) {
            // Check if this resource is on the blacklist
            if (SchedulerAppUtils.isPlaceBlacklisted(application, node, LOG)) {
                continue;
            }
            for (SchedulerRequestKey schedulerKey : application.getSchedulerKeys()) {
                int maxContainers = getMaxAllocatableContainers(application, schedulerKey, node, NodeType.OFF_SWITCH);
                // Ensure the application needs containers of this priority
                if (maxContainers > 0) {
                    int assignedContainers = assignContainersOnNode(node, application, schedulerKey);
                    // Do not assign out of order w.r.t priorities
                    if (assignedContainers == 0) {
                        break;
                    }
                }
            }
        }
        LOG.debug("post-assignContainers");
        application.showRequests();
        // Done
        if (Resources.lessThan(resourceCalculator, getClusterResource(), node.getUnallocatedResource(), minimumAllocation)) {
            break;
        }
    }
    // Update each application's headroom to account for the containers assigned in this update.
    for (SchedulerApplication<FifoAppAttempt> application : applications.values()) {
        FifoAppAttempt attempt = (FifoAppAttempt) application.getCurrentAppAttempt();
        if (attempt == null) {
            continue;
        }
        updateAppHeadRoom(attempt);
    }
}
Also used : SchedulerApplication(org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Map(java.util.Map) HashMap(java.util.HashMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey)
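
The control flow above is essentially: walk applications in FIFO order, try each application's scheduler keys in priority order, stop looking at lower priorities as soon as one yields nothing, and stop scheduling entirely once the node has less than a minimum allocation left. A compact sketch of that loop shape, with the Hadoop types replaced by plain placeholders invented for this example:

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public class FifoLoopSketch {

    // Minimal stand-in for an application attempt: pending 1 GB containers per priority.
    static class App {
        final String id;
        final int[] pendingPerPriority;   // index = priority (0 is highest)
        App(String id, int... pending) { this.id = id; this.pendingPerPriority = pending; }
    }

    static final long CONTAINER_MB = 1024;

    // Walk apps in FIFO order; within an app, stop at the first priority that gets nothing.
    static void assignContainers(Queue<App> fifo, long nodeFreeMb) {
        for (App app : fifo) {
            for (int p = 0; p < app.pendingPerPriority.length; p++) {
                int assigned = 0;
                while (app.pendingPerPriority[p] > 0 && nodeFreeMb >= CONTAINER_MB) {
                    app.pendingPerPriority[p]--;
                    nodeFreeMb -= CONTAINER_MB;
                    assigned++;
                }
                System.out.println(app.id + " priority " + p + " assigned " + assigned);
                if (assigned == 0) {
                    break;   // do not assign out of order w.r.t. priorities
                }
            }
            if (nodeFreeMb < CONTAINER_MB) {
                break;       // node cannot host even a minimum allocation
            }
        }
    }

    public static void main(String[] args) {
        Queue<App> fifo = new ArrayDeque<>(List.of(
            new App("app_0", 2, 1),    // 2 containers at priority 0, 1 at priority 1
            new App("app_1", 3)));
        assignContainers(fifo, 4 * CONTAINER_MB);   // node with 4 GB free
    }
}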

Example 24 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class AppSchedulingInfo, method updateResourceRequests.

/**
   * The ApplicationMaster is updating resource requirements for the
   * application, by asking for more resources and releasing resources acquired
   * by the application.
   *
   * @param requests
   *          resources to be acquired
   * @param recoverPreemptedRequestForAContainer
   *          recover ResourceRequest on preemption
   * @return true if any resource was updated, false otherwise
   */
public boolean updateResourceRequests(List<ResourceRequest> requests, boolean recoverPreemptedRequestForAContainer) {
    if (null == requests || requests.isEmpty()) {
        return false;
    }
    // Flag to track if any incoming requests update "ANY" requests
    boolean offswitchResourcesUpdated = false;
    try {
        this.writeLock.lock();
        // A map to group resource requests and dedup
        Map<SchedulerRequestKey, Map<String, ResourceRequest>> dedupRequests = new HashMap<>();
        // Group resource request by schedulerRequestKey and resourceName
        for (ResourceRequest request : requests) {
            SchedulerRequestKey schedulerKey = SchedulerRequestKey.create(request);
            if (!dedupRequests.containsKey(schedulerKey)) {
                dedupRequests.put(schedulerKey, new HashMap<>());
            }
            dedupRequests.get(schedulerKey).put(request.getResourceName(), request);
        }
        // Update scheduling placement set
        offswitchResourcesUpdated = addToPlacementSets(recoverPreemptedRequestForAContainer, dedupRequests);
        return offswitchResourcesUpdated;
    } finally {
        this.writeLock.unlock();
    }
}
Also used : HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) TreeMap(java.util.TreeMap) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey)
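
The grouping step above is a two-level dedup: requests are keyed first by their SchedulerRequestKey and then by resource name, so a later request for the same (key, name) pair overwrites the earlier one. The same shape in plain Java, with computeIfAbsent doing the work of the explicit containsKey/put dance; the Key and Request records below are simplified stand-ins, not the YARN types.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DedupRequestsSketch {

    // Illustrative stand-ins for SchedulerRequestKey and ResourceRequest.
    record Key(int priority, long allocationRequestId) {}
    record Request(Key key, String resourceName, int containers) {}

    // Group by key, then by resource name; the last request for a pair wins.
    static Map<Key, Map<String, Request>> dedup(List<Request> requests) {
        Map<Key, Map<String, Request>> grouped = new HashMap<>();
        for (Request r : requests) {
            grouped.computeIfAbsent(r.key(), k -> new HashMap<>())
                   .put(r.resourceName(), r);
        }
        return grouped;
    }

    public static void main(String[] args) {
        Key k = new Key(1, 0);
        List<Request> requests = List.of(
            new Request(k, "*", 5),
            new Request(k, "rack_1", 2),
            new Request(k, "*", 3));     // overwrites the earlier "*" request
        System.out.println(dedup(requests).get(k).get("*").containers());   // 3
    }
}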

Example 25 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class FSAppAttempt, method assignContainer.

private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Node offered to app: " + getName() + " reserved: " + reserved);
    }
    Collection<SchedulerRequestKey> keysToTry = (reserved) ? Collections.singletonList(node.getReservedContainer().getReservedSchedulerKey()) : getSchedulerKeys();
    // Rack or off-switch requests may be delayed (not scheduled) in order to promote better locality.
    try {
        writeLock.lock();
        // Walk the scheduler keys, trying node-local, then rack-local, then off-switch.
        for (SchedulerRequestKey schedulerKey : keysToTry) {
            // Skip keys with no pending container for this node; for reserved containers
            // this was already checked in isValidReservation.
            if (!reserved && !hasContainerForNode(schedulerKey, node)) {
                continue;
            }
            addSchedulingOpportunity(schedulerKey);
            PendingAsk rackLocalPendingAsk = getPendingAsk(schedulerKey, node.getRackName());
            PendingAsk nodeLocalPendingAsk = getPendingAsk(schedulerKey, node.getNodeName());
            if (nodeLocalPendingAsk.getCount() > 0 && !appSchedulingInfo.canDelayTo(schedulerKey, node.getNodeName())) {
                LOG.warn("Relax locality off is not supported on local request: " + nodeLocalPendingAsk);
            }
            NodeType allowedLocality;
            if (scheduler.isContinuousSchedulingEnabled()) {
                allowedLocality = getAllowedLocalityLevelByTime(schedulerKey, scheduler.getNodeLocalityDelayMs(), scheduler.getRackLocalityDelayMs(), scheduler.getClock().getTime());
            } else {
                allowedLocality = getAllowedLocalityLevel(schedulerKey, scheduler.getNumClusterNodes(), scheduler.getNodeLocalityThreshold(), scheduler.getRackLocalityThreshold());
            }
            if (rackLocalPendingAsk.getCount() > 0 && nodeLocalPendingAsk.getCount() > 0) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Assign container on " + node.getNodeName() + " node, assignType: NODE_LOCAL" + ", allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
                }
                return assignContainer(node, nodeLocalPendingAsk, NodeType.NODE_LOCAL, reserved, schedulerKey);
            }
            if (!appSchedulingInfo.canDelayTo(schedulerKey, node.getRackName())) {
                continue;
            }
            if (rackLocalPendingAsk.getCount() > 0 && (allowedLocality.equals(NodeType.RACK_LOCAL) || allowedLocality.equals(NodeType.OFF_SWITCH))) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Assign container on " + node.getNodeName() + " node, assignType: RACK_LOCAL" + ", allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
                }
                return assignContainer(node, rackLocalPendingAsk, NodeType.RACK_LOCAL, reserved, schedulerKey);
            }
            PendingAsk offswitchAsk = getPendingAsk(schedulerKey, ResourceRequest.ANY);
            if (!appSchedulingInfo.canDelayTo(schedulerKey, ResourceRequest.ANY)) {
                continue;
            }
            if (offswitchAsk.getCount() > 0) {
                if (getSchedulingPlacementSet(schedulerKey).getUniqueLocationAsks() <= 1 || allowedLocality.equals(NodeType.OFF_SWITCH)) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Assign container on " + node.getNodeName() + " node, assignType: OFF_SWITCH" + ", allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
                    }
                    return assignContainer(node, offswitchAsk, NodeType.OFF_SWITCH, reserved, schedulerKey);
                }
            }
            if (LOG.isTraceEnabled()) {
                LOG.trace("Can't assign container on " + node.getNodeName() + " node, allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
            }
        }
    } finally {
        writeLock.unlock();
    }
    return Resources.none();
}
Also used : NodeType(org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType) PendingAsk(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey)
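
The allowedLocality decision above is the fair scheduler's delay-scheduling rule: an application may relax from node-local to rack-local (and then to off-switch) only after it has passed up a number of scheduling opportunities proportional to the cluster size. The sketch below is a stripped-down, stateless version of that threshold logic; the names and constants are invented for illustration, and the real FSAppAttempt logic is stateful (it remembers the current allowed level per scheduler key and resets the opportunity count when it escalates).

public class DelaySchedulingSketch {

    enum Locality { NODE_LOCAL, RACK_LOCAL, OFF_SWITCH }

    // Relax locality once the app has skipped more offers than threshold * numClusterNodes.
    static Locality allowedLocality(int missedOpportunities, int numClusterNodes,
                                    double nodeThreshold, double rackThreshold) {
        if (missedOpportunities < nodeThreshold * numClusterNodes) {
            return Locality.NODE_LOCAL;
        }
        if (missedOpportunities < (nodeThreshold + rackThreshold) * numClusterNodes) {
            return Locality.RACK_LOCAL;
        }
        return Locality.OFF_SWITCH;
    }

    public static void main(String[] args) {
        int nodes = 100;
        // With thresholds of 0.1 each: stay node-local for the first 10 skipped offers,
        // accept rack-local up to 20, then fall back to off-switch.
        System.out.println(allowedLocality(5, nodes, 0.1, 0.1));    // NODE_LOCAL
        System.out.println(allowedLocality(15, nodes, 0.1, 0.1));   // RACK_LOCAL
        System.out.println(allowedLocality(40, nodes, 0.1, 0.1));   // OFF_SWITCH
    }
}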

Aggregations

SchedulerRequestKey (org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey): 35 usages
Test (org.junit.Test): 16 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 15 usages
Priority (org.apache.hadoop.yarn.api.records.Priority): 15 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 13 usages
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 12 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 10 usages
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 10 usages
Container (org.apache.hadoop.yarn.api.records.Container): 9 usages
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 9 usages
ArrayList (java.util.ArrayList): 8 usages
HashMap (java.util.HashMap): 8 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 8 usages
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 8 usages
ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager): 7 usages
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 6 usages
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 6 usages
Map (java.util.Map): 5 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4 usages
RMContainerImpl (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl): 4 usages