
Example 21 with ResourceScheduler

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.

the class RMServerUtils method checkSchedContainerChangeRequest.

/**
   * Validate an increase/decrease request against the container's current
   * allocation and the node's total capability.
   * <pre>
   * - Throw an InvalidResourceRequestException when the request is invalid
   * </pre>
   */
public static void checkSchedContainerChangeRequest(SchedContainerChangeRequest request, boolean increase) throws InvalidResourceRequestException {
    RMContext rmContext = request.getRmContext();
    ContainerId containerId = request.getContainerId();
    RMContainer rmContainer = request.getRMContainer();
    Resource targetResource = request.getTargetCapacity();
    // Compare targetResource and original resource
    Resource originalResource = rmContainer.getAllocatedResource();
    // The comparison must hold for every resource dimension: for example, a
    // <10G, 10> container cannot be "increased" to <20G, 8>, because vcores shrink
    if (increase) {
        if (originalResource.getMemorySize() > targetResource.getMemorySize() || originalResource.getVirtualCores() > targetResource.getVirtualCores()) {
            String msg = "Trying to increase a container, but target resource has some" + " resource < original resource, target=" + targetResource + " original=" + originalResource + " containerId=" + containerId;
            throw new InvalidResourceRequestException(msg);
        }
    } else {
        if (originalResource.getMemorySize() < targetResource.getMemorySize() || originalResource.getVirtualCores() < targetResource.getVirtualCores()) {
            String msg = "Trying to decrease a container, but target resource has " + "some resource > original resource, target=" + targetResource + " original=" + originalResource + " containerId=" + containerId;
            throw new InvalidResourceRequestException(msg);
        }
    }
    // Reject the request when the target resource is more than the node (NM) can offer
    ResourceScheduler scheduler = rmContext.getScheduler();
    RMNode rmNode = request.getSchedulerNode().getRMNode();
    if (!Resources.fitsIn(scheduler.getResourceCalculator(), scheduler.getClusterResource(), targetResource, rmNode.getTotalCapability())) {
        String msg = "Target resource=" + targetResource + " of containerId=" + containerId + " is more than node's total resource=" + rmNode.getTotalCapability();
        throw new InvalidResourceRequestException(msg);
    }
}
Also used : InvalidResourceRequestException(org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) Resource(org.apache.hadoop.yarn.api.records.Resource) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)
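
To make the per-dimension comparison concrete, here is a minimal standalone sketch. It is not part of the Hadoop source; the class and method names are made up for illustration. It assumes Resources.fitsIn(Resource, Resource), which performs the same per-vector >=/<= check that checkSchedContainerChangeRequest spells out by hand.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ContainerChangeCheckSketch {
    // Mirrors the comparison in checkSchedContainerChangeRequest: an increase is
    // only valid when no dimension of the target is smaller than the original
    // allocation, and a decrease only when no dimension is larger.
    static boolean isValidChange(Resource original, Resource target, boolean increase) {
        if (increase) {
            // target must dominate the original allocation in every dimension
            return Resources.fitsIn(original, target);
        }
        // decrease: target must fit inside the original allocation
        return Resources.fitsIn(target, original);
    }

    public static void main(String[] args) {
        Resource original = Resource.newInstance(10 * 1024, 10); // <10G, 10>
        Resource target = Resource.newInstance(20 * 1024, 8);    // <20G, 8>
        // Invalid as an increase: memory grows but vcores shrink, so one
        // dimension falls below the original allocation.
        System.out.println(isValidChange(original, target, true));  // false
        System.out.println(isValidChange(original, target, false)); // false
    }
}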

Example 22 with ResourceScheduler

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.

the class RMServerUtils method validateIncreaseDecreaseRequest.

// Sanity check and normalize target resource
private static boolean validateIncreaseDecreaseRequest(RMContext rmContext, UpdateContainerRequest request, Resource maximumAllocation) {
    if (request.getCapability().getMemorySize() < 0 || request.getCapability().getMemorySize() > maximumAllocation.getMemorySize()) {
        return false;
    }
    if (request.getCapability().getVirtualCores() < 0 || request.getCapability().getVirtualCores() > maximumAllocation.getVirtualCores()) {
        return false;
    }
    ResourceScheduler scheduler = rmContext.getScheduler();
    request.setCapability(scheduler.getNormalizedResource(request.getCapability()));
    return true;
}
Also used : ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler)
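
The interesting step in this helper is the normalization. The sketch below uses hypothetical allocation values and is not from the Hadoop source; it shows roughly what scheduler.getNormalizedResource() does for a memory-only calculator, using Resources.normalize() with a DefaultResourceCalculator: the requested capability is rounded up to a multiple of the minimum allocation and clipped to the maximum.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class NormalizeSketch {
    public static void main(String[] args) {
        Resource min = Resource.newInstance(1024, 1);      // assumed minimum allocation
        Resource max = Resource.newInstance(8 * 1024, 4);  // assumed maximum allocation
        Resource asked = Resource.newInstance(1500, 1);    // requested capability

        // Round the request up to a multiple of the minimum allocation and clip
        // it to the maximum, using the minimum as the step factor.
        Resource normalized = Resources.normalize(
            new DefaultResourceCalculator(), asked, min, max, min);
        // With a memory-only calculator this typically prints <memory:2048, vCores:1>
        System.out.println(normalized);
    }
}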

Example 23 with ResourceScheduler

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.

the class RMWebServices method getSchedulerInfo.

@GET
@Path("/scheduler")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public SchedulerTypeInfo getSchedulerInfo() {
    init();
    ResourceScheduler rs = rm.getResourceScheduler();
    SchedulerInfo sinfo;
    if (rs instanceof CapacityScheduler) {
        CapacityScheduler cs = (CapacityScheduler) rs;
        CSQueue root = cs.getRootQueue();
        sinfo = new CapacitySchedulerInfo(root, cs);
    } else if (rs instanceof FairScheduler) {
        FairScheduler fs = (FairScheduler) rs;
        sinfo = new FairSchedulerInfo(fs);
    } else if (rs instanceof FifoScheduler) {
        sinfo = new FifoSchedulerInfo(this.rm);
    } else {
        throw new NotFoundException("Unknown scheduler configured");
    }
    return new SchedulerTypeInfo(sinfo);
}
Also used : FairSchedulerInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo) CapacitySchedulerInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo) FifoSchedulerInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo) SchedulerInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo) SchedulerTypeInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo) FairScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler) CapacitySchedulerInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo) FairSchedulerInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FairSchedulerInfo) NotFoundException(org.apache.hadoop.yarn.webapp.NotFoundException) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) FifoScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler) FifoSchedulerInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo) CapacityScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler) CSQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
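
For reference, a minimal client-side sketch of calling the REST endpoint this handler implements. The RM webapp address (localhost:8088) is an assumption; /ws/v1/cluster is the standard RM webservice root, plus the @Path("/scheduler") above.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SchedulerInfoClient {
    public static void main(String[] args) throws Exception {
        // localhost:8088 is the default RM webapp address; adjust for your cluster.
        URL url = new URL("http://localhost:8088/ws/v1/cluster/scheduler");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // JSON produced by getSchedulerInfo()
            }
        } finally {
            conn.disconnect();
        }
    }
}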

Example 24 with ResourceScheduler

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.

the class RMWebServices method getNodes.

/**
   * Returns all nodes in the cluster. If the states param is given, returns
   * all nodes that are in the comma-separated list of states.
   */
@GET
@Path("/nodes")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodesInfo getNodes(@QueryParam("states") String states) {
    init();
    ResourceScheduler sched = this.rm.getResourceScheduler();
    if (sched == null) {
        throw new NotFoundException("Null ResourceScheduler instance");
    }
    EnumSet<NodeState> acceptedStates;
    if (states == null) {
        acceptedStates = EnumSet.allOf(NodeState.class);
    } else {
        acceptedStates = EnumSet.noneOf(NodeState.class);
        for (String stateStr : states.split(",")) {
            acceptedStates.add(NodeState.valueOf(StringUtils.toUpperCase(stateStr)));
        }
    }
    Collection<RMNode> rmNodes = RMServerUtils.queryRMNodes(this.rm.getRMContext(), acceptedStates);
    NodesInfo nodesInfo = new NodesInfo();
    for (RMNode rmNode : rmNodes) {
        NodeInfo nodeInfo = new NodeInfo(rmNode, sched);
        if (EnumSet.of(NodeState.LOST, NodeState.DECOMMISSIONED, NodeState.REBOOTED).contains(rmNode.getState())) {
            nodeInfo.setNodeHTTPAddress(EMPTY);
        }
        nodesInfo.add(nodeInfo);
    }
    return nodesInfo;
}
Also used : RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) NodeState(org.apache.hadoop.yarn.api.records.NodeState) NodesInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo) LabelsToNodesInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo) NodeInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo) NotFoundException(org.apache.hadoop.yarn.webapp.NotFoundException) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
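
The states filter is the only non-obvious step here. The sketch below is a hypothetical helper, not part of RMWebServices; it mirrors how the comma-separated states query parameter is turned into an EnumSet<NodeState>, and an unknown state name makes NodeState.valueOf() throw IllegalArgumentException.

import java.util.EnumSet;
import java.util.Locale;
import org.apache.hadoop.yarn.api.records.NodeState;

public class NodeStateFilterSketch {
    static EnumSet<NodeState> parseStates(String states) {
        if (states == null) {
            // no filter given: accept every node state
            return EnumSet.allOf(NodeState.class);
        }
        EnumSet<NodeState> accepted = EnumSet.noneOf(NodeState.class);
        for (String s : states.split(",")) {
            accepted.add(NodeState.valueOf(s.trim().toUpperCase(Locale.ENGLISH)));
        }
        return accepted;
    }

    public static void main(String[] args) {
        // e.g. [RUNNING, LOST]: only nodes in these states are returned
        System.out.println(parseStates("running,lost"));
    }
}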

Example 25 with ResourceScheduler

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.

the class RMWebServices method getApps.

@GET
@Path("/apps")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public AppsInfo getApps(@Context HttpServletRequest hsr, @QueryParam("state") String stateQuery, @QueryParam("states") Set<String> statesQuery, @QueryParam("finalStatus") String finalStatusQuery, @QueryParam("user") String userQuery, @QueryParam("queue") String queueQuery, @QueryParam("limit") String count, @QueryParam("startedTimeBegin") String startedBegin, @QueryParam("startedTimeEnd") String startedEnd, @QueryParam("finishedTimeBegin") String finishBegin, @QueryParam("finishedTimeEnd") String finishEnd, @QueryParam("applicationTypes") Set<String> applicationTypes, @QueryParam("applicationTags") Set<String> applicationTags) {
    boolean checkCount = false;
    boolean checkStart = false;
    boolean checkEnd = false;
    boolean checkAppTypes = false;
    boolean checkAppStates = false;
    boolean checkAppTags = false;
    long countNum = 0;
    // Defaults used when the begin/end time parameters are not specified
    long sBegin = 0;
    long sEnd = Long.MAX_VALUE;
    long fBegin = 0;
    long fEnd = Long.MAX_VALUE;
    init();
    if (count != null && !count.isEmpty()) {
        checkCount = true;
        countNum = Long.parseLong(count);
        if (countNum <= 0) {
            throw new BadRequestException("limit value must be greater then 0");
        }
    }
    if (startedBegin != null && !startedBegin.isEmpty()) {
        checkStart = true;
        sBegin = Long.parseLong(startedBegin);
        if (sBegin < 0) {
            throw new BadRequestException("startedTimeBegin must be greater than 0");
        }
    }
    if (startedEnd != null && !startedEnd.isEmpty()) {
        checkStart = true;
        sEnd = Long.parseLong(startedEnd);
        if (sEnd < 0) {
            throw new BadRequestException("startedTimeEnd must be greater than 0");
        }
    }
    if (sBegin > sEnd) {
        throw new BadRequestException("startedTimeEnd must be greater than startTimeBegin");
    }
    if (finishBegin != null && !finishBegin.isEmpty()) {
        checkEnd = true;
        fBegin = Long.parseLong(finishBegin);
        if (fBegin < 0) {
            throw new BadRequestException("finishTimeBegin must be greater than 0");
        }
    }
    if (finishEnd != null && !finishEnd.isEmpty()) {
        checkEnd = true;
        fEnd = Long.parseLong(finishEnd);
        if (fEnd < 0) {
            throw new BadRequestException("finishTimeEnd must be greater than 0");
        }
    }
    if (fBegin > fEnd) {
        throw new BadRequestException("finishTimeEnd must be greater than finishTimeBegin");
    }
    Set<String> appTypes = parseQueries(applicationTypes, false);
    if (!appTypes.isEmpty()) {
        checkAppTypes = true;
    }
    Set<String> appTags = parseQueries(applicationTags, false);
    if (!appTags.isEmpty()) {
        checkAppTags = true;
    }
    // stateQuery is deprecated.
    if (stateQuery != null && !stateQuery.isEmpty()) {
        statesQuery.add(stateQuery);
    }
    Set<String> appStates = parseQueries(statesQuery, true);
    if (!appStates.isEmpty()) {
        checkAppStates = true;
    }
    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    if (checkStart) {
        request.setStartRange(sBegin, sEnd);
    }
    if (checkEnd) {
        request.setFinishRange(fBegin, fEnd);
    }
    if (checkCount) {
        request.setLimit(countNum);
    }
    if (checkAppTypes) {
        request.setApplicationTypes(appTypes);
    }
    if (checkAppTags) {
        request.setApplicationTags(appTags);
    }
    if (checkAppStates) {
        request.setApplicationStates(appStates);
    }
    if (queueQuery != null && !queueQuery.isEmpty()) {
        ResourceScheduler rs = rm.getResourceScheduler();
        if (rs instanceof CapacityScheduler) {
            CapacityScheduler cs = (CapacityScheduler) rs;
            // validate queue exists
            try {
                cs.getQueueInfo(queueQuery, false, false);
            } catch (IOException e) {
                throw new BadRequestException(e.getMessage());
            }
        }
        Set<String> queues = new HashSet<String>(1);
        queues.add(queueQuery);
        request.setQueues(queues);
    }
    if (userQuery != null && !userQuery.isEmpty()) {
        Set<String> users = new HashSet<String>(1);
        users.add(userQuery);
        request.setUsers(users);
    }
    List<ApplicationReport> appReports = null;
    try {
        appReports = rm.getClientRMService().getApplications(request, false).getApplicationList();
    } catch (YarnException e) {
        LOG.error("Unable to retrieve apps from ClientRMService", e);
        throw new YarnRuntimeException("Unable to retrieve apps from ClientRMService", e);
    }
    final ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext().getRMApps();
    AppsInfo allApps = new AppsInfo();
    for (ApplicationReport report : appReports) {
        RMApp rmapp = apps.get(report.getApplicationId());
        if (rmapp == null) {
            continue;
        }
        if (finalStatusQuery != null && !finalStatusQuery.isEmpty()) {
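            // valueOf() validates the query value and throws IllegalArgumentException
            // for an unknown final status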
            FinalApplicationStatus.valueOf(finalStatusQuery);
            if (!rmapp.getFinalApplicationStatus().toString().equalsIgnoreCase(finalStatusQuery)) {
                continue;
            }
        }
        AppInfo app = new AppInfo(rm, rmapp, hasAccess(rmapp, hsr), WebAppUtils.getHttpSchemePrefix(conf));
        allApps.add(app);
    }
    return allApps;
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) IOException(java.io.IOException) GetApplicationsRequest(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) AppInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) BadRequestException(org.apache.hadoop.yarn.webapp.BadRequestException) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) AppsInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo) CapacityScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler) HashSet(java.util.HashSet) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
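
getApps() ultimately forwards the assembled GetApplicationsRequest to ClientRMService. The same kind of filtered query can be issued programmatically through YarnClient; the sketch below (cluster configuration and printed fields are assumptions) lists RUNNING applications, which corresponds to the states filter the REST handler applies.

import java.util.EnumSet;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListRunningApps {
    public static void main(String[] args) throws Exception {
        YarnClient client = YarnClient.createYarnClient();
        client.init(new YarnConfiguration()); // picks up yarn-site.xml from the classpath
        client.start();
        try {
            // Filter by application state, analogous to the "states" query parameter
            for (ApplicationReport report :
                    client.getApplications(EnumSet.of(YarnApplicationState.RUNNING))) {
                System.out.println(report.getApplicationId() + " " + report.getQueue());
            }
        } finally {
            client.stop();
        }
    }
}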

Aggregations

ResourceScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler): 51 usages
Test (org.junit.Test): 22 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 12 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 11 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 10 usages
Configuration (org.apache.hadoop.conf.Configuration): 8 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 8 usages
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 8 usages
CapacityScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler): 8 usages
Container (org.apache.hadoop.yarn.api.records.Container): 7 usages
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 7 usages
AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse): 6 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 6 usages
Priority (org.apache.hadoop.yarn.api.records.Priority): 6 usages
Application (org.apache.hadoop.yarn.server.resourcemanager.Application): 6 usages
ResourceManager (org.apache.hadoop.yarn.server.resourcemanager.ResourceManager): 6 usages
IOException (java.io.IOException): 5 usages
ArrayList (java.util.ArrayList): 5 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 5 usages
YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException): 5 usages