
Example 56 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class Client, method run.

/**
   * Main run function for the client
   * @return true if application completed successfully
   * @throws IOException
   * @throws YarnException
   */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());
    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress=" + node.getHttpAddress() + ", nodeRackName=" + node.getRackName() + ", nodeNumContainers=" + node.getNumContainers());
    }
    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl=" + userAcl.name());
        }
    }
    if (domainId != null && domainId.length() > 0 && toCreateDomain) {
        prepareTimelineDomain();
    }
    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request 
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max. 
    // Dump out information about cluster capability as seen by the resource manager
    long maxMem = appResponse.getMaximumResourceCapability().getMemorySize();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    // A resource ask cannot exceed the max. 
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }
    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);
    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value." + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }
    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);
    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }
    Set<String> tags = new HashSet<String>();
    if (flowName != null) {
        tags.add(TimelineUtils.generateFlowNameTag(flowName));
    }
    if (flowVersion != null) {
        tags.add(TimelineUtils.generateFlowVersionTag(flowVersion));
    }
    if (flowRunId != 0) {
        tags.add(TimelineUtils.generateFlowRunIdTag(flowRunId));
    }
    appContext.setApplicationTags(tags);
    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources			
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null);
    // Set the log4j properties if needed 
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }
    // The shell script has to be made available on the final container(s)
    // where it will be executed. 
    // To do this, we need to first copy into the filesystem that is visible 
    // to the yarn framework. 
    // We do not need to set this as a local resource for the application 
    // master as the application master does not need it. 		
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.toString() + "/" + SCRIPT_PATH;
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }
    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.toString(), localResources, shellCommand);
    }
    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.toString(), localResources, StringUtils.join(shellArgs, " "));
    }
    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);
    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the 
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));
    if (domainId != null && domainId.length() > 0) {
        env.put(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN, domainId);
    }
    // Add AppMaster.jar location to classpath 		
    // At some point we should not be required to add 
    // the hadoop specific classpaths to the env. 
    // It should be provided out of the box. 
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$()).append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH, YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");
    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    // Set the necessary command to execute the application master 
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);
    // Set java executable command 
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name 
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }
    vargs.addAll(containerRetryOptions);
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null, null, null);
    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and 
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);
    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now, only getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }
    appContext.setAMContainerSpec(amContainer);
    // Set the priority for the application master
    // TODO - what is the range for priority? how to decide? 
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);
    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success 
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);
    // Monitor the application
    return monitorApplication(appId);
}
Also used : QueueInfo(org.apache.hadoop.yarn.api.records.QueueInfo) GetNewApplicationResponse(org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse) FileStatus(org.apache.hadoop.fs.FileStatus) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.security.token.Token) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) Vector(java.util.Vector) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) YarnClusterMetrics(org.apache.hadoop.yarn.api.records.YarnClusterMetrics) YarnClientApplication(org.apache.hadoop.yarn.client.api.YarnClientApplication) Priority(org.apache.hadoop.yarn.api.records.Priority) QueueACL(org.apache.hadoop.yarn.api.records.QueueACL) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) QueueUserACLInfo(org.apache.hadoop.yarn.api.records.QueueUserACLInfo) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Map(java.util.Map) HashMap(java.util.HashMap) NodeReport(org.apache.hadoop.yarn.api.records.NodeReport) Credentials(org.apache.hadoop.security.Credentials)
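
The run() method above calls a private addToLocalResources helper that is not reproduced in this excerpt. The following is only a minimal sketch of what such a helper typically does (copy a local file, or write an in-memory string, to a per-application HDFS directory and register it as a LocalResource); the signature is inferred from the call sites above, appName is the same Client field used elsewhere in run(), and the body is illustrative rather than the actual Hadoop source.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.util.ConverterUtils;

// Sketch only: upload (or materialize) a file under <home>/<appName>/<appId>/<fileDstPath>
// and register it so YARN localizes it into the AM container's working directory.
private void addToLocalResources(FileSystem fs, String fileSrcPath,
        String fileDstPath, String appId, Map<String, LocalResource> localResources,
        String resources) throws IOException {
    String suffix = appName + "/" + appId + "/" + fileDstPath;
    Path dst = new Path(fs.getHomeDirectory(), suffix);
    if (fileSrcPath == null) {
        // No local file given: write the supplied string (e.g. the shell command) to HDFS.
        try (FSDataOutputStream out = fs.create(dst, true)) {
            out.writeUTF(resources);
        }
    } else {
        fs.copyFromLocalFile(new Path(fileSrcPath), dst);
    }
    FileStatus status = fs.getFileStatus(dst);
    LocalResource resource = LocalResource.newInstance(
        ConverterUtils.getYarnUrlFromPath(dst),
        LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
        status.getLen(), status.getModificationTime());
    localResources.put(fileDstPath, resource);
}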

Example 57 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class ApplicationMaster, method setupContainerAskForRM.

/**
   * Set up the request that will be sent to the RM for the container ask.
   *
   * @return the ContainerRequest to be sent to the RM
   */
private ContainerRequest setupContainerAskForRM() {
    // setup requirements for hosts
    // using * as any host will do for the distributed shell app
    // set the priority for the request
    // TODO - what is the range for priority? how to decide?
    Priority pri = Priority.newInstance(requestPriority);
    // Set up resource type requirements
    // For now, memory and CPU are supported so we set memory and cpu requirements
    Resource capability = Resource.newInstance(containerMemory, containerVirtualCores);
    ContainerRequest request = new ContainerRequest(capability, null, null, pri);
    LOG.info("Requested container ask: " + request.toString());
    return request;
}
Also used : Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StartContainerRequest(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) ContainerRequest(org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest)
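
To actually ask the ResourceManager for containers, the ApplicationMaster hands the returned request to its AM-RM client. A minimal usage sketch is below; the amRMClient field, the numTotalContainers count, and the wrapper method name are illustrative assumptions, not quoted from this excerpt.

import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

// One ContainerRequest per desired container; every ask carries the same
// Priority and Resource built by setupContainerAskForRM().
private void requestContainers(AMRMClientAsync<ContainerRequest> amRMClient,
        int numTotalContainers) {
    for (int i = 0; i < numTotalContainers; ++i) {
        ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
}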

Example 58 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class BuilderUtils, method newPriority.

public static Priority newPriority(int p) {
    Priority priority = recordFactory.newRecordInstance(Priority.class);
    priority.setPriority(p);
    return priority;
}
Also used : Priority(org.apache.hadoop.yarn.api.records.Priority)
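
BuilderUtils is an internal server-side helper; application code would normally create the record through its public factory instead. A small self-contained sketch of the public-API equivalent (the class name here is just for illustration):

import org.apache.hadoop.yarn.api.records.Priority;

public class PriorityFactoryExample {
    public static void main(String[] args) {
        // Public-API equivalent of BuilderUtils.newPriority(10).
        Priority priority = Priority.newInstance(10);
        System.out.println("priority=" + priority.getPriority());
    }
}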

Example 59 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class TestResourceManager, method testResourceAllocation.

@Test
public void testResourceAllocation() throws IOException, YarnException, InterruptedException {
    LOG.info("--- START: testResourceAllocation ---");
    final int memory = 4 * 1024;
    final int vcores = 4;
    // Register node1
    String host1 = "host1";
    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 = registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(memory, vcores));
    // Register node2
    String host2 = "host2";
    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm2 = registerNode(host2, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(memory / 2, vcores / 2));
    // Submit an application
    Application application = new Application("user1", resourceManager);
    application.submit();
    application.addNodeManager(host1, 1234, nm1);
    application.addNodeManager(host2, 1234, nm2);
    // Application resource requirements
    final int memory1 = 1024;
    Resource capability1 = Resources.createResource(memory1, 1);
    Priority priority1 = Priority.newInstance(1);
    application.addResourceRequestSpec(priority1, capability1);
    Task t1 = new Task(application, priority1, new String[] { host1, host2 });
    application.addTask(t1);
    final int memory2 = 2048;
    Resource capability2 = Resources.createResource(memory2, 1);
    // higher
    Priority priority0 = Priority.newInstance(0);
    application.addResourceRequestSpec(priority0, capability2);
    // Send resource requests to the scheduler
    application.schedule();
    // Send a heartbeat to kick the tires on the Scheduler
    nodeUpdate(nm1);
    // Get allocations from the scheduler
    application.schedule();
    checkResourceUsage(nm1, nm2);
    LOG.info("Adding new tasks...");
    Task t2 = new Task(application, priority1, new String[] { host1, host2 });
    application.addTask(t2);
    Task t3 = new Task(application, priority0, new String[] { ResourceRequest.ANY });
    application.addTask(t3);
    // Send resource requests to the scheduler
    application.schedule();
    checkResourceUsage(nm1, nm2);
    // Send heartbeats to kick the tires on the Scheduler
    nodeUpdate(nm2);
    nodeUpdate(nm2);
    nodeUpdate(nm1);
    nodeUpdate(nm1);
    // Get allocations from the scheduler
    LOG.info("Trying to allocate...");
    application.schedule();
    checkResourceUsage(nm1, nm2);
    // Complete tasks
    LOG.info("Finishing up tasks...");
    application.finishTask(t1);
    application.finishTask(t2);
    application.finishTask(t3);
    // Notify scheduler application is finished.
    AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(application.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false);
    resourceManager.getResourceScheduler().handle(appRemovedEvent1);
    checkResourceUsage(nm1, nm2);
    LOG.info("--- END: testResourceAllocation ---");
}
Also used : Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) AppAttemptRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent) Test(org.junit.Test)
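
The Application and Task objects above are test-only helpers. Expressed through the public records API, the same two priority tiers from this test look roughly like the sketch below; the memory sizes are copied from the test, while the class and method names are illustrative.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class PriorityRequestSketch {
    // Two asks at different priorities: the priority-0 ask (2048 MB) is the more
    // urgent one, mirroring the "// higher" comment in the test above.
    static List<ResourceRequest> buildRequests() {
        ResourceRequest task = ResourceRequest.newInstance(
            Priority.newInstance(1), ResourceRequest.ANY,
            Resource.newInstance(1024, 1), 1);
        ResourceRequest urgent = ResourceRequest.newInstance(
            Priority.newInstance(0), ResourceRequest.ANY,
            Resource.newInstance(2048, 1), 1);
        return Arrays.asList(urgent, task);
    }
}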

Example 60 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class TestReservations, method testAssignToQueue.

@Test
public void testAssignToQueue() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf);
    // Manipulate queue 'a'
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Users
    final String user_0 = "user_0";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    a.submitApplicationAttempt(app_1, user_0);
    // Setup some nodes
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    String host_2 = "host_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 8 * GB);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getNode(node_2.getNodeID())).thenReturn(node_2);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, priorityReduce, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory)));
    // Start testing...
    // Only AM
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(14 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Only 1 map - simulating reduce
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(11 * GB, a.getMetrics().getAvailableMB());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Only 1 map to other node - simulating reduce
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(8 * GB, a.getMetrics().getAvailableMB());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    // now add in reservations and make sure it continues if config set
    // allocate to queue so that the potential new capacity is greater then
    // absoluteMaxCapacity
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, a.getMetrics().getAvailableMB());
    assertEquals(3 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    ResourceLimits limits = new ResourceLimits(Resources.createResource(13 * GB));
    boolean res = a.canAssignToThisQueue(Resources.createResource(13 * GB), RMNodeLabelsManager.NO_LABEL, limits, Resources.createResource(3 * GB), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertTrue(res);
    // 16GB total, 13GB consumed (8 allocated, 5 reserved). asking for 5GB so we would have to
    // unreserve 2GB to get the total 5GB needed.
    // also note vcore checks not enabled
    assertEquals(0, limits.getHeadroom().getMemorySize());
    refreshQueuesTurnOffReservationsContLook(a, csConf);
    // should return false since reservations continue look is off.
    limits = new ResourceLimits(Resources.createResource(13 * GB));
    res = a.canAssignToThisQueue(Resources.createResource(13 * GB), RMNodeLabelsManager.NO_LABEL, limits, Resources.createResource(3 * GB), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertFalse(res);
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) AMState(org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.AMState) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) Test(org.junit.Test)
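
A brief note on the numbers used for priorityAM, priorityMap and priorityReduce above: within an application, YARN treats a numerically smaller priority value as the more important request (Example 59 labels priority 0 as higher than priority 1 for the same reason). A trivial sketch of that convention using the public factory; the class name is illustrative.

import org.apache.hadoop.yarn.api.records.Priority;

public class PriorityConventionSketch {
    public static void main(String[] args) {
        // AM ask first, then maps, then reduces: smaller value = more urgent ask.
        Priority am = Priority.newInstance(1);
        Priority map = Priority.newInstance(5);
        Priority reduce = Priority.newInstance(10);
        System.out.println(am.getPriority() + " < " + map.getPriority()
            + " < " + reduce.getPriority());
    }
}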

Aggregations

Priority (org.apache.hadoop.yarn.api.records.Priority): 216
Test (org.junit.Test): 139
Resource (org.apache.hadoop.yarn.api.records.Resource): 109
Container (org.apache.hadoop.yarn.api.records.Container): 64
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 62
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 49
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 43
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 40
TezTaskAttemptID (org.apache.tez.dag.records.TezTaskAttemptID): 40
Configuration (org.apache.hadoop.conf.Configuration): 37
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 34
ArrayList (java.util.ArrayList): 33
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 31
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 30
TezConfiguration (org.apache.tez.dag.api.TezConfiguration): 29
TaskSchedulerContextDrainable (org.apache.tez.dag.app.rm.TestTaskSchedulerHelpers.TaskSchedulerContextDrainable): 27
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 26
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 25
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 24
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 23