
Example 76 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

The class TestApplicationLimits, method testAMResourceLimit.

@Test
public void testAMResourceLimit() throws Exception {
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // This uses the default 10% of the cluster for the maximum AM resources
    // allowed, i.e. 8 GB of the 80 GB cluster for AMs at the queue level.
    // The user AM limit is 4 GB initially (based on the queue absolute
    // capacity) when there is only one user, and drops to 2 GB (the user
    // limit) when there is a second user.
    Resource clusterResource = Resource.newInstance(80 * GB, 40);
    queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    ActiveUsersManager activeUsersManager = mock(ActiveUsersManager.class);
    when(queue.getAbstractUsersManager()).thenReturn(activeUsersManager);
    assertEquals(Resource.newInstance(8 * GB, 1), queue.calculateAndGetAMResourceLimit());
    assertEquals(Resource.newInstance(4 * GB, 1), queue.getUserAMResourceLimit());
    // Two apps for user_0, both start
    int APPLICATION_ID = 0;
    FiCaSchedulerApp app_0 = getMockApplication(APPLICATION_ID++, user_0, Resource.newInstance(2 * GB, 1));
    queue.submitApplicationAttempt(app_0, user_0);
    assertEquals(1, queue.getNumActiveApplications());
    assertEquals(0, queue.getNumPendingApplications());
    assertEquals(1, queue.getNumActiveApplications(user_0));
    assertEquals(0, queue.getNumPendingApplications(user_0));
    when(activeUsersManager.getNumActiveUsers()).thenReturn(1);
    FiCaSchedulerApp app_1 = getMockApplication(APPLICATION_ID++, user_0, Resource.newInstance(2 * GB, 1));
    queue.submitApplicationAttempt(app_1, user_0);
    assertEquals(2, queue.getNumActiveApplications());
    assertEquals(0, queue.getNumPendingApplications());
    assertEquals(2, queue.getNumActiveApplications(user_0));
    assertEquals(0, queue.getNumPendingApplications(user_0));
    // AMLimits unchanged
    assertEquals(Resource.newInstance(8 * GB, 1), queue.getAMResourceLimit());
    assertEquals(Resource.newInstance(4 * GB, 1), queue.getUserAMResourceLimit());
    // One app for user_1, starts
    FiCaSchedulerApp app_2 = getMockApplication(APPLICATION_ID++, user_1, Resource.newInstance(2 * GB, 1));
    queue.submitApplicationAttempt(app_2, user_1);
    assertEquals(3, queue.getNumActiveApplications());
    assertEquals(0, queue.getNumPendingApplications());
    assertEquals(1, queue.getNumActiveApplications(user_1));
    assertEquals(0, queue.getNumPendingApplications(user_1));
    when(activeUsersManager.getNumActiveUsers()).thenReturn(2);
    // Now userAMResourceLimit drops to the queue configured 50% as there is
    // another user active
    assertEquals(Resource.newInstance(8 * GB, 1), queue.getAMResourceLimit());
    assertEquals(Resource.newInstance(2 * GB, 1), queue.getUserAMResourceLimit());
    // Second user_1 app cannot start
    FiCaSchedulerApp app_3 = getMockApplication(APPLICATION_ID++, user_1, Resource.newInstance(2 * GB, 1));
    queue.submitApplicationAttempt(app_3, user_1);
    assertEquals(3, queue.getNumActiveApplications());
    assertEquals(1, queue.getNumPendingApplications());
    assertEquals(1, queue.getNumActiveApplications(user_1));
    assertEquals(1, queue.getNumPendingApplications(user_1));
    // Now finish app so another should be activated
    queue.finishApplicationAttempt(app_2, A);
    assertEquals(3, queue.getNumActiveApplications());
    assertEquals(0, queue.getNumPendingApplications());
    assertEquals(1, queue.getNumActiveApplications(user_1));
    assertEquals(0, queue.getNumPendingApplications(user_1));
}
Also used: ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), Resource (org.apache.hadoop.yarn.api.records.Resource), ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager), Test (org.junit.Test)
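The 8 GB / 4 GB / 2 GB expectations in this test are plain arithmetic. Below is a minimal sketch of that arithmetic, assuming the default maximum-am-resource-percent of 0.1; the 0.5 factors are assumptions inferred from the expected values (the queue's absolute capacity for the one-user case, the user limit for the two-user case), not values read from the test's stubbed queue configuration:

public class AmLimitMath {
    public static void main(String[] args) {
        final int clusterGb = 80;
        // Default maximum-am-resource-percent: 10% of cluster resources (assumed).
        final double maxAmResourcePercent = 0.10;

        // Queue-level AM limit: 10% of 80 GB = 8 GB.
        double queueAmLimitGb = clusterGb * maxAmResourcePercent;

        // One active user: limited by the queue absolute capacity
        // (assumed to contribute a 0.5 factor here) to 4 GB.
        double userAmLimitOneUser = queueAmLimitGb * 0.5;

        // Two active users: the assumed 50% user limit halves it to 2 GB.
        double userAmLimitTwoUsers = queueAmLimitGb * 0.5 * 0.5;

        System.out.println(queueAmLimitGb);      // 8.0
        System.out.println(userAmLimitOneUser);  // 4.0
        System.out.println(userAmLimitTwoUsers); // 2.0
    }
}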

Example 77 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

The class TestApplicationLimitsByPartition, method testHeadroom.

@Test
public void testHeadroom() throws Exception {
    /*
     * Test Case: Verify that the headroom calculated is the sum of the
     * headrooms for each partition requested. Submit an app with requests
     * for the default partition and the 'y' partition; the total headroom
     * for the user should then be the sum of the headrooms for both labels.
     */
    simpleNodeLabelMappingToManager();
    CapacitySchedulerConfiguration csConf = (CapacitySchedulerConfiguration) TestUtils.getComplexConfigurationWithQueueLabels(conf);
    final String A1 = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
    final String B2 = CapacitySchedulerConfiguration.ROOT + ".b" + ".b2";
    csConf.setUserLimit(A1, 25);
    csConf.setUserLimit(B2, 25);
    YarnConfiguration conf = new YarnConfiguration();
    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
    when(csContext.getConfiguration()).thenReturn(csConf);
    when(csContext.getConf()).thenReturn(conf);
    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB));
    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
    RMContext rmContext = TestUtils.getMockRMContext();
    RMContext spyRMContext = spy(rmContext);
    when(spyRMContext.getNodeLabelManager()).thenReturn(mgr);
    when(csContext.getRMContext()).thenReturn(spyRMContext);
    mgr.activateNode(NodeId.newInstance("h0", 0), Resource.newInstance(160 * GB, 16)); // default label
    mgr.activateNode(NodeId.newInstance("h1", 0), Resource.newInstance(160 * GB, 16)); // label x
    mgr.activateNode(NodeId.newInstance("h2", 0), Resource.newInstance(160 * GB, 16)); // label y
    // Cluster resource (default partition): 160 GB, i.e. 10 nodes of 16 GB each
    Resource clusterResource = Resources.createResource(160 * GB);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, "root", queues, queues, TestUtils.spyHook);
    ResourceUsage queueResUsage = rootQueue.getQueueResourceUsage();
    when(csContext.getClusterResourceUsage()).thenReturn(queueResUsage);
    // Manipulate queue 'b2'
    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get("b2"));
    queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("h0", rack_0, 0, 160 * GB);
    FiCaSchedulerNode node_1 = TestUtils.getMockNode("h1", rack_0, 0, 160 * GB);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    ResourceRequest amResourceRequest = mock(ResourceRequest.class);
    Resource amResource = Resources.createResource(0, 0);
    when(amResourceRequest.getCapability()).thenReturn(amResource);
    when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
    when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(rmAppAttempt);
    when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    Mockito.doReturn(true).when(spyApps).containsKey((ApplicationId) Matchers.any());
    Priority priority_1 = TestUtils.createMockPriority(1);
    // Submit first application with some resource-requests from user_0,
    // and check headroom
    final ApplicationAttemptId appAttemptId_0_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_0, user_0);
    List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
    app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_0.updateResourceRequests(app_0_0_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // headroom = 50% * 90% (queue capacity factors) * 160 GB * 0.25 (user limit) = 18 GB
    Resource expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // Submit second application from user_0, check headroom
    final ApplicationAttemptId appAttemptId_0_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_1, user_0);
    List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_1.updateResourceRequests(app_0_1_requests);
    app_0_1_requests.clear();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_0_1.updateResourceRequests(app_0_1_requests);
    // Schedule on both nodes to compute headroom
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    queue.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // no change
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // headroom for the default partition + headroom for the 'y' partition
    // 'y'-partition headroom = 0.25 * 0.5 (capacity factors for b2 in 'y') * 160 GB = 20 GB
    Resource expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    // Submit first application from user_1, check for new headroom
    final ApplicationAttemptId appAttemptId_1_0 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_1_0, user_1);
    List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_1_0.updateResourceRequests(app_1_0_requests);
    app_1_0_requests.clear();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_1_0.updateResourceRequests(app_1_0_requests);
    // Schedule to compute headroom
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // per-user headroom is unchanged: 50% * 90% * 160 GB * 0.25 (user limit) = 18 GB, even with two active users
    expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    // headroom for the default partition + headroom for the 'y' partition
    // 'y'-partition headroom = 0.25 * 0.5 (capacity factors for b2 in 'y') * 160 GB = 20 GB
    expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_1_0.getHeadroom());
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt), HashMap (java.util.HashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), ArrayList (java.util.ArrayList), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), ResourceUsage (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RecordFactory (org.apache.hadoop.yarn.factories.RecordFactory), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Test (org.junit.Test)
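The headroom assertions in testHeadroom reduce to two products and a sum. A worked sketch of that arithmetic; the capacity factors (0.5, 0.9, 0.25, 0.5) are taken from the test's in-line comments and expected values, not re-derived from getComplexConfigurationWithQueueLabels:

public class HeadroomMath {
    public static void main(String[] args) {
        final int clusterGb = 160;

        // Default-partition headroom: 0.5 * 0.9 * 160 GB * 0.25 = 18 GB.
        int defaultHeadroomGb = (int) (0.5 * 0.9 * clusterGb * 0.25);

        // 'y'-partition headroom: 0.25 * 0.5 * 160 GB = 20 GB.
        int yHeadroomGb = (int) (0.25 * 0.5 * clusterGb);

        // An app requesting both partitions sees the sum: 38 GB.
        System.out.println(defaultHeadroomGb);               // 18
        System.out.println(yHeadroomGb);                     // 20
        System.out.println(defaultHeadroomGb + yHeadroomGb); // 38
    }
}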

Example 78 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

The class TestApplicationPriority, method testUpdateInvalidPriorityAtRuntime.

@Test
public void testUpdateInvalidPriorityAtRuntime() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    // Set Max Application Priority as 10
    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
    MockRM rm = new MockRM(conf);
    rm.start();
    Priority appPriority1 = Priority.newInstance(5);
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * GB);
    RMApp app1 = rm.submitApp(1 * GB, appPriority1);
    // kick the scheduler, 1 GB given to AM1, remaining 15GB on nm1
    MockAM am1 = MockRM.launchAM(app1, rm, nm1);
    am1.registerAppAttempt();
    // get scheduler
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    // Change the priority of App1 to 15
    Priority appPriority2 = Priority.newInstance(15);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(app1.getUser());
    cs.updateApplicationPriority(appPriority2, app1.getApplicationId(), null, ugi);
    // get scheduler app
    FiCaSchedulerApp schedulerAppAttempt = cs.getSchedulerApplications().get(app1.getApplicationId()).getCurrentAppAttempt();
    // Verify whether priority 15 is reset to 10
    Priority appPriority3 = Priority.newInstance(10);
    Assert.assertEquals(appPriority3, schedulerAppAttempt.getPriority());
    rm.stop();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), Priority (org.apache.hadoop.yarn.api.records.Priority), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
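The behavior asserted in testUpdateInvalidPriorityAtRuntime is a clamping rule: a requested priority above the cluster maximum is reset to that maximum. A minimal illustration of the rule, not the CapacityScheduler code path itself:

import org.apache.hadoop.yarn.api.records.Priority;

public class PriorityClamp {
    // Clamp a requested priority to the cluster-level maximum.
    static Priority clamp(Priority requested, Priority clusterMax) {
        return requested.getPriority() > clusterMax.getPriority()
                ? clusterMax : requested;
    }

    public static void main(String[] args) {
        Priority max = Priority.newInstance(10);
        System.out.println(clamp(Priority.newInstance(15), max).getPriority()); // 10
        System.out.println(clamp(Priority.newInstance(5), max).getPriority());  // 5
    }
}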

Example 79 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

The class TestApplicationPriority, method testOrderOfActivatingThePriorityApplicationOnRMRestart.

/**
   * <p>
   * Verifies the order in which applications are activated after an RM
   * restart.
   * </p>
   * <ul>
   * <li>App-1 and App-2 are submitted, scheduled, and running with
   * priorities 5 and 6 respectively.</li>
   * <li>App-3 is submitted and scheduled with priority 7, but is not
   * activated because the AMResourceLimit is reached.</li>
   * <li>The RM is restarted; all three apps are recovered as pending.</li>
   * <li>After NM registration, the previously running App-1 and App-2 are
   * activated regardless of the AMResourceLimit; App-3 stays in the
   * pending ordering policy.</li>
   * </ul>
   * <p>
   * Expected output: App-2 must be activated since it was running earlier.
   * </p>
   * @throws Exception
   */
@Test
public void testOrderOfActivatingThePriorityApplicationOnRMRestart() throws Exception {
    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
    final DrainDispatcher dispatcher = new DrainDispatcher();
    MemoryRMStateStore memStore = new MemoryRMStateStore();
    memStore.init(conf);
    MockRM rm1 = new MockRM(conf, memStore) {

        @Override
        protected Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    rm1.start();
    MockNM nm1 = new MockNM("127.0.0.1:1234", 16384, rm1.getResourceTrackerService());
    nm1.registerNode();
    dispatcher.await();
    ResourceScheduler scheduler = rm1.getRMContext().getScheduler();
    LeafQueue defaultQueue = (LeafQueue) ((CapacityScheduler) scheduler).getQueue("default");
    int memory = (int) (defaultQueue.getAMResourceLimit().getMemorySize() / 2);
    // App-1 with priority 5 submitted and running
    Priority appPriority1 = Priority.newInstance(5);
    RMApp app1 = rm1.submitApp(memory, appPriority1);
    MockAM am1 = MockRM.launchAM(app1, rm1, nm1);
    am1.registerAppAttempt();
    // App-2 with priority 6 submitted and running
    Priority appPriority2 = Priority.newInstance(6);
    RMApp app2 = rm1.submitApp(memory, appPriority2);
    MockAM am2 = MockRM.launchAM(app2, rm1, nm1);
    am2.registerAppAttempt();
    dispatcher.await();
    Assert.assertEquals(2, defaultQueue.getNumActiveApplications());
    Assert.assertEquals(0, defaultQueue.getNumPendingApplications());
    // App-3 with priority 7 submitted and scheduled, but not activated
    // because the AMResourceLimit threshold is reached
    Priority appPriority3 = Priority.newInstance(7);
    RMApp app3 = rm1.submitApp(memory, appPriority3);
    dispatcher.await();
    Assert.assertEquals(2, defaultQueue.getNumActiveApplications());
    Assert.assertEquals(1, defaultQueue.getNumPendingApplications());
    Iterator<FiCaSchedulerApp> iterator = defaultQueue.getOrderingPolicy().getSchedulableEntities().iterator();
    FiCaSchedulerApp fcApp2 = iterator.next();
    Assert.assertEquals(app2.getCurrentAppAttempt().getAppAttemptId(), fcApp2.getApplicationAttemptId());
    FiCaSchedulerApp fcApp1 = iterator.next();
    Assert.assertEquals(app1.getCurrentAppAttempt().getAppAttemptId(), fcApp1.getApplicationAttemptId());
    iterator = defaultQueue.getPendingApplications().iterator();
    FiCaSchedulerApp fcApp3 = iterator.next();
    Assert.assertEquals(app3.getCurrentAppAttempt().getAppAttemptId(), fcApp3.getApplicationAttemptId());
    final DrainDispatcher dispatcher1 = new DrainDispatcher();
    // create new RM to represent restart and recover state
    MockRM rm2 = new MockRM(conf, memStore) {

        @Override
        protected Dispatcher createDispatcher() {
            return dispatcher1;
        }
    };
    // start new RM
    rm2.start();
    // change NM to point to new RM
    nm1.setResourceTrackerService(rm2.getResourceTrackerService());
    // Verify RM Apps after this restart
    Assert.assertEquals(3, rm2.getRMContext().getRMApps().size());
    dispatcher1.await();
    scheduler = rm2.getRMContext().getScheduler();
    defaultQueue = (LeafQueue) ((CapacityScheduler) scheduler).getQueue("default");
    // wait for all applications to get added to scheduler
    int count = 50;
    while (count-- > 0) {
        if (defaultQueue.getNumPendingApplications() == 3) {
            break;
        }
        Thread.sleep(50);
    }
    // Before NM registration, the AMResourceLimit threshold is 0, so no
    // applications get activated.
    Assert.assertEquals(0, defaultQueue.getNumActiveApplications());
    Assert.assertEquals(3, defaultQueue.getNumPendingApplications());
    // NM resync to new RM
    nm1.registerNode();
    dispatcher1.await();
    // wait for activating applications
    count = 50;
    while (count-- > 0) {
        if (defaultQueue.getNumActiveApplications() == 2) {
            break;
        }
        Thread.sleep(50);
    }
    Assert.assertEquals(2, defaultQueue.getNumActiveApplications());
    Assert.assertEquals(1, defaultQueue.getNumPendingApplications());
    // verify the order of the activated-applications iterator
    iterator = defaultQueue.getOrderingPolicy().getSchedulableEntities().iterator();
    fcApp2 = iterator.next();
    Assert.assertEquals(app2.getCurrentAppAttempt().getAppAttemptId(), fcApp2.getApplicationAttemptId());
    fcApp1 = iterator.next();
    Assert.assertEquals(app1.getCurrentAppAttempt().getAppAttemptId(), fcApp1.getApplicationAttemptId());
    // verify the pending-applications iterator; it should hold app-3's attempt
    iterator = defaultQueue.getPendingApplications().iterator();
    fcApp3 = iterator.next();
    Assert.assertEquals(app3.getCurrentAppAttempt().getAppAttemptId(), fcApp3.getApplicationAttemptId());
    rm2.stop();
    rm1.stop();
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), Priority (org.apache.hadoop.yarn.api.records.Priority), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), MemoryRMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM), ResourceScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler), Test (org.junit.Test)
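The iterator assertions above (App-2 ahead of App-1 both before and after the restart) are what a priority-first ordering yields. A hypothetical sketch of that ordering contract, with higher priority first and submission order breaking ties; AppEntry is an illustrative type, not the scheduler's ordering-policy implementation:

import java.util.Comparator;
import java.util.TreeSet;

public class ActivationOrder {
    // Illustrative schedulable entity: a name, a priority, and a
    // submission sequence number used as a FIFO tie-breaker.
    record AppEntry(String name, int priority, int submitSeq) {}

    public static void main(String[] args) {
        Comparator<AppEntry> byPriorityThenFifo =
                Comparator.comparingInt(AppEntry::priority).reversed()
                          .thenComparingInt(AppEntry::submitSeq);

        TreeSet<AppEntry> active = new TreeSet<>(byPriorityThenFifo);
        active.add(new AppEntry("app1", 5, 1)); // priority 5, submitted first
        active.add(new AppEntry("app2", 6, 2)); // priority 6, submitted second

        // Iterates app2 before app1, matching the test's iterator checks.
        active.forEach(e -> System.out.println(e.name()));
    }
}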

Example 80 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

The class TestApplicationPriority, method testUpdatePriorityOnPendingAppAndKillAttempt.

@Test(timeout = 120000)
public void testUpdatePriorityOnPendingAppAndKillAttempt() throws Exception {
    int maxPriority = 10;
    int appPriority = 5;
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, maxPriority);
    MockRM rm = new MockRM(conf);
    rm.init(conf);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    CSQueue defaultQueue = (LeafQueue) cs.getQueue("default");
    // Submit and kill an application while the cluster has no resources
    RMApp app1 = rm.submitApp(1024, Priority.newInstance(appPriority));
    Collection<FiCaSchedulerApp> appsPending = ((LeafQueue) defaultQueue).getPendingApplications();
    Collection<FiCaSchedulerApp> activeApps = ((LeafQueue) defaultQueue).getOrderingPolicy().getSchedulableEntities();
    // Verify app is in pending state
    Assert.assertEquals("Pending apps should be 1", 1, appsPending.size());
    Assert.assertEquals("Active apps should be 0", 0, activeApps.size());
    // kill app1 which is pending
    killAppAndVerifyOrderingPolicy(rm, defaultQueue, 0, 0, app1);
    // Check ordering policy size when resource is added
    MockNM nm1 = new MockNM("127.0.0.1:1234", 8096, rm.getResourceTrackerService());
    nm1.registerNode();
    RMApp app2 = rm.submitApp(1024, Priority.newInstance(appPriority));
    Assert.assertEquals("Pending apps should be 0", 0, appsPending.size());
    Assert.assertEquals("Active apps should be 1", 1, activeApps.size());
    RMApp app3 = rm.submitApp(1024, Priority.newInstance(appPriority));
    RMApp app4 = rm.submitApp(1024, Priority.newInstance(appPriority));
    Assert.assertEquals("Pending apps should be 2", 2, appsPending.size());
    Assert.assertEquals("Active apps should be 1", 1, activeApps.size());
    // kill app3, pending apps should reduce to 1
    killAppAndVerifyOrderingPolicy(rm, defaultQueue, 1, 1, app3);
    // kill app2; the running app is killed and a pending app is activated
    killAppAndVerifyOrderingPolicy(rm, defaultQueue, 0, 1, app2);
    // kill app4; all apps are killed and both policy sizes should be zero
    killAppAndVerifyOrderingPolicy(rm, defaultQueue, 0, 0, app4);
    rm.stop();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), Test (org.junit.Test)
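The helper killAppAndVerifyOrderingPolicy is not shown on this page. A plausible reconstruction inferred only from its call sites (kill the app, wait for the kill to complete, then assert the pending and active ordering-policy sizes); the body below is an assumption, not the actual helper:

// Assumed imports: MockRM, CSQueue, LeafQueue, RMApp, RMAppState, Assert.
private void killAppAndVerifyOrderingPolicy(MockRM rm, CSQueue defaultQueue,
        int expectedPending, int expectedActive, RMApp app) throws Exception {
    // Kill via MockRM and wait for the app to reach the KILLED state.
    rm.killApp(app.getApplicationId());
    rm.waitForState(app.getApplicationId(), RMAppState.KILLED);
    LeafQueue queue = (LeafQueue) defaultQueue;
    Assert.assertEquals("Pending apps should be " + expectedPending,
            expectedPending, queue.getPendingApplications().size());
    Assert.assertEquals("Active apps should be " + expectedActive,
            expectedActive, queue.getOrderingPolicy().getSchedulableEntities().size());
}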

Aggregations

FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 141
Test (org.junit.Test): 97
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 60
Resource (org.apache.hadoop.yarn.api.records.Resource): 53
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM): 51
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 49
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM): 48
MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM): 47
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 47
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 40
Priority (org.apache.hadoop.yarn.api.records.Priority): 40
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 35
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 34
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 31
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent): 31
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 30
ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager): 24
ArrayList (java.util.ArrayList): 19
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 19
Container (org.apache.hadoop.yarn.api.records.Container): 13