Example 11 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.

In class TestLeafQueue, method testComputeUserLimitAndSetHeadroom.

@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testComputeUserLimitAndSetHeadroom() throws IOException {
    LeafQueue qb = stubLeafQueue((LeafQueue) queues.get(B));
    qb.setMaxCapacity(1.0f);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    //create nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), 1);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    CapacitySchedulerQueueManager mockCapacitySchedulerQueueManager = mock(CapacitySchedulerQueueManager.class);
    QueueStateManager mockQueueStateManager = mock(QueueStateManager.class);
    when(mockCapacitySchedulerQueueManager.getQueueStateManager()).thenReturn(mockQueueStateManager);
    when(csContext.getCapacitySchedulerQueueManager()).thenReturn(mockCapacitySchedulerQueueManager);
    //our test plan contains four cases
    //1. a single user dominates the queue; we test the headroom
    //2. two users, but user_0 is assigned 100% of the queue resource;
    //   submit user_1's application and check headroom correctness
    //3. two users, each assigned 50% of the queue resource;
    //   each user submits one application and we check their headrooms
    //4. similar to 3, but user_0 has no quota left while free resources
    //   remain; check headroom
    //test case 1
    qb.setUserLimit(100);
    qb.setUserLimitFactor(1);
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, qb, qb.getAbstractUsersManager(), spyRMContext);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = new HashMap<>();
    apps.put(app_0.getApplicationAttemptId(), app_0);
    qb.submitApplicationAttempt(app_0, user_0);
    Priority u0Priority = TestUtils.createMockPriority(1);
    SchedulerRequestKey u0SchedKey = toSchedulerKey(u0Priority);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 4 * GB, 1, true, u0Priority, recordFactory)));
    assertEquals("There should only be 1 active user!", 1, qb.getAbstractUsersManager().getNumActiveUsers());
    //get headroom
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    //maxqueue 16G, userlimit 13G; 13G - 4G used = 9G headroom
    assertEquals(9 * GB, app_0.getHeadroom().getMemorySize());
    //test case 2
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_2.getApplicationAttemptId(), app_2);
    Priority u1Priority = TestUtils.createMockPriority(2);
    SchedulerRequestKey u1SchedKey = toSchedulerKey(u1Priority);
    app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 4 * GB, 1, true, u1Priority, recordFactory)));
    qb.submitApplicationAttempt(app_2, user_1);
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertEquals(8 * GB, qb.getUsedResources().getMemorySize());
    assertEquals(4 * GB, app_0.getCurrentConsumption().getMemorySize());
    //maxqueue 16G, userlimit 13G; 13G - 4G used = 9G BUT
    //maxqueue 16G - 8G used (4G per app/user) = 8G max headroom (the new logic)
    assertEquals(8 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(4 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(8 * GB, app_2.getHeadroom().getMemorySize());
    //test case 3
    qb.finishApplication(app_0.getApplicationId(), user_0);
    qb.finishApplication(app_2.getApplicationId(), user_1);
    qb.releaseResource(clusterResource, app_0, app_0.getAppSchedulingInfo().getPendingAsk(u0SchedKey).getPerAllocationResource(), null, null);
    qb.releaseResource(clusterResource, app_2, app_2.getAppSchedulingInfo().getPendingAsk(u1SchedKey).getPerAllocationResource(), null, null);
    qb.setUserLimit(50);
    qb.setUserLimitFactor(1);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_1.getApplicationAttemptId(), app_1);
    final ApplicationAttemptId appAttemptId_3 = TestUtils.getMockApplicationAttemptId(3, 0);
    FiCaSchedulerApp app_3 = new FiCaSchedulerApp(appAttemptId_3, user_1, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_3.getApplicationAttemptId(), app_3);
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, u0Priority, recordFactory)));
    app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, u1Priority, recordFactory)));
    qb.submitApplicationAttempt(app_1, user_0);
    qb.submitApplicationAttempt(app_3, user_1);
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertEquals(4 * GB, qb.getUsedResources().getMemorySize());
    //maxqueue 16G, userlimit 7G, used (by each user) 2G, headroom 5G (both)
    assertEquals(5 * GB, app_3.getHeadroom().getMemorySize());
    assertEquals(5 * GB, app_1.getHeadroom().getMemorySize());
    //test case 4
    final ApplicationAttemptId appAttemptId_4 = TestUtils.getMockApplicationAttemptId(4, 0);
    FiCaSchedulerApp app_4 = new FiCaSchedulerApp(appAttemptId_4, user_0, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_4.getApplicationAttemptId(), app_4);
    qb.submitApplicationAttempt(app_4, user_0);
    app_4.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 6 * GB, 1, true, u0Priority, recordFactory)));
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_4, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    //app3 is user1, active from last test case
    //maxqueue 16G, userlimit 13G, used 2G, would be headroom 10G BUT
    //10G in use, so max possible headroom is 6G (new logic)
    assertEquals(6 * GB, app_3.getHeadroom().getMemorySize());
    //test case 3's apps are still active: 2 + 2 + 6 = 10
    assertEquals(10 * GB, qb.getUsedResources().getMemorySize());
    //app_4 is user_0
    //maxqueue 16G, userlimit 7G, used 8G: usage already exceeds the user
    //limit, so headroom is 0
    //(8G used = 6G from this test case (app_4) + 2G from test case 3 (app_1))
    assertEquals(0 * GB, app_4.getHeadroom().getMemorySize());
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) QueueStateManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueStateManager) Test(org.junit.Test)
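
A compact way to see the headroom rule these assertions exercise: headroom is bounded both by the user limit and by what the queue physically has left. The helper below is a hypothetical sketch of that arithmetic, not the actual LeafQueue.computeUserLimitAndSetHeadroom implementation; all names are illustrative.

/**
 * Hypothetical model of the headroom rule exercised above: the reported
 * headroom is capped by the user limit AND by the queue's remaining free
 * space (the "new logic" the comments refer to). Units are arbitrary but
 * consistent (e.g. GB).
 */
final class HeadroomModel {
    static long headroom(long userLimit, long userConsumed,
                         long queueMax, long queueUsed) {
        long byUserLimit = userLimit - userConsumed;   // classic headroom
        long byQueueFree = queueMax - queueUsed;       // queue's free space
        return Math.max(0, Math.min(byUserLimit, byQueueFree));
    }
}

Plugging in test case 2's numbers, min(13 - 4, 16 - 8) = 8G, matching the 8G assertion; in test case 1 the queue still has 12G free, so the user-limit bound of 9G wins. In test case 4, app_4's user has already used more than its limit, so the headroom clamps to 0.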

Example 12 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.

In class TestLeafQueue, method testDRFUserLimits.

@Test
public void testDRFUserLimits() throws Exception {
    setUpWithDominantResourceCalculator();
    // Mock the queue
    LeafQueue b = stubLeafQueue((LeafQueue) queues.get(B));
    // unset maxCapacity
    b.setMaxCapacity(1.0f);
    // Users
    final String user0 = "user_0";
    final String user1 = "user_1";
    // Submit applications
    final ApplicationAttemptId appAttemptId0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app0 = new FiCaSchedulerApp(appAttemptId0, user0, b, b.getAbstractUsersManager(), spyRMContext);
    b.submitApplicationAttempt(app0, user0);
    final ApplicationAttemptId appAttemptId2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app2 = new FiCaSchedulerApp(appAttemptId2, user1, b, b.getAbstractUsersManager(), spyRMContext);
    b.submitApplicationAttempt(app2, user1);
    // Setup some nodes
    String host0 = "127.0.0.1";
    FiCaSchedulerNode node0 = TestUtils.getMockNode(host0, DEFAULT_RACK, 0, 8 * GB, 100);
    String host1 = "127.0.0.2";
    FiCaSchedulerNode node1 = TestUtils.getMockNode(host1, DEFAULT_RACK, 0, 8 * GB, 100);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node0.getNodeID(), node0, node1.getNodeID(), node1);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app0.getApplicationAttemptId(), app0, app2.getApplicationAttemptId(), app2);
    int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 100);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    // Setup resource-requests so that one application is memory dominant
    // and other application is vcores dominant
    Priority priority = TestUtils.createMockPriority(1);
    app0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 40, 10, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
    app2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 10, 10, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
    /**
     * Start testing...
     */
    // Set user-limit
    b.setUserLimit(50);
    b.setUserLimitFactor(2);
    User queueUser0 = b.getUser(user0);
    User queueUser1 = b.getUser(user1);
    assertEquals("There should 2 active users!", 2, b.getAbstractUsersManager().getNumActiveUsers());
    // Fill both Nodes as far as we can
    CSAssignment assign;
    do {
        assign = b.assignContainers(clusterResource, node0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
        LOG.info(assign.toString());
        applyCSAssignment(clusterResource, assign, b, nodes, apps);
    } while (assign.getResource().getMemorySize() > 0 && assign.getAssignmentInformation().getNumReservations() == 0);
    do {
        assign = b.assignContainers(clusterResource, node1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
        applyCSAssignment(clusterResource, assign, b, nodes, apps);
    } while (assign.getResource().getMemorySize() > 0 && assign.getAssignmentInformation().getNumReservations() == 0);
    //LOG.info("user_0: " + queueUser0.getUsed());
    //LOG.info("user_1: " + queueUser1.getUsed());
    assertTrue("Verify user_0 got resources ", queueUser0.getUsed().getMemorySize() > 0);
    assertTrue("Verify user_1 got resources ", queueUser1.getUsed().getMemorySize() > 0);
    assertTrue("Expected AbsoluteUsedCapacity > 0.95, got: " + b.getAbsoluteUsedCapacity(), b.getAbsoluteUsedCapacity() > 0.95);
    // Verify consumedRatio is based on dominant resources
    float expectedRatio = queueUser0.getUsed().getVirtualCores() / (numNodes * 100.0f) + queueUser1.getUsed().getMemorySize() / (numNodes * 8.0f * GB);
    assertEquals(expectedRatio, b.getUsersManager().getUsageRatio(""), 0.001);
    // Add another node and make sure consumedRatio is adjusted
    // accordingly.
    numNodes = 3;
    clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 100);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    expectedRatio = queueUser0.getUsed().getVirtualCores() / (numNodes * 100.0f) + queueUser1.getUsed().getMemorySize() / (numNodes * 8.0f * GB);
    assertEquals(expectedRatio, b.getUsersManager().getUsageRatio(""), 0.001);
}
Also used : User(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User) FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) Test(org.junit.Test)
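
The expectedRatio formula above sums each user's dominant share: user0's 1GB/40-vcore requests are vcore-dominant, while user1's 2GB/10-vcore requests are memory-dominant. Below is a hypothetical sketch of the dominant-share arithmetic, not the actual UsersManager code.

/**
 * Dominant Resource Fairness share, as assumed by expectedRatio above:
 * a user's share is the larger of its per-resource utilization ratios.
 */
final class DrfModel {
    static float dominantShare(long usedMem, long usedVcores,
                               long clusterMem, long clusterVcores) {
        float memShare = (float) usedMem / clusterMem;     // memory ratio
        float vcoreShare = (float) usedVcores / clusterVcores; // vcore ratio
        return Math.max(memShare, vcoreShare);             // dominant one
    }
}

That is why the test adds user0's vcore share to user1's memory share, and why adding a third node (growing both denominators) shrinks the usage ratio in the final assertion.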

Example 13 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.

In class TestLeafQueue, method testGetTotalPendingResourcesConsideringUserLimitTwoUsers.

@Test
public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers() throws Exception {
    // Manipulate queue 'e'
    LeafQueue e = stubLeafQueue((LeafQueue) queues.get(E));
    // Allow queue 'e' to use 100% of cluster resources (max capacity).
    e.setMaxCapacity(1.0f);
    // When used queue resources go above capacity (in this case, 1%), the
    // user resource limit (used in calculating headroom) is calculated in
    // small increments to ensure that user-limit-percent can be met for all
    // users in a queue. Take user-limit-percent out of the equation so that
    // the user resource limit is always calculated to its max possible value.
    e.setUserLimit(1000);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit 2 applications for user_0
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_1, user_0);
    // Submit 2 applications for user_1
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_2, user_1);
    final ApplicationAttemptId appAttemptId_3 = TestUtils.getMockApplicationAttemptId(3, 0);
    FiCaSchedulerApp app_3 = new FiCaSchedulerApp(appAttemptId_3, user_1, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_3, user_1);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1, app_2.getApplicationAttemptId(), app_2, app_3.getApplicationAttemptId(), app_3);
    // Setup 1 node with 100GB of memory resources.
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 100 * GB);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
    final int numNodes = 1;
    Resource clusterResource = Resources.createResource(numNodes * (100 * GB), numNodes * 128);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Pending resource requests for user_0: app_0 and app_1 total 3GB.
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    // Pending resource requests for user_1: app_2 and app_3 total 3GB.
    priority = TestUtils.createMockPriority(1);
    app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)));
    // Start testing...
    // With queue capacity set at 1% of 100GB and user-limit-factor set to 1.0,
    // queue 'e' should be able to consume 1GB per user.
    assertEquals(2 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // None of the apps have assigned resources
    // user_0's apps:
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 1st Container of 1GB
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // The first container was assigned to user_0's app_0. The queue's total
    // headroom has 1GB left for user_1.
    assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 2nd container of 1GB
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // user_0 has no headroom due to user-limit-factor of 1.0. However, the
    // capacity scheduler will assign one container beyond the user resource
    // limit, so this container went to user_0's app_1. Headroom for queue
    // 'e' is therefore still 1GB for user_1.
    assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 3rd container.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Container was allocated to user_1's app_2 since user_1 still had
    // headroom. Now no headroom is left.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(1 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 4th container.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Allocated to user_1's app_2 since the scheduler assigns one container
    // above the user resource limit. Available headroom is still 0.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    long app_0_consumption = app_0.getCurrentConsumption().getMemorySize();
    assertEquals(1 * GB, app_0_consumption);
    long app_1_consumption = app_1.getCurrentConsumption().getMemorySize();
    assertEquals(1 * GB, app_1_consumption);
    // user_1's apps:
    long app_2_consumption = app_2.getCurrentConsumption().getMemorySize();
    assertEquals(2 * GB, app_2_consumption);
    long app_3_consumption = app_3.getCurrentConsumption().getMemorySize();
    assertEquals(0 * GB, app_3_consumption);
    // Attempt to assign 5th container. Will be a no-op.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Cannot allocate 5th container because both users are above their allowed
    // user resource limit. Values should be the same as previously.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(app_0_consumption, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(app_1_consumption, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(app_2_consumption, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(app_3_consumption, app_3.getCurrentConsumption().getMemorySize());
    // Increase user-limit-factor from 1.0 to 10.0, raising each user's
    // resource limit from 1GB to 10GB (1% * 10 * 100GB = 10GB). Pending for
    // both user_0 and user_1 is still 1GB each, so user-limit-factor is no
    // longer the limiting factor.
    e.setUserLimitFactor(10.0f);
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Next container goes to user_0's app_1, since it still wanted 1GB.
    assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(2 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Last container goes to user_1's app_3, since it still wanted 1GB.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(2 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Release each container from app_0
    for (RMContainer rmContainer : app_0.getLiveContainers()) {
        e.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    // Release each container from app_1
    for (RMContainer rmContainer : app_1.getLiveContainers()) {
        e.completedContainer(clusterResource, app_1, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) Test(org.junit.Test)
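
Conceptually, getTotalPendingResourcesConsideringUserLimit counts each user's pending demand only up to that user's remaining headroom, then sums across users. The sketch below is a hypothetical single-partition, memory-only simplification; the real method also handles node partitions, the user-limit-factor toggle, and deactivated applications.

import java.util.Map;

/**
 * Hypothetical model: each user's pending ask counts only up to the
 * headroom that user has left under the user resource limit.
 */
final class PendingModel {
    static long totalPending(Map<String, Long> pendingByUser,
                             Map<String, Long> usedByUser,
                             long userResourceLimit) {
        long total = 0;
        for (Map.Entry<String, Long> e : pendingByUser.entrySet()) {
            long used = usedByUser.getOrDefault(e.getKey(), 0L);
            long headroom = Math.max(0, userResourceLimit - used);
            total += Math.min(e.getValue(), headroom); // cap at headroom
        }
        return total;
    }
}

With both users pending 3GB but each limited to 1GB, the counted total is 2GB, matching the first assertion; each allocated container then shrinks it by 1GB until it reaches 0.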

Example 14 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.

In class TestLeafQueue, method testReservationExchange.

@Test
public void testReservationExchange() throws Exception {
    // Manipulate queue 'a'
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    //unset maxCapacity
    a.setMaxCapacity(1.0f);
    a.setUserLimitFactor(10);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_1, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_1, user_1);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4 * GB);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getClusterResource()).thenReturn(Resource.newInstance(8, 1));
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    final int numNodes = 3;
    Resource clusterResource = Resources.createResource(numNodes * (4 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(4 * GB, 16));
    when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4 * GB, 16));
    // 1G / 4G 
    when(a.getMinimumAllocationFactor()).thenReturn(0.25f);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 4 * GB, 1, true, priority, recordFactory)));
    // Start testing...
    // Only 1 container
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(1 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Also a 2nd container -> minCapacity = 1024 since (0.1 * 8G) < minAlloc;
    // also, a user can get one container more than the user-limit
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Now, reservation should kick in for app_1
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(6 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    // Now free 1 container from app_0 i.e. 1G, and re-reserve it
    RMContainer rmContainer = app_0.getLiveContainers().iterator().next();
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(1 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(1, app_1.getReReservations(toSchedulerKey(priority)));
    // Re-reserve
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(1 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(2, app_1.getReReservations(toSchedulerKey(priority)));
    // Try to schedule on node_1 now, should *move* the reservation
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(9 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(4 * GB, node_1.getAllocatedResource().getMemorySize());
    // Doesn't change yet... only when reservation is cancelled or a different
    // container is reserved
    assertEquals(2, app_1.getReReservations(toSchedulerKey(priority)));
    // Now finish another container from app_0 and see the reservation cancelled
    rmContainer = app_0.getLiveContainers().iterator().next();
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(4 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(0 * GB, node_0.getAllocatedResource().getMemorySize());
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) Test(org.junit.Test)
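
The reservation behavior under test: when app_1's 4GB ask does not fit in node_0's remaining space, the scheduler reserves the node rather than skipping the app, and repeated visits to the same reserved node bump the re-reservation counter. Below is a hypothetical sketch of that decision; the real logic lives in the container allocator with many more branches (limits, locality, unreserve-and-move).

/** Hypothetical outcome of one scheduling attempt on one node. */
enum Outcome { ALLOCATED, RESERVED }

final class ReservationModel {
    int reReservations = 0; // mirrors app.getReReservations(schedulerKey)

    Outcome tryAssign(long requestMem, long nodeFreeMem,
                      boolean alreadyReservedHere) {
        if (requestMem <= nodeFreeMem) {
            return Outcome.ALLOCATED;  // fits: allocate immediately
        }
        if (alreadyReservedHere) {
            reReservations++;          // revisit of an existing reservation
        }
        return Outcome.RESERVED;       // hold a claim on this node's space
    }
}

Note also that the queue's used resources include reserved space: once the reservation kicks in, getUsedResources() reports 6GB, i.e. 2GB allocated plus the 4GB reservation.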

Example 15 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.

In class TestLeafQueue, method testUserLimits.

@Test
public void testUserLimits() throws Exception {
    // Mock the queue
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    //unset maxCapacity
    a.setMaxCapacity(1.0f);
    when(csContext.getClusterResource()).thenReturn(Resources.createResource(16 * GB, 32));
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_1, a, a.getAbstractUsersManager(), spyRMContext);
    // different user
    a.submitApplicationAttempt(app_1, user_1);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    /**
     * Start testing...
     */
    // Set user-limit
    a.setUserLimit(50);
    a.setUserLimitFactor(2);
    // There are two active users
    assertEquals(2, a.getAbstractUsersManager().getNumActiveUsers());
    // 1 container to user_0
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(3 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Allocate one container to app_1. Even though app_0 submitted earlier,
    // it cannot get this container assigned since user_0 has already
    // exceeded the user-limit.
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(4 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Allocate one container to app_0, before allocating this container,
    // user-limit = ceil((4 + 1) / 2) = 3G. app_0's used resource (3G) <=
    // user-limit.
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(7 * GB, a.getUsedResources().getMemorySize());
    assertEquals(6 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // app_0 doesn't have outstanding resources, so there's only one active user.
    assertEquals("There should only be 1 active user!", 1, a.getAbstractUsersManager().getNumActiveUsers());
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) Resource(org.apache.hadoop.yarn.api.records.Resource) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Test(org.junit.Test)
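
The inline arithmetic above, user-limit = ceil((4 + 1) / 2) = 3G, divides the queue's current demand (used plus the resource being requested) among the active users. The helper below is a hypothetical simplification of that computation; the real UsersManager formula also involves queue capacity, partition resources, and activity-based refinements.

/**
 * Hypothetical sketch of the per-user limit from the comment above:
 * share the queue's current demand evenly among active users, floored
 * by user-limit-percent and capped by user-limit-factor.
 */
final class UserLimitModel {
    static long userLimitGb(long usedGb, long requiredGb, int activeUsers,
                            int userLimitPercent, float userLimitFactor,
                            long queueCapacityGb) {
        long current = usedGb + requiredGb;
        // Even share of current demand among active users.
        long share = (long) Math.ceil((double) current / activeUsers);
        // Guaranteed floor from user-limit-percent.
        long floor = queueCapacityGb * userLimitPercent / 100L;
        // Cap from user-limit-factor times the queue's capacity.
        long cap = (long) (queueCapacityGb * userLimitFactor);
        return Math.min(Math.max(share, floor), cap);
    }
}

With used = 4G, required = 1G, and two active users, the share is ceil(5 / 2) = 3G, so app_0's 3G of consumption is still within its limit and the third container can be allocated.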

Aggregations

ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 52
Resource (org.apache.hadoop.yarn.api.records.Resource): 48
Test (org.junit.Test): 40
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 37
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 34
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 32
Priority (org.apache.hadoop.yarn.api.records.Priority): 31
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 29
ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager): 20
HashMap (java.util.HashMap): 10
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 10
ArrayList (java.util.ArrayList): 9
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 9
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 9
AMState (org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.AMState): 7
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 6
PlacementSet (org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet): 6
SchedulerRequestKey (org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey): 6
InOrder (org.mockito.InOrder): 5
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 4