
Example 46 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From class TestLeafQueue, method testUserLimits.

@Test
public void testUserLimits() throws Exception {
    // Mock the queue
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // unset maxCapacity
    a.setMaxCapacity(1.0f);
    when(csContext.getClusterResource()).thenReturn(Resources.createResource(16 * GB, 32));
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_1, a, a.getAbstractUsersManager(), spyRMContext);
    // different user
    a.submitApplicationAttempt(app_1, user_1);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    /**
     * Start testing...
     */
    // Set user-limit
    a.setUserLimit(50);
    a.setUserLimitFactor(2);
    // There are two active users
    assertEquals(2, a.getAbstractUsersManager().getNumActiveUsers());
    // 1 container to user_0
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(3 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Allocate one container to app_1. Even though app_0 was submitted
    // earlier, it cannot get this container since user_0 has already
    // exceeded its user-limit.
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(4 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Allocate one container to app_0, before allocating this container,
    // user-limit = ceil((4 + 1) / 2) = 3G. app_0's used resource (3G) <=
    // user-limit.
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(7 * GB, a.getUsedResources().getMemorySize());
    assertEquals(6 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // app_0 has no outstanding requests, so there's only one active user.
    assertEquals("There should only be 1 active user!", 1, a.getAbstractUsersManager().getNumActiveUsers());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), Resource (org.apache.hadoop.yarn.api.records.Resource), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Test (org.junit.Test)
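
The user-limit arithmetic referenced in the test comments above ("user-limit = ceil((4 + 1) / 2) = 3G") can be reproduced in isolation. Below is a minimal sketch, assuming a 1 GB minimum allocation; UserLimitSketch and userLimitMb are illustrative names, not LeafQueue's internal API, and the real computation also folds in the user-limit percentage and user-limit-factor:

// Hypothetical sketch of the per-user limit arithmetic from the test
// comments above; not the CapacityScheduler's actual internals.
public class UserLimitSketch {
    static final long GB = 1024; // sizes in MB
    static final long MIN_ALLOCATION_MB = 1 * GB; // assumed minimum allocation

    // Round up to the next multiple of the minimum allocation.
    static long roundUp(long mb, long stepMb) {
        return ((mb + stepMb - 1) / stepMb) * stepMb;
    }

    // Each active user's limit: queue consumption plus the pending request,
    // split across active users, rounded up to the minimum allocation.
    static long userLimitMb(long queueUsedMb, long requiredMb, int activeUsers) {
        long perUser = (queueUsedMb + requiredMb + activeUsers - 1) / activeUsers;
        return roundUp(perUser, MIN_ALLOCATION_MB);
    }

    public static void main(String[] args) {
        // Mirrors "user-limit = ceil((4 + 1) / 2) = 3G" with two active users.
        System.out.println(userLimitMb(4 * GB, 1 * GB, 2) / GB + "G"); // prints 3G
    }
}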

Example 47 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From class TestLeafQueue, method testHeadroomWithMaxCap.

@Test
public void testHeadroomWithMaxCap() throws Exception {
    // Mock the queue
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // unset maxCapacity
    a.setMaxCapacity(1.0f);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    // same user
    a.submitApplicationAttempt(app_1, user_0);
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_2, user_1);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1, app_2.getApplicationAttemptId(), app_2);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), 1);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    /**
     * Start testing...
     */
    // Set user-limit
    a.setUserLimit(50);
    a.setUserLimitFactor(2);
    // Now, only user_0 should be active since it is the only user with
    // outstanding requests
    assertEquals("There should only be 1 active user!", 1, a.getAbstractUsersManager().getNumActiveUsers());
    // 1 container to user_0
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // TODO, fix headroom in the future patch
    assertEquals(0 * GB, app_0.getHeadroom().getMemorySize());
    // User limit = 2G, 2G already in use
    assertEquals(0 * GB, app_1.getHeadroom().getMemorySize());
    // app_2 is not yet active, so its headroom is zero as well
    assertEquals(0 * GB, app_2.getHeadroom().getMemorySize());
    // Again one to user_0 since it hasn't exceeded the user limit yet
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(3 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // 4G - 3G
    assertEquals(0 * GB, app_0.getHeadroom().getMemorySize());
    // 4G - 3G
    assertEquals(0 * GB, app_1.getHeadroom().getMemorySize());
    // Submit requests for app_2 and set max-cap
    a.setMaxCapacity(.1f);
    app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)));
    assertEquals(2, a.getAbstractUsersManager().getNumActiveUsers());
    // No more to user_0 since it is already over its user-limit,
    // and no more containers to the queue since it's already at max-cap
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(3 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(0 * GB, app_1.getHeadroom().getMemorySize());
    // Check headroom for app_2
    // unset app_1's request (numContainers = 0)
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 0, true, priority, recordFactory)));
    assertEquals(1, a.getAbstractUsersManager().getNumActiveUsers());
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    // hit queue max-cap
    assertEquals(0 * GB, app_2.getHeadroom().getMemorySize());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Test (org.junit.Test)
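
Every headroom assertion in this test expects zero, which follows from one min/max expression. A hedged sketch of that relationship (HeadroomSketch and headroomMb are illustrative names, not LeafQueue's actual method):

// Sketch: headroom is the room left under the user's limit, further capped
// by the room left under the queue's max-capacity, floored at zero.
public class HeadroomSketch {
    static long headroomMb(long userLimitMb, long queueMaxCapMb,
            long queueUsedMb, long userConsumedMb) {
        long queueRoom = queueMaxCapMb - queueUsedMb; // negative once over max-cap
        long userRoom = userLimitMb - userConsumedMb;
        return Math.max(0, Math.min(queueRoom, userRoom));
    }

    public static void main(String[] args) {
        // After a.setMaxCapacity(.1f) the queue may use only about
        // 0.1 * 16 GB = 1638 MB while 3072 MB is already allocated,
        // so queueRoom is negative and headroom clamps to 0.
        System.out.println(headroomMb(2048, 1638, 3072, 2048)); // prints 0
    }
}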

Example 48 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From class TestLeafQueue, method testSchedulingConstraints.

@Test
public void testSchedulingConstraints() throws Exception {
    // Manipulate queue 'a'
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // User
    String user_0 = "user_0";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    // Setup some nodes and racks
    String host_0_0 = "127.0.0.1";
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0_0 = TestUtils.getMockNode(host_0_0, rack_0, 0, 8 * GB);
    String host_0_1 = "127.0.0.2";
    FiCaSchedulerNode node_0_1 = TestUtils.getMockNode(host_0_1, rack_0, 0, 8 * GB);
    String host_1_0 = "127.0.0.3";
    String rack_1 = "rack_1";
    FiCaSchedulerNode node_1_0 = TestUtils.getMockNode(host_1_0, rack_1, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0_0.getNodeID(), node_0_0, node_0_1.getNodeID(), node_0_1, node_1_0.getNodeID(), node_1_0);
    final int numNodes = 3;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests and submit
    Priority priority = TestUtils.createMockPriority(1);
    SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
    List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
    app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(host_0_1, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    // Start testing...
    // Add one request
    app_0_requests_0.clear();
    // only 1
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    // NODE_LOCAL - node_0_0
    CSAssignment assignment = a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
    // should reset after the allocation
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
    // No allocation on node_1_0 even though it's node/rack local since
    // required(ANY) == 0
    assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyNoContainerAllocated(assignment);
    // Still zero since #req == 0
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
    // Add one request
    app_0_requests_0.clear();
    // only one
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    // No allocation on node_0_1 even though it's node/rack local since
    // required(rack_0) == 0
    assignment = a.assignContainers(clusterResource, node_0_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyNoContainerAllocated(assignment);
    assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
    assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
    // NODE_LOCAL - node_1_0
    assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
    // should reset
    assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
    assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ArrayList (java.util.ArrayList), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), SchedulerRequestKey (org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), Test (org.junit.Test)
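
The getSchedulingOpportunities assertions track the counter behind delay scheduling: each node offer that cannot be satisfied locally increments it, and a NODE_LOCAL allocation resets it to zero. A simplified, self-contained sketch of that bookkeeping (the names here are illustrative, not FiCaSchedulerApp's API):

// Simplified delay-scheduling bookkeeping; illustrative only.
public class DelaySchedulingSketch {
    enum NodeType { NODE_LOCAL, RACK_LOCAL, OFF_SWITCH }

    private int schedulingOpportunities = 0;

    // Called once per node offered to the app for a given scheduler key.
    NodeType tryAllocate(boolean nodeLocalPossible, int missedNodesAllowed) {
        if (nodeLocalPossible) {
            schedulingOpportunities = 0; // reset after a local allocation
            return NodeType.NODE_LOCAL;
        }
        schedulingOpportunities++;       // count the missed opportunity
        // Only relax to a less local placement after enough misses.
        return (schedulingOpportunities > missedNodesAllowed)
            ? NodeType.RACK_LOCAL : null;
    }

    public static void main(String[] args) {
        DelaySchedulingSketch app = new DelaySchedulingSketch();
        System.out.println(app.tryAllocate(false, 3)); // null: keep waiting
        System.out.println(app.tryAllocate(true, 3));  // NODE_LOCAL, counter resets
    }
}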

Example 49 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From class TestLeafQueue, method testSingleQueueOneUserMetrics.

@Test
public void testSingleQueueOneUserMetrics() throws Exception {
    // Manipulate queue 'b' (note: the local variable is named 'a')
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
    // Users
    final String user_0 = "user_0";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    // same user
    a.submitApplicationAttempt(app_1, user_0);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
    final int numNodes = 1;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority, recordFactory)));
    // Start testing...
    // Only 1 container
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals((int) (node_0.getTotalResource().getMemorySize() * a.getCapacity()) - (1 * GB), a.getMetrics().getAvailableMB());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager), Test (org.junit.Test)
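
The closing assertion reduces the queue's available-memory metric to simple arithmetic: the queue's configured share of registered node capacity minus what it has handed out. A sketch under that assumption (AvailableMbSketch and availableMb are illustrative names, not the QueueMetrics API):

// Sketch of the availableMB arithmetic asserted above; illustrative only.
public class AvailableMbSketch {
    static long availableMb(long nodeMemoryMb, float queueCapacity, long allocatedMb) {
        // Queue's share of the node, minus containers already allocated.
        return (long) (nodeMemoryMb * queueCapacity) - allocatedMb;
    }

    public static void main(String[] args) {
        // One 8 GB node, an assumed queue-capacity fraction of 0.9,
        // and one 1 GB container allocated.
        System.out.println(availableMb(8 * 1024, 0.9f, 1024)); // prints 6348
    }
}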

Example 50 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From class TestLeafQueue, method testDRFUsageRatioRounding.

@Test
public void testDRFUsageRatioRounding() throws Exception {
    CSAssignment assign;
    setUpWithDominantResourceCalculator();
    // Mock the queue
    LeafQueue b = stubLeafQueue((LeafQueue) queues.get(E));
    // Users
    final String user0 = "user_0";
    // Submit applications
    final ApplicationAttemptId appAttemptId0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app0 = new FiCaSchedulerApp(appAttemptId0, user0, b, b.getAbstractUsersManager(), spyRMContext);
    b.submitApplicationAttempt(app0, user0);
    // Setup some nodes
    String host0 = "127.0.0.1";
    FiCaSchedulerNode node0 = TestUtils.getMockNode(host0, DEFAULT_RACK, 0, 80 * GB, 100);
    // Make cluster relatively large so usageRatios are small
    int numNodes = 1000;
    Resource clusterResource = Resources.createResource(numNodes * (80 * GB), numNodes * 100);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Set user-limit. Need a small queue within a large cluster.
    b.setUserLimit(50);
    b.setUserLimitFactor(1000000);
    b.setMaxCapacity(1.0f);
    b.setAbsoluteCapacity(0.00001f);
    // The first allocation is larger than the second but is still vcore
    // dominant, so the usage ratio will be based on vcores. If consumedRatio
    // doesn't round in our favor, the new limit calculation will actually be
    // less than what is currently consumed and we will fail to allocate.
    Priority priority = TestUtils.createMockPriority(1);
    app0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 20 * GB, 29, 1, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
    assign = b.assignContainers(clusterResource, node0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    app0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 10 * GB, 29, 2, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
    assign = b.assignContainers(clusterResource, node0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertTrue("Still within limits, should assign", assign.getResource().getMemorySize() > 0);
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Test (org.junit.Test)
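
Under the DominantResourceCalculator, a user's usage ratio is taken on whichever resource gives the larger share, so on a 1000-node cluster both shares are tiny floats and the rounding direction decides whether the next allocation still fits under the computed limit; that is the edge this test pins down. A minimal sketch of the dominant-share comparison (DominantShareSketch and dominantShare are illustrative names, not the calculator's API):

// Sketch of the dominant-share idea behind DRF; illustrative only.
public class DominantShareSketch {
    static float dominantShare(long usedMb, int usedVcores,
            long clusterMb, int clusterVcores) {
        float memShare = (float) usedMb / clusterMb;
        float coreShare = (float) usedVcores / clusterVcores;
        return Math.max(memShare, coreShare); // compare on the dominant resource
    }

    public static void main(String[] args) {
        // 1000 nodes of 80 GB / 100 vcores, one 20 GB / 1 vcore container:
        // both shares are on the order of 1e-4 or less, so float rounding
        // on these tiny ratios can flip the limit comparison.
        System.out.println(dominantShare(20L * 1024, 1, 1000L * 80 * 1024, 1000 * 100));
    }
}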

Aggregations

ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 514 usages
Test (org.junit.Test): 362 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 222 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 170 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 109 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 104 usages
Configuration (org.apache.hadoop.conf.Configuration): 87 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 82 usages
ArrayList (java.util.ArrayList): 75 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 74 usages
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 65 usages
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 63 usages
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent): 60 usages
Path (org.apache.hadoop.fs.Path): 55 usages
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 53 usages
Priority (org.apache.hadoop.yarn.api.records.Priority): 52 usages
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 51 usages
Container (org.apache.hadoop.yarn.api.records.Container): 50 usages
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 49 usages
HashMap (java.util.HashMap): 42 usages
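
All of the examples above obtain an ApplicationAttemptId through the test helper TestUtils.getMockApplicationAttemptId. Outside of test code, an id is normally built from an ApplicationId through the public records factory methods, roughly as in this small sketch (the printed value is an example, not real output):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AttemptIdExample {
    public static void main(String[] args) {
        // ApplicationId = (cluster timestamp, sequence number);
        // the attempt id is the 1-based retry counter for that application.
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        System.out.println(attemptId); // e.g. appattempt_1558000000000_0001_000001
    }
}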