
Example 46 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in the Apache Hadoop project.

From class TestParentQueue, method testOffSwitchSchedulingMultiLevelQueues:

@Test
public void testOffSwitchSchedulingMultiLevelQueues() throws Exception {
    // Setup queue configs
    setupMultiLevelQueues(csConf);
    //B3
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
    // Setup some nodes
    final int memoryPerNode = 10;
    final int coresPerNode = 10;
    final int numNodes = 2;
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode * GB);
    FiCaSchedulerNode node_1 = TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode * GB);
    final Resource clusterResource = Resources.createResource(numNodes * (memoryPerNode * GB), numNodes * coresPerNode);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Start testing
    LeafQueue b3 = (LeafQueue) queues.get(B3);
    LeafQueue b2 = (LeafQueue) queues.get(B2);
    b2.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    b3.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue b = queues.get(B);
    b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    // Simulate B3 returning a container on node_0
    stubQueueAllocation(b2, clusterResource, node_0, 0 * GB, NodeType.OFF_SWITCH);
    stubQueueAllocation(b3, clusterResource, node_0, 1 * GB, NodeType.OFF_SWITCH);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(b2, 0 * GB, clusterResource);
    verifyQueueMetrics(b3, 1 * GB, clusterResource);
    // Now, B2 should get the scheduling opportunity since B2=0G/2G, B3=1G/7G
    // also, B3 gets a scheduling opportunity since B2 allocates RACK_LOCAL
    stubQueueAllocation(b2, clusterResource, node_1, 1 * GB, NodeType.RACK_LOCAL);
    stubQueueAllocation(b3, clusterResource, node_1, 1 * GB, NodeType.OFF_SWITCH);
    root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    InOrder allocationOrder = inOrder(b2, b3);
    allocationOrder.verify(b2).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    allocationOrder.verify(b3).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    verifyQueueMetrics(b2, 1 * GB, clusterResource);
    verifyQueueMetrics(b3, 2 * GB, clusterResource);
    // Now, B3 should get the scheduling opportunity
    // since B2 has 1G used of 2G while B3 has 2G used of 7G.
    // However, since B3 returns off-switch, B2 won't get an opportunity
    stubQueueAllocation(b2, clusterResource, node_0, 1 * GB, NodeType.NODE_LOCAL);
    stubQueueAllocation(b3, clusterResource, node_0, 1 * GB, NodeType.OFF_SWITCH);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder = inOrder(b3, b2);
    allocationOrder.verify(b3).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    allocationOrder.verify(b2).assignContainers(eq(clusterResource), any(PlacementSet.class), anyResourceLimits(), any(SchedulingMode.class));
    verifyQueueMetrics(b2, 1 * GB, clusterResource);
    verifyQueueMetrics(b3, 3 * GB, clusterResource);
}
Also used: InOrder (org.mockito.InOrder), FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), HashMap (java.util.HashMap), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), Resource (org.apache.hadoop.yarn.api.records.Resource), PlacementSet (org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet), Test (org.junit.Test)
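The ordering assertions in this test rely on Mockito's InOrder verification. As a minimal, self-contained sketch of that pattern (using a hypothetical Scheduler interface instead of the real CSQueue and its argument matchers):

import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;

import org.mockito.InOrder;

public class InOrderSketch {
    // Hypothetical stand-in for CSQueue#assignContainers.
    interface Scheduler {
        void assign(String node);
    }

    public static void main(String[] args) {
        Scheduler b2 = mock(Scheduler.class);
        Scheduler b3 = mock(Scheduler.class);

        // Exercise the mocks in the order the parent queue would invoke them.
        b2.assign("node_1");
        b3.assign("node_1");

        // InOrder verification fails if the recorded calls happened in any other order.
        InOrder order = inOrder(b2, b3);
        order.verify(b2).assign("node_1");
        order.verify(b3).assign("node_1");
    }
}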

Example 47 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in the Apache Hadoop project.

From class TestLeafQueue, method testGetTotalPendingResourcesConsideringUserLimitOneUser:

@Test
public void testGetTotalPendingResourcesConsideringUserLimitOneUser() throws Exception {
    // Manipulate queue 'e'
    LeafQueue e = stubLeafQueue((LeafQueue) queues.get(E));
    // Allow queue 'e' to use 100% of cluster resources (max capacity).
    e.setMaxCapacity(1.0f);
    // When used queue resources goes above capacity (in this case, 1%), user
    // resource limit (used in calculating headroom) is calculated in small
    // increments to ensure that user-limit-percent can be met for all users in
    // a queue. Take user-limit-percent out of the equation so that user
    // resource limit will always be calculated to its max possible value.
    e.setUserLimit(1000);
    final String user_0 = "user_0";
    // Submit 2 applications for user_0
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, e, mock(ActiveUsersManager.class), spyRMContext);
    // same user
    e.submitApplicationAttempt(app_1, user_0);
    // Setup 1 node with 100GB of memory resources.
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 100 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
    final int numNodes = 1;
    Resource clusterResource = Resources.createResource(numNodes * (100 * GB), numNodes * 128);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Pending resource requests for app_0 and app_1 total 5GB.
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    // Start testing...
    // Assign 1st Container of 1GB
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // With queue capacity set at 1% of 100GB and user-limit-factor set to 1.0,
    // all users (only user_0) in queue 'e' should be able to consume 1GB.
    // The first container should be assigned to app_0 with no headroom left
    // even though user_0's apps are still asking for a total of 4GB.
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // Assign 2nd container of 1GB
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // user_0 has no headroom due to user-limit-factor of 1.0. However, the
    // capacity scheduler will assign one container beyond the user limit.
    // This container also went to app_0, still with no headroom even though
    // app_0 and app_1 are asking for a cumulative 3GB.
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // Can't allocate 3rd container due to user-limit. Headroom still 0.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // Increase user-limit-factor from 1.0 to 10.0, which raises the user
    // resource limit from 1GB to 10GB (1% * 10 * 100GB = 10GB).
    // Pending for app_0 and app_1 combined is still 3GB, so the user limit
    // no longer caps the return value of
    // getTotalPendingResourcesConsideringUserLimit()
    e.setUserLimitFactor(10.0f);
    assertEquals(3 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // app_0 is now satisfied; app_1 is still asking for 2GB.
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // Get the last 2 containers for app_1, no more pending requests.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // Release each container from app_0
    for (RMContainer rmContainer : app_0.getLiveContainers()) {
        e.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    // Release each container from app_1
    for (RMContainer rmContainer : app_1.getLiveContainers()) {
        e.completedContainer(clusterResource, app_1, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager), Test (org.junit.Test)
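The headroom values asserted above follow simple arithmetic: queue 'e' holds 1% of a 100GB cluster, scaled by user-limit-factor. A small sketch of that calculation with constants copied from the test (an illustration of the comments above, not the scheduler's internal formula):

public class UserLimitArithmeticSketch {
    public static void main(String[] args) {
        final long clusterMemoryGB = 100;    // 1 node x 100GB, as in the test
        final double queueCapacity = 0.01;   // queue 'e' is configured at 1%

        // With user-limit-factor 1.0 the per-user limit is about 1GB, so any
        // pending demand beyond the first container is clipped to zero.
        System.out.println(clusterMemoryGB * queueCapacity * 1.0 + " GB");   // 1.0 GB

        // Raising user-limit-factor to 10 lifts the limit to 10GB, so the
        // remaining 3GB of pending requests is no longer clipped.
        System.out.println(clusterMemoryGB * queueCapacity * 10.0 + " GB");  // 10.0 GB
    }
}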

Example 48 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in the Apache Hadoop project.

From class TestLeafQueue, method testFairAssignment:

@Test
public void testFairAssignment() throws Exception {
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    OrderingPolicy<FiCaSchedulerApp> schedulingOrder = new FairOrderingPolicy<FiCaSchedulerApp>();
    a.setOrderingPolicy(schedulingOrder);
    String host_0_0 = "127.0.0.1";
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0_0 = TestUtils.getMockNode(host_0_0, rack_0, 0, 16 * GB);
    final int numNodes = 4;
    Resource clusterResource = Resources.createResource(numNodes * (16 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = spy(new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = spy(new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext));
    a.submitApplicationAttempt(app_1, user_0);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0_0.getNodeID(), node_0_0);
    Priority priority = TestUtils.createMockPriority(1);
    List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
    List<ResourceRequest> app_1_requests_0 = new ArrayList<ResourceRequest>();
    app_0_requests_0.clear();
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    app_1_requests_0.clear();
    app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_1.updateResourceRequests(app_1_requests_0);
    // app_0 will get containers first, as it was submitted first.
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    app_0_requests_0.clear();
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    app_1_requests_0.clear();
    app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_1.updateResourceRequests(app_1_requests_0);
    // Since app_0 already holds more resources, it will not be
    // assigned first, but app_1 will
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    Assert.assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    // and only then will app_0 get its next container
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), FairOrderingPolicy (org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ArrayList (java.util.ArrayList), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), Test (org.junit.Test)
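FairOrderingPolicy favors the application with the least current consumption, which is why app_1 overtakes app_0 once app_0 already holds 2GB. A minimal comparator sketch of that idea (illustrative only; the actual policy has its own comparators and resource calculators):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class FairOrderSketch {
    // Hypothetical record of an application's current memory consumption.
    record App(String id, long usedMB) {}

    public static void main(String[] args) {
        List<App> apps = new ArrayList<>(List.of(
                new App("app_0", 2048),    // already holds 2GB
                new App("app_1", 1024)));  // holds 1GB

        // Least consumption first: app_1 is offered the next container before
        // app_0, mirroring the assertions in the test above.
        apps.sort(Comparator.comparingLong(App::usedMB));
        System.out.println(apps);  // app_1 first, then app_0
    }
}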

Example 49 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in the Apache Hadoop project.

From class TestLeafQueue, method testFifoAssignment:

@Test
public void testFifoAssignment() throws Exception {
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    a.setOrderingPolicy(new FifoOrderingPolicy<FiCaSchedulerApp>());
    String host_0_0 = "127.0.0.1";
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0_0 = TestUtils.getMockNode(host_0_0, rack_0, 0, 16 * GB);
    final int numNodes = 4;
    Resource clusterResource = Resources.createResource(numNodes * (16 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = spy(new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext, Priority.newInstance(3), false));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = spy(new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext, Priority.newInstance(5), false));
    a.submitApplicationAttempt(app_1, user_0);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0_0.getNodeID(), node_0_0);
    Priority priority = TestUtils.createMockPriority(1);
    List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
    List<ResourceRequest> app_1_requests_0 = new ArrayList<ResourceRequest>();
    app_0_requests_0.clear();
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    app_1_requests_0.clear();
    app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_1.updateResourceRequests(app_1_requests_0);
    // app_1 will get containers first as it has the higher priority
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    app_0_requests_0.clear();
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    app_1_requests_0.clear();
    app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory));
    app_1.updateResourceRequests(app_1_requests_0);
    // app_1 will still be assigned first, as its priority is higher.
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    Assert.assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    // and only then will app_0 get its next container
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    Assert.assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ArrayList (java.util.ArrayList), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), Test (org.junit.Test)
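With FifoOrderingPolicy the higher-priority application is scheduled first, and ties fall back to submission order, which is why app_1 (priority 5) is served before app_0 (priority 3). A minimal sketch of that ordering (illustrative only, not the policy's actual implementation):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class FifoPrioritySketch {
    // Hypothetical application entry: attempt id (submission order) and priority.
    record App(int attemptId, int priority) {}

    public static void main(String[] args) {
        List<App> apps = new ArrayList<>(List.of(
                new App(0, 3),    // app_0, priority 3
                new App(1, 5)));  // app_1, priority 5

        // Higher priority first, then earlier submission: app_1 is offered
        // containers before app_0, as the test asserts.
        apps.sort(Comparator.comparingInt(App::priority).reversed()
                .thenComparingInt(App::attemptId));
        System.out.println(apps);  // app_1 (priority 5) first, then app_0
    }
}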

Example 50 with ResourceLimits

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in the Apache Hadoop project.

From class TestLeafQueue, method testMaxAMResourcePerQueuePercentAfterQueueRefresh:

@Test
public void testMaxAMResourcePerQueuePercentAfterQueueRefresh() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    Resource clusterResource = Resources.createResource(100 * 16 * GB, 100 * 32);
    CapacitySchedulerContext csContext = mockCSContext(csConf, clusterResource);
    when(csContext.getRMContext()).thenReturn(rmContext);
    csConf.setFloat(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.1f);
    ParentQueue root = new ParentQueue(csContext, CapacitySchedulerConfiguration.ROOT, null, null);
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + A, 80);
    LeafQueue a = new LeafQueue(csContext, A, root, null);
    assertEquals(0.1f, a.getMaxAMResourcePerQueuePercent(), 1e-3f);
    assertEquals(a.calculateAndGetAMResourceLimit(), Resources.createResource(160 * GB, 1));
    csConf.setFloat(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.2f);
    LeafQueue newA = new LeafQueue(csContext, A, root, null);
    a.reinitialize(newA, clusterResource);
    assertEquals(0.2f, a.getMaxAMResourcePerQueuePercent(), 1e-3f);
    assertEquals(a.calculateAndGetAMResourceLimit(), Resources.createResource(320 * GB, 1));
    Resource newClusterResource = Resources.createResource(100 * 20 * GB, 100 * 32);
    a.updateClusterResource(newClusterResource, new ResourceLimits(newClusterResource));
    //  100 * 20 * 0.2 = 400
    assertEquals(a.calculateAndGetAMResourceLimit(), Resources.createResource(400 * GB, 1));
}
Also used: ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), Resource (org.apache.hadoop.yarn.api.records.Resource), Test (org.junit.Test)
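The asserted limits follow the arithmetic in the test's own comment: cluster memory multiplied by the maximum-AM-resource-percent. A small sketch of that calculation using the test's numbers (an illustration of the assertions, not LeafQueue's internal formula):

public class AmLimitArithmeticSketch {
    // Hypothetical helper: AM limit in GB, given cluster memory and the AM percent.
    static long amLimitGB(long clusterMemoryGB, double amResourcePercent) {
        return (long) (clusterMemoryGB * amResourcePercent);
    }

    public static void main(String[] args) {
        System.out.println(amLimitGB(100 * 16, 0.1));  // 160, matches the first assert
        System.out.println(amLimitGB(100 * 16, 0.2));  // 320, after the percent is refreshed
        System.out.println(amLimitGB(100 * 20, 0.2));  // 400, after the cluster grows
    }
}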

Aggregations

ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 52 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 48 usages
Test (org.junit.Test): 40 usages
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 37 usages
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 34 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 32 usages
Priority (org.apache.hadoop.yarn.api.records.Priority): 31 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 29 usages
ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager): 20 usages
HashMap (java.util.HashMap): 10 usages
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 10 usages
ArrayList (java.util.ArrayList): 9 usages
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 9 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 9 usages
AMState (org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt.AMState): 7 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 6 usages
PlacementSet (org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet): 6 usages
SchedulerRequestKey (org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey): 6 usages
InOrder (org.mockito.InOrder): 5 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 4 usages