
Example 1 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class TestFSAppAttempt, method testDelayScheduling.

@Test
public void testDelayScheduling() {
    FSLeafQueue queue = Mockito.mock(FSLeafQueue.class);
    Priority pri = Mockito.mock(Priority.class);
    SchedulerRequestKey prio = TestUtils.toSchedulerKey(pri);
    Mockito.when(pri.getPriority()).thenReturn(1);
    double nodeLocalityThreshold = .5;
    double rackLocalityThreshold = .6;
    ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
    RMContext rmContext = resourceManager.getRMContext();
    FSAppAttempt schedulerApp = new FSAppAttempt(scheduler, applicationAttemptId, "user1", queue, null, rmContext);
    // Default level should be node-local
    assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
    // First five scheduling opportunities should remain node local
    for (int i = 0; i < 5; i++) {
        schedulerApp.addSchedulingOpportunity(prio);
        assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
    }
    // After five it should switch to rack local
    schedulerApp.addSchedulingOpportunity(prio);
    assertEquals(NodeType.RACK_LOCAL, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
    // Manually set back to node local
    schedulerApp.resetAllowedLocalityLevel(prio, NodeType.NODE_LOCAL);
    schedulerApp.resetSchedulingOpportunities(prio);
    assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
    // Now escalate again to rack-local, then to off-switch
    for (int i = 0; i < 5; i++) {
        schedulerApp.addSchedulingOpportunity(prio);
        assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
    }
    schedulerApp.addSchedulingOpportunity(prio);
    assertEquals(NodeType.RACK_LOCAL, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
    for (int i = 0; i < 6; i++) {
        schedulerApp.addSchedulingOpportunity(prio);
        assertEquals(NodeType.RACK_LOCAL, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
    }
    schedulerApp.addSchedulingOpportunity(prio);
    assertEquals(NodeType.OFF_SWITCH, schedulerApp.getAllowedLocalityLevel(prio, 10, nodeLocalityThreshold, rackLocalityThreshold));
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) Priority(org.apache.hadoop.yarn.api.records.Priority) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) Test(org.junit.Test)
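
The escalation the assertions trace can be condensed into one small rule. The sketch below is an illustrative approximation of the count-based delay-scheduling logic, not the FSAppAttempt source: each threshold is a fraction of the cluster size, and the allowed level relaxes once the scheduling-opportunity count exceeds threshold * numNodes (the helper name and signature are assumptions).

// Illustrative approximation only -- not the FSAppAttempt implementation.
// With numNodes = 10: nodeThreshold .5 relaxes NODE_LOCAL on the 6th
// opportunity and rackThreshold .6 relaxes RACK_LOCAL on the 7th,
// matching the assertions above. NodeType is
// org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType.
static NodeType allowedLocalityLevel(int schedulingOpportunities, int numNodes,
        double nodeThreshold, double rackThreshold, NodeType currentLevel) {
    if (nodeThreshold < 0 || rackThreshold < 0) {
        // negative thresholds signal that delay scheduling is not in use
        return NodeType.OFF_SWITCH;
    }
    double threshold = (currentLevel == NodeType.NODE_LOCAL)
            ? nodeThreshold : rackThreshold;
    if (schedulingOpportunities > threshold * numNodes) {
        // relax one level; the real scheduler also resets the opportunity
        // count when it lowers the level, which the test above relies on
        return (currentLevel == NodeType.NODE_LOCAL)
                ? NodeType.RACK_LOCAL : NodeType.OFF_SWITCH;
    }
    return currentLevel;
}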

Example 2 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class TestFSAppAttempt, method testLocalityLevelWithoutDelays.

/**
 * Ensure that when negative parameters are given (signaling that delay
 * scheduling is not in use), the least restrictive locality level is
 * returned.
 */
@Test
public void testLocalityLevelWithoutDelays() {
    FSLeafQueue queue = Mockito.mock(FSLeafQueue.class);
    Priority pri = Mockito.mock(Priority.class);
    SchedulerRequestKey prio = TestUtils.toSchedulerKey(pri);
    Mockito.when(pri.getPriority()).thenReturn(1);
    RMContext rmContext = resourceManager.getRMContext();
    ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
    FSAppAttempt schedulerApp = new FSAppAttempt(scheduler, applicationAttemptId, "user1", queue, null, rmContext);
    assertEquals(NodeType.OFF_SWITCH, schedulerApp.getAllowedLocalityLevel(prio, 10, -1.0, -1.0));
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) Priority(org.apache.hadoop.yarn.api.records.Priority) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) Test(org.junit.Test)
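
One detail shared by these setups: Priority does not have to be mocked. The records API exposes a public factory method, so the following minimal sketch (using the same TestUtils helper as the snippets above) is equivalent:

// Equivalent setup without Mockito; Priority.newInstance is part of the
// public org.apache.hadoop.yarn.api.records API.
Priority pri = Priority.newInstance(1);
SchedulerRequestKey prio = TestUtils.toSchedulerKey(pri);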

Example 3 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class TestFSAppAttempt, method testDelaySchedulingForContinuousScheduling.

@Test
public void testDelaySchedulingForContinuousScheduling() throws InterruptedException {
    FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue("queue", true);
    Priority pri = Mockito.mock(Priority.class);
    SchedulerRequestKey prio = TestUtils.toSchedulerKey(pri);
    Mockito.when(pri.getPriority()).thenReturn(1);
    ControlledClock clock = new ControlledClock();
    scheduler.setClock(clock);
    // 5 seconds
    long nodeLocalityDelayMs = 5 * 1000L;
    // 6 seconds
    long rackLocalityDelayMs = 6 * 1000L;
    RMContext rmContext = resourceManager.getRMContext();
    ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
    FSAppAttempt schedulerApp = new FSAppAttempt(scheduler, applicationAttemptId, "user1", queue, null, rmContext);
    // Default level should be node-local
    assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevelByTime(prio, nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
    // after 4 seconds should remain node local
    clock.tickSec(4);
    assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevelByTime(prio, nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
    // after 6 seconds should switch to rack local
    clock.tickSec(2);
    assertEquals(NodeType.RACK_LOCAL, schedulerApp.getAllowedLocalityLevelByTime(prio, nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
    // manually set back to node local
    schedulerApp.resetAllowedLocalityLevel(prio, NodeType.NODE_LOCAL);
    schedulerApp.resetSchedulingOpportunities(prio, clock.getTime());
    assertEquals(NodeType.NODE_LOCAL, schedulerApp.getAllowedLocalityLevelByTime(prio, nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
    // Now escalate again to rack-local, then to off-switch
    clock.tickSec(6);
    assertEquals(NodeType.RACK_LOCAL, schedulerApp.getAllowedLocalityLevelByTime(prio, nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
    clock.tickSec(7);
    assertEquals(NodeType.OFF_SWITCH, schedulerApp.getAllowedLocalityLevelByTime(prio, nodeLocalityDelayMs, rackLocalityDelayMs, clock.getTime()));
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) Priority(org.apache.hadoop.yarn.api.records.Priority) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ControlledClock(org.apache.hadoop.yarn.util.ControlledClock) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) Test(org.junit.Test)
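
The time-based variant stays deterministic because ControlledClock only advances when told to; no sleeping is involved. A minimal usage sketch of the org.apache.hadoop.yarn.util.ControlledClock calls seen above:

// Time moves only when the test ticks the clock, so the 5s node delay
// and 6s rack delay can be checked exactly.
ControlledClock clock = new ControlledClock();
long start = clock.getTime();
clock.tickSec(4);   // 4s elapsed: still within the 5s node-local delay
clock.tickSec(2);   // 6s elapsed: past the node delay, RACK_LOCAL allowed
assert clock.getTime() == start + 6 * 1000L;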

Example 4 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class TestChildQueueOrder, method testSortedQueues.

@Test
@SuppressWarnings("unchecked")
public void testSortedQueues() throws Exception {
    // Setup queue configs
    setupSortedQueues(csConf);
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
    // Setup some nodes
    final int memoryPerNode = 10;
    final int coresPerNode = 16;
    final int numNodes = 1;
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode * GB);
    doNothing().when(node_0).releaseContainer(any(ContainerId.class), anyBoolean());
    final Resource clusterResource = Resources.createResource(numNodes * (memoryPerNode * GB), numNodes * coresPerNode);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Start testing
    CSQueue a = queues.get(A);
    CSQueue b = queues.get(B);
    CSQueue c = queues.get(C);
    CSQueue d = queues.get(D);
    // Make a/b/c/d have >0 pending resources so that allocation will continue.
    queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    c.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    d.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    final String user_0 = "user_0";
    // Stub an App and its containerCompleted
    FiCaSchedulerApp app_0 = getMockApplication(0, user_0);
    doReturn(true).when(app_0).containerCompleted(any(RMContainer.class), any(ContainerStatus.class), any(RMContainerEventType.class), any(String.class));
    Priority priority = TestUtils.createMockPriority(1);
    ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
    DrainDispatcher drainDispatcher = new DrainDispatcher();
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
    when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
    when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(app_0.getApplicationId(), 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
    Container container = TestUtils.getMockContainer(containerId, node_0.getNodeID(), Resources.createResource(1 * GB), priority);
    RMContainer rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, node_0.getNodeID(), "user", rmContext);
    // Assign 1, 2, 3 and 4 x 1GB containers to queues a, b, c and d respectively
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    for (int i = 0; i < 2; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    for (int i = 0; i < 3; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    for (int i = 0; i < 4; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    verifyQueueMetrics(a, 1 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 4 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    //Release 3 x 1GB containers from D
    for (int i = 0; i < 3; i++) {
        d.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    }
    verifyQueueMetrics(a, 1 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    //manually reset the resources on the node
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 1 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Assign 2 x 1GB Containers to A 
    for (int i = 0; i < 2; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    //Release 1GB Container from A
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    //manually reset the resources on the node
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 2 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Assign 1GB container to B 
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 3 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    //Release 1GB container resources from B
    b.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    //manually reset the resources on the node
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 2 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Assign 1GB container to A
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Now do the real test, where B and D request a 1GB container
    // D should get the next container if the order is correct
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    InOrder allocationOrder = inOrder(d, b);
    allocationOrder.verify(d).assignContainers(eq(clusterResource), any(PlacementSet.class), any(ResourceLimits.class), any(SchedulingMode.class));
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(PlacementSet.class), any(ResourceLimits.class), any(SchedulingMode.class));
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    //D got the container
    verifyQueueMetrics(d, 2 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
}
Also used : DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) HashMap(java.util.HashMap) ContainerAllocationExpirer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) RMContainerImpl(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) SystemMetricsPublisher(org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) PlacementSet(org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet) RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) InOrder(org.mockito.InOrder) FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) RMApplicationHistoryWriter(org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RMContainerEventType(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) Test(org.junit.Test)
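
What the InOrder verification pins down is the child-queue ordering: among queues with pending demand, the least-used queue (d, 1GB) is offered the node before the more-used one (b, 2GB). A self-contained sketch of that ordering idea; QueueStat is a stand-in type, not part of the CapacityScheduler API:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Children considered in ascending order of used resources: d sorts
// ahead of b, so d is offered (and gets) the next container.
public class SortedQueuesSketch {
    static class QueueStat {
        final String name;
        final int usedGB;
        QueueStat(String name, int usedGB) { this.name = name; this.usedGB = usedGB; }
    }

    public static void main(String[] args) {
        List<QueueStat> children = new ArrayList<>();
        children.add(new QueueStat("a", 3));
        children.add(new QueueStat("b", 2));
        children.add(new QueueStat("c", 3));
        children.add(new QueueStat("d", 1));
        children.sort(Comparator.comparingInt((QueueStat q) -> q.usedGB));
        System.out.println(children.get(0).name); // prints "d"
    }
}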

Example 5 with Priority

Use of org.apache.hadoop.yarn.api.records.Priority in project hadoop by apache.

The class TestLeafQueue, method testComputeUserLimitAndSetHeadroom.

@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testComputeUserLimitAndSetHeadroom() throws IOException {
    LeafQueue qb = stubLeafQueue((LeafQueue) queues.get(B));
    qb.setMaxCapacity(1.0f);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    //create nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), 1);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    CapacitySchedulerQueueManager mockCapacitySchedulerQueueManager = mock(CapacitySchedulerQueueManager.class);
    QueueStateManager mockQueueStateManager = mock(QueueStateManager.class);
    when(mockCapacitySchedulerQueueManager.getQueueStateManager()).thenReturn(mockQueueStateManager);
    when(csContext.getCapacitySchedulerQueueManager()).thenReturn(mockCapacitySchedulerQueueManager);
    //our test plan contains four cases
    //1. a single user dominates the queue; we test the headroom
    //2. two users, but user_0 is assigned 100% of the queue resource;
    //   submit user_1's application and check headroom correctness
    //3. two users, each assigned 50% of the queue resource;
    //   each user submits one application and we check their headrooms
    //4. similar to 3, but user_0 has no quota left while there are
    //   free resources left; check headroom
    //test case 1
    qb.setUserLimit(100);
    qb.setUserLimitFactor(1);
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, qb, qb.getAbstractUsersManager(), spyRMContext);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = new HashMap<>();
    apps.put(app_0.getApplicationAttemptId(), app_0);
    qb.submitApplicationAttempt(app_0, user_0);
    Priority u0Priority = TestUtils.createMockPriority(1);
    SchedulerRequestKey u0SchedKey = toSchedulerKey(u0Priority);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 4 * GB, 1, true, u0Priority, recordFactory)));
    assertEquals("There should only be 1 active user!", 1, qb.getAbstractUsersManager().getNumActiveUsers());
    //get headroom
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    //maxqueue 16G, userlimit 13G - 4G used = 9G
    assertEquals(9 * GB, app_0.getHeadroom().getMemorySize());
    //test case 2
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_2.getApplicationAttemptId(), app_2);
    Priority u1Priority = TestUtils.createMockPriority(2);
    SchedulerRequestKey u1SchedKey = toSchedulerKey(u1Priority);
    app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 4 * GB, 1, true, u1Priority, recordFactory)));
    qb.submitApplicationAttempt(app_2, user_1);
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertEquals(8 * GB, qb.getUsedResources().getMemorySize());
    assertEquals(4 * GB, app_0.getCurrentConsumption().getMemorySize());
    //maxqueue 16G, userlimit 13G - 4G used = 9G, BUT
    //maxqueue 16G - 8G used (4G per app/user) = 8G max headroom (the new logic)
    assertEquals(8 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(4 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(8 * GB, app_2.getHeadroom().getMemorySize());
    //test case 3
    qb.finishApplication(app_0.getApplicationId(), user_0);
    qb.finishApplication(app_2.getApplicationId(), user_1);
    qb.releaseResource(clusterResource, app_0, app_0.getAppSchedulingInfo().getPendingAsk(u0SchedKey).getPerAllocationResource(), null, null);
    qb.releaseResource(clusterResource, app_2, app_2.getAppSchedulingInfo().getPendingAsk(u1SchedKey).getPerAllocationResource(), null, null);
    qb.setUserLimit(50);
    qb.setUserLimitFactor(1);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_1.getApplicationAttemptId(), app_1);
    final ApplicationAttemptId appAttemptId_3 = TestUtils.getMockApplicationAttemptId(3, 0);
    FiCaSchedulerApp app_3 = new FiCaSchedulerApp(appAttemptId_3, user_1, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_3.getApplicationAttemptId(), app_3);
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, u0Priority, recordFactory)));
    app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, u1Priority, recordFactory)));
    qb.submitApplicationAttempt(app_1, user_0);
    qb.submitApplicationAttempt(app_3, user_1);
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertEquals(4 * GB, qb.getUsedResources().getMemorySize());
    //maxqueue 16G, userlimit 7G, used (by each user) 2G, headroom 5G (both)
    assertEquals(5 * GB, app_3.getHeadroom().getMemorySize());
    assertEquals(5 * GB, app_1.getHeadroom().getMemorySize());
    //test case 4
    final ApplicationAttemptId appAttemptId_4 = TestUtils.getMockApplicationAttemptId(4, 0);
    FiCaSchedulerApp app_4 = new FiCaSchedulerApp(appAttemptId_4, user_0, qb, qb.getAbstractUsersManager(), spyRMContext);
    apps.put(app_4.getApplicationAttemptId(), app_4);
    qb.submitApplicationAttempt(app_4, user_0);
    app_4.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 6 * GB, 1, true, u0Priority, recordFactory)));
    applyCSAssignment(clusterResource, qb.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), qb, nodes, apps);
    qb.computeUserLimitAndSetHeadroom(app_4, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    //app3 is user1, active from last test case
    //maxqueue 16G, userlimit 13G, used 2G: headroom would be 11G, BUT
    //10G is in use, so the max possible headroom is 6G (new logic)
    assertEquals(6 * GB, app_3.getHeadroom().getMemorySize());
    //testcase3 still active - 2+2+6=10
    assertEquals(10 * GB, qb.getUsedResources().getMemorySize());
    //app4 is user 0
    //maxqueue 16G, userlimit 7G, user_0 already has 8G used, so headroom clamps to 0
    //(the 8G used is 6G from this test case, app_4, plus 2G from the last, app_1)
    assertEquals(0 * GB, app_4.getHeadroom().getMemorySize());
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) QueueStateManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueStateManager) Test(org.junit.Test)
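
Every headroom assertion above follows one pattern: the user's headroom is the user limit minus the user's consumption, further capped by the room left under the queue maximum. A simplified sketch of that arithmetic (hedged: the real computeUserLimitAndSetHeadroom also handles partitions and the user limit factor):

// Simplified headroom rule implied by the inline comments:
// headroom = max(0, min(userLimit - userUsed, queueMax - queueUsed))
static long headroomGB(long userLimitGB, long userUsedGB,
        long queueMaxGB, long queueUsedGB) {
    return Math.max(0, Math.min(userLimitGB - userUsedGB, queueMaxGB - queueUsedGB));
}

// Spot checks against the assertions:
// case 1: min(13 - 4, 16 - 4) = 9          -> 9 GB
// case 2: min(13 - 4, 16 - 8) = 8          -> 8 GB ("the new logic")
// case 3: min(7 - 2, 16 - 4)  = 5          -> 5 GB for both users
// case 4: min(7 - 8, 16 - 10) is negative  -> clamped to 0 GB for app_4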

Aggregations

Priority (org.apache.hadoop.yarn.api.records.Priority) 216
Test (org.junit.Test) 139
Resource (org.apache.hadoop.yarn.api.records.Resource) 109
Container (org.apache.hadoop.yarn.api.records.Container) 64
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 62
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId) 49
NodeId (org.apache.hadoop.yarn.api.records.NodeId) 43
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) 40
TezTaskAttemptID (org.apache.tez.dag.records.TezTaskAttemptID) 40
Configuration (org.apache.hadoop.conf.Configuration) 37
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) 34
ArrayList (java.util.ArrayList) 33
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) 31
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 30
TezConfiguration (org.apache.tez.dag.api.TezConfiguration) 29
TaskSchedulerContextDrainable (org.apache.tez.dag.app.rm.TestTaskSchedulerHelpers.TaskSchedulerContextDrainable) 27
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest) 26
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 25
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) 24
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource) 23