Example 26 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

From class TestLeafQueue, method testSingleQueueWithMultipleUsers.

@Test
public void testSingleQueueWithMultipleUsers() throws Exception {
    // Mock the queue
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Unset max-capacity
    a.setMaxCapacity(1.0f);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    final String user_2 = "user_2";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, a.getAbstractUsersManager(), spyRMContext);
    // same user
    a.submitApplicationAttempt(app_1, user_0);
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_2, user_1);
    final ApplicationAttemptId appAttemptId_3 = TestUtils.getMockApplicationAttemptId(3, 0);
    FiCaSchedulerApp app_3 = new FiCaSchedulerApp(appAttemptId_3, user_2, a, a.getAbstractUsersManager(), spyRMContext);
    a.submitApplicationAttempt(app_3, user_2);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1, app_2.getApplicationAttemptId(), app_2, app_3.getApplicationAttemptId(), app_3);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
    final int numNodes = 1;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 10, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 10, true, priority, recordFactory)));
    /** 
     * Start testing... 
     */
    // Only 1 container
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(1 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // The 2nd container also fits: min-capacity is 1024 since (0.1 * 8G) < minAlloc,
    // and a user may get one container beyond the user-limit
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Can't allocate 3rd due to user-limit
    a.setUserLimit(25);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Submit resource requests for other apps now to 'activate' them
    app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 1, true, priority, recordFactory)));
    app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    // Now allocations should go to app_2 since
    // user_0 is at its limit in spite of the high user-limit-factor
    a.setUserLimitFactor(10);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Now allocations should go to app_0 since
    // user_0 is at its user-limit, not above it
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(6 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Test max-capacity
    // Now no more allocations happen since we are at max-capacity
    a.setMaxCapacity(0.5f);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(6 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Revert max-capacity and user-limit-factor
    // Now, allocations should go to app_3 since user_2 is under its user-limit
    a.setMaxCapacity(1.0f);
    a.setUserLimitFactor(1);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(7 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Now we should assign to app_3 again since user_2 is under user-limit
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(3 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_3.getCurrentConsumption().getMemorySize());
    // 8. Release each container from app_0
    for (RMContainer rmContainer : app_0.getLiveContainers()) {
        a.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(3 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_3.getCurrentConsumption().getMemorySize());
    // 9. Release each container from app_2
    for (RMContainer rmContainer : app_2.getLiveContainers()) {
        a.completedContainer(clusterResource, app_2, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_3.getCurrentConsumption().getMemorySize());
    // 10. Release each container from app_3
    for (RMContainer rmContainer : app_3.getLiveContainers()) {
        a.completedContainer(clusterResource, app_3, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    assertEquals(0 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
}
Also used: FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Test (org.junit.Test)
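
The user-limit arithmetic this test exercises (a 25% per-user floor, stretched by user-limit-factor) can be summarized in a few lines. The following is a minimal sketch under stated assumptions: the method name and the simplified formula are illustrative, not Hadoop's actual implementation, which lives in the queue's users manager and also accounts for minimum-allocation rounding, partitions, and weights.

// A minimal sketch (not Hadoop source) of the user-limit arithmetic the
// assertions above exercise; the method name and simplified formula are
// illustrative assumptions.
public class UserLimitSketch {

    // Effective per-user limit, roughly:
    //   max(queueCapacity * userLimitPercent, fair share among active users),
    // capped at queueCapacity * userLimitFactor.
    static long effectiveUserLimit(long queueCapacity, long queueUsed,
            int activeUsers, int userLimitPercent, float userLimitFactor) {
        long byPercent = (long) (queueCapacity * (userLimitPercent / 100.0));
        long byFairShare = activeUsers > 0 ? queueUsed / activeUsers : queueUsed;
        long limit = Math.max(byPercent, byFairShare);
        return Math.min(limit, (long) (queueCapacity * userLimitFactor));
    }

    public static void main(String[] args) {
        final long GB = 1024;
        // With user-limit = 25 and user-limit-factor = 1 on an 8G queue and one
        // active user, user_0 is capped at 2G, which is why the third 1G
        // request above is refused until the factor is raised.
        System.out.println(effectiveUserLimit(8 * GB, 2 * GB, 1, 25, 1.0f)); // 2048
    }
}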

Example 27 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

From class TestCapacitySchedulerLazyPreemption, method testPreemptionConsidersNodeLocalityDelay.

@Test(timeout = 60000)
public void testPreemptionConsidersNodeLocalityDelay() throws Exception {
    /**
     * Test case: same as testSimplePreemption steps 1-3.
     *
     * Step 4: app2 asks for a 1G container with locality specified, so it needs
     * to wait out the missed-opportunity count before getting scheduled.
     * Check that the system waits for the missed opportunity before finishing
     * the killable container.
     */
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 4 * GB);
    MockNM nm2 = rm1.registerNode("h2:1234", 4 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    // Launch an app to queue-a; the AM container should be launched on nm1
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    am1.allocate("*", 1 * GB, 6, new ArrayList<ContainerId>());
    // Do allocation 3 times for node1/node2
    for (int i = 0; i < 3; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
        cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    // App1 should have 7 containers now, and the cluster has no available resources
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
    Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
    // Submit app2 to queue-c; it asks for a 1G container for its AM
    RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "c");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
    // NM1/NM2 have 0G of available resources
    Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()).getUnallocatedResource().getMemorySize());
    Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId()).getUnallocatedResource().getMemorySize());
    // AM asks for a 1 * GB container at ANY, at an unknown host, and on the default rack
    am2.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), ResourceRequest.ANY, Resources.createResource(1 * GB), 1), ResourceRequest.newInstance(Priority.newInstance(1), "unknownhost", Resources.createResource(1 * GB), 1), ResourceRequest.newInstance(Priority.newInstance(1), "/default-rack", Resources.createResource(1 * GB), 1)), null);
    // Get edit policy and do one update
    SchedulingEditPolicy editPolicy = getSchedulingEditPolicy(rm1);
    // Call editSchedule() twice and check that one container from app1 is
    // marked "killable"
    editPolicy.editSchedule();
    editPolicy.editSchedule();
    PreemptionManager pm = cs.getPreemptionManager();
    Map<ContainerId, RMContainer> killableContainers = waitKillableContainersSize(pm, "a", RMNodeLabelsManager.NO_LABEL, 1);
    Assert.assertEquals(killableContainers.entrySet().iterator().next().getKey().getApplicationAttemptId(), am1.getApplicationAttemptId());
    // Call CS.handle once to see if the container is preempted
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(am2.getApplicationAttemptId());
    // App1 has 7 containers and app2 has 1 container (no container preempted yet)
    Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
    Assert.assertEquals(1, schedulerApp2.getLiveContainers().size());
    // Do allocation again, one container will be preempted
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    // App1 has 6 containers, and app2 has 2 containers (new container allocated)
    Assert.assertEquals(6, schedulerApp1.getLiveContainers().size());
    Assert.assertEquals(2, schedulerApp2.getLiveContainers().size());
    rm1.close();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), SchedulingEditPolicy (org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM), PreemptionManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager), Test (org.junit.Test)
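
The missed-opportunity wait this test depends on boils down to a counter check: a locality-constrained request may relax to another node only after the scheduler has skipped it enough times. Below is a minimal sketch of that decision under the assumption that the effective delay is bounded by cluster size; the names and the exact rule are illustrative, not the CapacityScheduler's real code.

// A minimal sketch (an assumption, not CapacityScheduler source) of the
// missed-opportunity rule behind the behavior above.
public class LocalityDelaySketch {

    static boolean canRelaxLocality(long missedOpportunities, int localityDelay,
            int numClusterNodes) {
        // Waiting longer than one pass over all nodes buys nothing, so the
        // effective delay is bounded by cluster size (illustrative rule).
        int effectiveDelay = Math.min(localityDelay, numClusterNodes);
        return missedOpportunities >= effectiveDelay;
    }

    public static void main(String[] args) {
        // On a 2-node cluster the request relaxes after being skipped twice,
        // which is why the second NodeUpdate above finally allocates.
        System.out.println(canRelaxLocality(1, 40, 2)); // false: keep waiting
        System.out.println(canRelaxLocality(2, 40, 2)); // true: may go off-node
    }
}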

Example 28 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

From class TestCapacitySchedulerLazyPreemption, method testPreemptionPolicyShouldRespectAlreadyMarkedKillableContainers.

@Test(timeout = 60000)
public void testPreemptionPolicyShouldRespectAlreadyMarkedKillableContainers() throws Exception {
    /**
     * Test case:
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          10   20  70
     * </pre>
     * Submit applications to two queues, one uses more than the other, so
     * preemption will happen.
     *
     * Check:
     * 1) Killable containers' resources are excluded from PCPP (no duplicate
     *    containers are added to the killable list)
     * 2) When more resources need to be preempted, new containers are selected
     *    and existing killable containers are still considered
     */
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    // Launch an app to queue-a; the AM container should be launched on nm1
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    am1.allocate("*", 1 * GB, 6, new ArrayList<ContainerId>());
    // Do allocation 6 times for node1
    for (int i = 0; i < 6; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    // App1 should have 7 containers now, and the cluster has no available resources
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
    Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
    // Submit app2 to queue-c; it asks for a 1G container for its AM
    RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "c");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
    // NM1 has 0G of available resources
    Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()).getUnallocatedResource().getMemorySize());
    am2.allocate("*", 1 * GB, 1, new ArrayList<ContainerId>());
    // Get edit policy and do one update
    ProportionalCapacityPreemptionPolicy editPolicy = (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
    // Call editSchedule() twice and check that one container from app1 is
    // marked "killable"
    editPolicy.editSchedule();
    editPolicy.editSchedule();
    PreemptionManager pm = cs.getPreemptionManager();
    waitKillableContainersSize(pm, "a", RMNodeLabelsManager.NO_LABEL, 1);
    // Check killable containers and to-be-preempted containers in edit policy
    Assert.assertEquals(0, editPolicy.getToPreemptContainers().size());
    // Run editSchedule() again and confirm the status doesn't change
    editPolicy.editSchedule();
    Assert.assertEquals(0, editPolicy.getToPreemptContainers().size());
    // Save the current killable containers
    Set<ContainerId> previousKillableContainers = new HashSet<>(pm.getKillableContainersMap("a", RMNodeLabelsManager.NO_LABEL).keySet());
    // Increase app2's request from 1 to 2 containers, so one more container
    // needs to be preempted
    am2.allocate("*", 1 * GB, 2, new ArrayList<ContainerId>());
    // Call editPolicy.editSchedule() once; we should have 1 container in the
    // to-preempt map and 1 container in the killable map
    editPolicy.editSchedule();
    Assert.assertEquals(1, editPolicy.getToPreemptContainers().size());
    // Call editPolicy.editSchedule() once more; we should have 2 containers in the killable map
    editPolicy.editSchedule();
    Assert.assertEquals(0, editPolicy.getToPreemptContainers().size());
    // Check that the previous killable containers are included in the new ones
    Map<ContainerId, RMContainer> killableContainers = waitKillableContainersSize(pm, "a", RMNodeLabelsManager.NO_LABEL, 2);
    Assert.assertTrue(Sets.difference(previousKillableContainers, killableContainers.keySet()).isEmpty());
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), ProportionalCapacityPreemptionPolicy (org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM), PreemptionManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager), HashSet (java.util.HashSet), Test (org.junit.Test)
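
The final assertion encodes an invariant worth spelling out: a later preemption round may add new killable containers, but containers already marked killable should remain so. Guava's Sets.difference(previous, current).isEmpty() is equivalent to current.containsAll(previous); here is a plain-Java restatement with illustrative names.

import java.util.Set;

public class KillableInvariantSketch {

    // Sets.difference(previous, current).isEmpty() is equivalent to
    // current.containsAll(previous).
    static <T> boolean previousStillKillable(Set<T> previous, Set<T> current) {
        return current.containsAll(previous);
    }

    public static void main(String[] args) {
        Set<String> before = Set.of("container_1");
        Set<String> after = Set.of("container_1", "container_2");
        // true: the originally killable container survived the second round
        System.out.println(previousStillKillable(before, after));
    }
}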

Example 29 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

From class TestCapacitySchedulerLazyPreemption, method testPreemptionConsidersUserLimit.

@Test(timeout = 60000)
public void testPreemptionConsidersUserLimit() throws Exception {
    /**
     * Test case: Submit two applications (app1/app2) to different queues, queue
     * structure:
     *
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          10   20  70
     * </pre>
     *
     * Queue-c's user-limit-factor = 0.1, so a single user cannot allocate more
     * than 1 container in queue-c.
     *
     * 1) Two nodes in the cluster, each of them has 4G.
     *
     * 2) app1 is submitted to queue-a first and asks for 7 * 1G containers, so
     * no more resources are available.
     *
     * 3) app2 is submitted to queue-c and asks for one 1G container (for its AM).
     *
     * Now the cluster is full.
     *
     * 4) app2 asks for another 1G container; because the single user in queue-c
     * has already reached its user-limit, no container should be preempted from
     * app1.
     */
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(conf);
    csConf.setUserLimitFactor(CapacitySchedulerConfiguration.ROOT + ".c", 0.1f);
    MockRM rm1 = new MockRM(csConf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 4 * GB);
    MockNM nm2 = rm1.registerNode("h2:1234", 4 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    // Launch an app to queue-a; the AM container should be launched on nm1
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    am1.allocate("*", 1 * GB, 6, new ArrayList<ContainerId>());
    // Do allocation 3 times for node1/node2
    for (int i = 0; i < 3; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
        cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    // App1 should have 7 containers now, and the cluster has no available resources
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
    Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
    // Submit app2 to queue-c; it asks for a 1G container for its AM
    RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "c");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
    // NM1/NM2 have 0G of available resources
    Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()).getUnallocatedResource().getMemorySize());
    Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId()).getUnallocatedResource().getMemorySize());
    // AM asks for a 1 * GB container
    am2.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), ResourceRequest.ANY, Resources.createResource(1 * GB), 1)), null);
    // Get edit policy and do one update
    SchedulingEditPolicy editPolicy = getSchedulingEditPolicy(rm1);
    // Call editSchedule() twice and check that no container from app1 is
    // marked "killable"
    editPolicy.editSchedule();
    editPolicy.editSchedule();
    // No preemption happens
    PreemptionManager pm = cs.getPreemptionManager();
    Map<ContainerId, RMContainer> killableContainers = waitKillableContainersSize(pm, "a", RMNodeLabelsManager.NO_LABEL, 0);
    Assert.assertEquals(0, killableContainers.size());
    // Call CS.handle once to see if the container is preempted
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(am2.getApplicationAttemptId());
    // App1 has 7 containers and app2 has 1 container (nothing preempted)
    Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
    Assert.assertEquals(1, schedulerApp2.getLiveContainers().size());
    rm1.close();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), SchedulingEditPolicy (org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM), PreemptionManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager), Test (org.junit.Test)
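
Why is nothing preempted here? The preemption policy only generates demand that queue-c's applications could actually absorb, and with user-limit-factor = 0.1 the single user in queue-c is already at its cap, so its pending ask creates no preemption pressure. A hedged back-of-the-envelope sketch of that reasoning follows; the numbers and method name are illustrative, not the policy's real code.

public class PreemptionDemandSketch {

    // Demand beyond the user's limit cannot be satisfied even if containers
    // were preempted, so it creates no preemption pressure.
    static long preemptableDemand(long pendingAsk, long userUsed, long userLimit) {
        long headroom = Math.max(0, userLimit - userUsed);
        return Math.min(pendingAsk, headroom);
    }

    public static void main(String[] args) {
        final long GB = 1024;
        // queue-c is guaranteed ~70% of the 8G cluster (~5.6G); with
        // user-limit-factor = 0.1 a single user is capped near 0.56G, which
        // the 1G AM container already exceeds, so the pending ask yields 0.
        long userLimit = (long) (0.7 * 8 * GB * 0.1);
        System.out.println(preemptableDemand(1 * GB, 1 * GB, userLimit)); // 0
    }
}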

Example 30 with FiCaSchedulerApp

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.

From class TestCapacitySchedulerNodeLabelUpdate, method testComplexResourceUsageWhenNodeUpdatesPartition.

@Test(timeout = 60000)
public void testComplexResourceUsageWhenNodeUpdatesPartition() throws Exception {
    /*
     * This test is similar to testResourceUsageWhenNodeUpdatesPartition, but
     * includes multiple applications, multiple users, and multiple containers
     * running on a single node; the size of each container is 1G.
     *
     * Node 1
     * ------
     * App1-container3
     * App2-container2
     * App2-Container3
     *
     * Node 2
     * ------
     * App2-container1
     * App1-container1
     * App1-container2
     */
    // set node -> label
    mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "z"));
    // set mapping: h1 -> x
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
    // inject node label manager
    MockRM rm = new MockRM(getConfigurationWithQueueLabels(conf)) {

        @Override
        public RMNodeLabelsManager createNodeLabelManager() {
            return mgr;
        }
    };
    rm.getRMContext().setNodeLabelManager(mgr);
    rm.start();
    MockNM nm1 = rm.registerNode("h1:1234", 80000);
    MockNM nm2 = rm.registerNode("h2:1234", 80000);
    // app1
    RMApp app1 = rm.submitApp(GB, "app", "u1", null, "a");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    // c2 on n1, c3 on n2
    am1.allocate("*", GB, 1, new ArrayList<ContainerId>(), "x");
    ContainerId containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
    Assert.assertTrue(rm.waitForState(nm1, containerId, RMContainerState.ALLOCATED));
    am1.allocate("*", GB, 1, new ArrayList<ContainerId>());
    containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
    Assert.assertTrue(rm.waitForState(nm2, containerId, RMContainerState.ALLOCATED));
    // app2
    RMApp app2 = rm.submitApp(GB, "app", "u2", null, "a");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
    // c2/c3 on n1
    am2.allocate("*", GB, 2, new ArrayList<ContainerId>(), "x");
    containerId = ContainerId.newContainerId(am2.getApplicationAttemptId(), 3);
    Assert.assertTrue(rm.waitForState(nm1, containerId, RMContainerState.ALLOCATED));
    // check used resource:
    // queue-a used x=3G, ""=3G
    checkUsedResource(rm, "a", 3 * GB, "x");
    checkUsedResource(rm, "a", 3 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    FiCaSchedulerApp application1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
    FiCaSchedulerApp application2 = cs.getApplicationAttempt(am2.getApplicationAttemptId());
    // change h1's label to z
    cs.handle(new NodeLabelsUpdateSchedulerEvent(ImmutableMap.of(nm1.getNodeId(), toSet("z"))));
    checkUsedResource(rm, "a", 0, "x");
    checkUsedResource(rm, "a", 3 * GB, "z");
    checkUsedResource(rm, "a", 3 * GB);
    checkUsedResource(rm, "root", 0, "x");
    checkUsedResource(rm, "root", 3 * GB, "z");
    checkUsedResource(rm, "root", 3 * GB);
    checkUserUsedResource(rm, "a", "u1", "x", 0 * GB);
    checkUserUsedResource(rm, "a", "u1", "z", 1 * GB);
    checkUserUsedResource(rm, "a", "u1", "", 2 * GB);
    checkUserUsedResource(rm, "a", "u2", "x", 0 * GB);
    checkUserUsedResource(rm, "a", "u2", "z", 2 * GB);
    checkUserUsedResource(rm, "a", "u2", "", 1 * GB);
    Assert.assertEquals(0, application1.getAppAttemptResourceUsage().getUsed("x").getMemorySize());
    Assert.assertEquals(1 * GB, application1.getAppAttemptResourceUsage().getUsed("z").getMemorySize());
    Assert.assertEquals(2 * GB, application1.getAppAttemptResourceUsage().getUsed("").getMemorySize());
    Assert.assertEquals(0, application2.getAppAttemptResourceUsage().getUsed("x").getMemorySize());
    Assert.assertEquals(2 * GB, application2.getAppAttemptResourceUsage().getUsed("z").getMemorySize());
    Assert.assertEquals(1 * GB, application2.getAppAttemptResourceUsage().getUsed("").getMemorySize());
    rm.close();
}
Also used: RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), NodeLabelsUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeLabelsUpdateSchedulerEvent), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM), MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM), Test (org.junit.Test)
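
The accounting this test verifies can be pictured as moving a node's used resources between per-partition tallies when its label changes. Below is a minimal sketch of that bookkeeping with illustrative names; the real logic is spread across the CapacityScheduler and its queue and usage classes.

import java.util.HashMap;
import java.util.Map;

public class PartitionUsageSketch {

    private final Map<String, Long> usedByPartition = new HashMap<>();

    void allocate(String partition, long mem) {
        usedByPartition.merge(partition, mem, Long::sum);
    }

    // When a node is relabeled, its containers' usage leaves the old
    // partition's tally and joins the new one.
    void nodeRelabeled(String oldPartition, String newPartition, long nodeUsedMem) {
        usedByPartition.merge(oldPartition, -nodeUsedMem, Long::sum);
        usedByPartition.merge(newPartition, nodeUsedMem, Long::sum);
    }

    public static void main(String[] args) {
        final long GB = 1024;
        PartitionUsageSketch queueA = new PartitionUsageSketch();
        queueA.allocate("x", 3 * GB);  // the three containers on h1 (label x)
        queueA.allocate("", 3 * GB);   // the three containers on h2 (no label)
        queueA.nodeRelabeled("x", "z", 3 * GB);
        // Mirrors the checkUsedResource assertions: x=0, z=3G, ""=3G
        System.out.println(queueA.usedByPartition);
    }
}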

Aggregations

FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 142 usages
Test (org.junit.Test): 97 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 60 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 53 usages
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM): 51 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 49 usages
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM): 48 usages
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 48 usages
MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM): 47 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 40 usages
Priority (org.apache.hadoop.yarn.api.records.Priority): 40 usages
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 35 usages
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 34 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 31 usages
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent): 31 usages
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 30 usages
ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager): 24 usages
ArrayList (java.util.ArrayList): 19 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 19 usages
Container (org.apache.hadoop.yarn.api.records.Container): 13 usages