use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestCapacitySchedulerSurgicalPreemption method testPriorityPreemptionRequiresMoveReservation.
@Test(timeout = 300000)
public void testPriorityPreemptionRequiresMoveReservation() throws Exception {
/**
 * Test case: submit two applications (app1/app2) to different queues. Queue
 * structure:
 *
 * <pre>
 *           Root
 *          /  |  \
 *         a   b   c
 *        10  20  70
 * </pre>
 *
 * 1) 3 nodes in the cluster, 10G each.
 *
 * 2) app1 is submitted to queue-b first; it asks for 2G containers and
 *    gets 2G on n1 (AM) plus 2 * 2G on n2.
 *
 * 3) app2 is submitted to queue-c with a 2G AM container (allocated on n3).
 *    app2 then requires 9G of resource, which will be reserved on n3.
 *
 * We expect the container to be unreserved from n3 and allocated on n1/n2.
 */
conf.setPUOrderingPolicyUnderUtilizedPreemptionEnabled(true);
conf.setPUOrderingPolicyUnderUtilizedPreemptionDelay(1000);
conf.setQueueOrderingPolicy(CapacitySchedulerConfiguration.ROOT, CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
conf.setPUOrderingPolicyUnderUtilizedPreemptionMoveReservation(true);
// Queue c has higher priority than a/b
conf.setQueuePriority(CapacitySchedulerConfiguration.ROOT + ".c", 1);
MockRM rm1 = new MockRM(conf);
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB);
MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB);
MockNM nm3 = rm1.registerNode("h3:1234", 10 * GB);
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
RMNode rmNode3 = rm1.getRMContext().getRMNodes().get(nm3.getNodeId());
// Launch an app to queue-b; the AM container should be launched on nm1
RMApp app1 = rm1.submitApp(2 * GB, "app", "user", null, "b");
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
am1.allocate("*", 2 * GB, 2, new ArrayList<>());
// Do allocation for node2 twice
for (int i = 0; i < 2; i++) {
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
}
FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
Assert.assertEquals(3, schedulerApp1.getLiveContainers().size());
// 1 from n1 and 2 from n2
waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()), am1.getApplicationAttemptId(), 1);
waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()), am1.getApplicationAttemptId(), 2);
// Submit app2 to queue-c; it asks for a 2G AM container, placed on n3
RMApp app2 = rm1.submitApp(2 * GB, "app", "user", null, "c");
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3);
FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
// Ask for 1 * 9G container
am2.allocate("*", 9 * GB, 1, new ArrayList<>());
// Do allocation for node3 once
cs.handle(new NodeUpdateSchedulerEvent(rmNode3));
// Make sure container reserved on node3
Assert.assertNotNull(cs.getNode(rmNode3.getNodeID()).getReservedContainer());
// Call editSchedule immediately: nothing happens
ProportionalCapacityPreemptionPolicy editPolicy = (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
editPolicy.editSchedule();
Assert.assertNotNull(cs.getNode(rmNode3.getNodeID()).getReservedContainer());
// Sleep past the configured preemption delay; the reserved container should
// be moved to n2 (n1 is occupied by the AM)
Thread.sleep(1000);
editPolicy.editSchedule();
Assert.assertNull(cs.getNode(rmNode3.getNodeID()).getReservedContainer());
Assert.assertNotNull(cs.getNode(rmNode2.getNodeID()).getReservedContainer());
Assert.assertEquals(am2.getApplicationAttemptId(), cs.getNode(rmNode2.getNodeID()).getReservedContainer().getApplicationAttemptId());
// Do it again; we should see containers marked to be preempted
editPolicy.editSchedule();
Assert.assertEquals(2, editPolicy.getToPreemptContainers().size());
// Call editSchedule again: selected containers are killed
editPolicy.editSchedule();
// Do allocation until the reserved container is allocated
while (schedulerApp2.getLiveContainers().size() < 2) {
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  Thread.sleep(200);
}
waitNumberOfLiveContainersFromApp(schedulerApp1, 1);
rm1.close();
}
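The a/b/c queue layout and capacities referenced in the test's comment come from the test class's setup, which this listing omits. Below is a minimal sketch of how such a layout is typically expressed with CapacitySchedulerConfiguration; the queue names and the 10/20/70 capacities are taken from the Javadoc above, and the actual setUp() may differ.

// Hypothetical base configuration; the real one lives in the test class's setUp().
CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
// Three leaf queues under root, matching the diagram in the test comment
csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"});
csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a", 10);
csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".b", 20);
csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".c", 70);
// testPriorityPreemptionRequiresMoveReservation then layers the priority-based
// ordering policy, queue-c priority, and reservation-moving preemption on top.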
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestContainerResizing method testSimpleIncreaseContainer.
@Test
public void testSimpleIncreaseContainer() throws Exception {
/**
 * The application has a container running and the node has enough available
 * resource. Add an increase request to see whether the container gets
 * increased.
 */
MockRM rm1 = new MockRM() {
  @Override
  public RMNodeLabelsManager createNodeLabelManager() {
    return mgr;
  }
};
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
// app1 -> default queue
RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
ContainerId containerId1 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
sentRMContainerLaunched(rm1, containerId1);
// am1 asks to change its AM container from 1GB to 3GB
am1.sendContainerResizingRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, containerId1, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(3 * GB), null)));
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(rm1, app1.getApplicationId());
checkPendingResource(rm1, "default", 2 * GB, null);
Assert.assertEquals(2 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// NM1 does one heartbeat
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
// Pending resource should be deducted
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
verifyContainerIncreased(am1.allocate(null, null), containerId1, 3 * GB);
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 17 * GB);
rm1.close();
}
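sentRMContainerLaunched and verifyContainerIncreased are private helpers of TestContainerResizing that this listing only calls. The sketches below show what they plausibly do, assuming the test class's usual imports (RMContainer, RMContainerEvent, AllocateResponse, UpdatedContainer, and so on); the upstream implementations may differ in detail.

// Sketch: drive an allocated RMContainer to the LAUNCHED state so the
// scheduler treats it as running.
private void sentRMContainerLaunched(MockRM rm, ContainerId containerId) {
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
  RMContainer rmContainer = cs.getRMContainer(containerId);
  Assert.assertNotNull("Cannot find RMContainer", rmContainer);
  rmContainer.handle(
      new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
}

// Sketch: assert the allocate response reports the container at its new size.
private void verifyContainerIncreased(AllocateResponse response,
    ContainerId containerId, long mem) {
  boolean found = false;
  for (UpdatedContainer c : response.getUpdatedContainers()) {
    if (c.getContainer().getId().equals(containerId)) {
      found = true;
      Assert.assertEquals(mem, c.getContainer().getResource().getMemorySize());
    }
  }
  Assert.assertTrue("Container " + containerId + " was not increased", found);
}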
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestContainerResizing method testExcessiveReservationWhenCancelIncreaseRequest.
@Test
public void testExcessiveReservationWhenCancelIncreaseRequest() throws Exception {
/**
 * The application has two containers running; try to increase one of them.
 * The node doesn't have enough resource, so the increase request will be
 * reserved. Check resource usage after the container is reserved, then
 * finish a container and cancel the increase request; the reservation
 * should be cancelled.
 */
MockRM rm1 = new MockRM() {
  @Override
  public RMNodeLabelsManager createNodeLabelManager() {
    return mgr;
  }
};
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB);
// app1 -> default queue
RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(rm1, app1.getApplicationId());
// Allocate one more 2G container
am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(2 * GB), 1)), null);
ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
Assert.assertTrue(rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED));
// Acquire the allocated container, and the NM reports RUNNING
am1.allocate(null, null);
sentRMContainerLaunched(rm1, containerId2);
ContainerId containerId1 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
sentRMContainerLaunched(rm1, containerId1);
// am1 asks to change its AM container from 1GB to 7GB
am1.sendContainerResizingRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, containerId1, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(7 * GB), null)));
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// NM1 does one heartbeat
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
/* Check reservation statuses */
// Increase request should be reserved
Assert.assertFalse(app.getReservedContainers().isEmpty());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 9 * GB, null);
Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default")).getUser("user").getUsed().getMemorySize());
Assert.assertEquals(3 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize());
// Complete one container and cancel the increase request (by sending an
// increase request whose target capacity equals the existing capacity)
am1.allocate(null, Arrays.asList(containerId2));
// am1 asks to change its AM container from 1G to 1G (which effectively
// cancels the increase request)
am1.sendContainerResizingRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, containerId1, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(1 * GB), null)));
// Trigger a node heartbeat
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
/* Check statuses after the reservation is cancelled */
// Increase request should be unreserved
Assert.assertTrue(app.getReservedContainers().isEmpty());
Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource is cleared since the increase request was cancelled
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 1 * GB, null);
Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default")).getUser("user").getUsed().getMemorySize());
Assert.assertEquals(0 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize());
Assert.assertEquals(1 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
rm1.close();
}
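checkPendingResource and checkUsedResource are likewise helpers defined outside this excerpt; they compare the queue's ResourceUsage for the given label (NO_LABEL when the label argument is null) against the expected value. A hedged sketch of that kind of check, with the caveat that the upstream versions may differ:

// Sketch of the queue-level assertion helpers.
private void checkPendingResource(MockRM rm, String queueName, int memory, String label) {
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
  CSQueue queue = cs.getQueue(queueName);
  Assert.assertEquals(memory, queue.getQueueResourceUsage()
      .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
      .getMemorySize());
}

private void checkUsedResource(MockRM rm, String queueName, int memory, String label) {
  CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
  CSQueue queue = cs.getQueue(queueName);
  Assert.assertEquals(memory, queue.getQueueResourceUsage()
      .getUsed(label == null ? RMNodeLabelsManager.NO_LABEL : label)
      .getMemorySize());
}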
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestContainerResizing method testIncreaseContainerUnreservedWhenContainerCompleted.
@Test
public void testIncreaseContainerUnreservedWhenContainerCompleted() throws Exception {
/**
 * The app has two containers on the same node (node.resource = 8G):
 * container1 (the AM) = 1G and container2 = 2G. The app asks to increase
 * container2 to 8G.
 *
 * The increase request will therefore be reserved. When the app releases
 * container2, the reserved part should be released as well.
 */
MockRM rm1 = new MockRM() {
  @Override
  public RMNodeLabelsManager createNodeLabelManager() {
    return mgr;
  }
};
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB);
// app1 -> default queue
RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(rm1, app1.getApplicationId());
// Allocate one more 2G container
am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(2 * GB), 1)), null);
ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
Assert.assertTrue(rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED));
// Acquire the allocated container, and the NM reports RUNNING
am1.allocate(null, null);
sentRMContainerLaunched(rm1, containerId2);
rm1.waitForState(Arrays.asList(nm1, nm2), containerId2, RMContainerState.RUNNING);
// am1 asks to change container2 from 2GB to 8GB
am1.sendContainerResizingRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, containerId2, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(8 * GB), null)));
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// NM1 does one heartbeat
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
/* Check reservation statuses */
// Increase request should be reserved
Assert.assertFalse(app.getReservedContainers().isEmpty());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 9 * GB, null);
Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default")).getUser("user").getUsed().getMemorySize());
Assert.assertEquals(3 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize());
// Complete container2; the container will be unreserved and completed
am1.allocate(null, Arrays.asList(containerId2));
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
am1.allocate(null, null);
/* Check statuses after the reservation is released */
// Increase request should be unreserved
Assert.assertTrue(app.getReservedContainers().isEmpty());
Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource is cleared since container2 (the increase target) has completed
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 1 * GB, null);
Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default")).getUser("user").getUsed().getMemorySize());
Assert.assertEquals(0 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize());
Assert.assertEquals(1 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
rm1.close();
}
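The 9G "used" value asserted while the increase is reserved decomposes into the running containers plus the reserved increase delta. The lines below are purely illustrative arithmetic built from this test's numbers, not additional test code:

// Resource accounting on nm1 while the increase of container2 is reserved:
long GB = 1024;                        // the tests express memory in MB
long amContainer   = 1 * GB;           // container1, the AM
long container2    = 2 * GB;           // the running 2G container
long reservedDelta = 8 * GB - 2 * GB;  // increase target minus current size = 6G
long queueUsed   = amContainer + container2 + reservedDelta; // 9G, matches checkUsedResource
long appReserved = reservedDelta;                            // 6G, matches getReserved()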
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestContainerResizing method testIncreaseRequestWithNoHeadroomLeft.
@Test
public void testIncreaseRequestWithNoHeadroomLeft() throws Exception {
/**
 * The application has two containers running; try to increase one of them,
 * but the requested amount exceeds the user's headroom for the queue.
 */
MockRM rm1 = new MockRM() {
  @Override
  public RMNodeLabelsManager createNodeLabelManager() {
    return mgr;
  }
};
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
// app1 -> default queue
RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(rm1, app1.getApplicationId());
// Allocate 1 container
am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(2 * GB), 1)), null);
ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
Assert.assertTrue(rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED));
// Acquire the allocated container, and the NM reports RUNNING
am1.allocate(null, null);
sentRMContainerLaunched(rm1, containerId2);
// am1 asks to change container2 from 2GB to 8GB, which will exceed user
// limit
am1.sendContainerResizingRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, containerId2, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(8 * GB), null)));
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// NM1 does one heartbeat
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
/* Check reservation statuses */
// Increase request should *NOT* be reserved as it exceeds user limit
Assert.assertTrue(app.getReservedContainers().isEmpty());
Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB, app.getAppAttemptResourceUsage().getPending().getMemorySize());
// Queue/user/application's usage will *NOT* be updated
checkUsedResource(rm1, "default", 3 * GB, null);
Assert.assertEquals(3 * GB, ((LeafQueue) cs.getQueue("default")).getUser("user").getUsed().getMemorySize());
Assert.assertEquals(3 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
Assert.assertEquals(0 * GB, app.getAppAttemptResourceUsage().getReserved().getMemorySize());
rm1.close();
}
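The reason the increase is neither allocated nor reserved here comes down to user-limit headroom. The arithmetic below is a hedged illustration using this test's numbers; it assumes the stock single "default" queue at 100% capacity with a user-limit-factor of 1, which is an assumption since the queue setup is not part of this listing:

// Illustrative headroom check (assumed default-queue configuration):
long GB = 1024;
long clusterMemory = 8 * GB;            // one NM registered with 8G
long userLimit     = clusterMemory;     // 100% capacity * user-limit-factor 1 (assumption)
long currentUsed   = 1 * GB + 2 * GB;   // AM + container2 = 3G
long increaseDelta = 8 * GB - 2 * GB;   // requested 8G minus current 2G = 6G
boolean withinHeadroom = currentUsed + increaseDelta <= userLimit;
// 3G + 6G = 9G > 8G, so withinHeadroom is false and the request is not reserved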