Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testSteadyFairShareWithReloadAndNodeAddRemove.
@Test
public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
  out.println("<queue name=\"root\">");
  out.println(" <schedulingPolicy>drf</schedulingPolicy>");
  out.println(" <queue name=\"child1\">");
  out.println(" <weight>1</weight>");
  out.println(" </queue>");
  out.println(" <queue name=\"child2\">");
  out.println(" <weight>1</weight>");
  out.println(" </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // The steady fair share for all queues should be 0
  QueueManager queueManager = scheduler.getQueueManager();
  assertEquals(0, queueManager.getLeafQueue("child1", false)
      .getSteadyFairShare().getMemorySize());
  assertEquals(0, queueManager.getLeafQueue("child2", false)
      .getSteadyFairShare().getMemorySize());

  // Add one node
  RMNode node1 =
      MockNodes.newNodeInfo(1, Resources.createResource(6144), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  assertEquals(6144, scheduler.getClusterResource().getMemorySize());

  // The steady fair shares for all queues should be updated
  assertEquals(2048, queueManager.getLeafQueue("child1", false)
      .getSteadyFairShare().getMemorySize());
  assertEquals(2048, queueManager.getLeafQueue("child2", false)
      .getSteadyFairShare().getMemorySize());

  // Reload the allocation configuration file
  out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
  out.println("<queue name=\"root\">");
  out.println(" <schedulingPolicy>drf</schedulingPolicy>");
  out.println(" <queue name=\"child1\">");
  out.println(" <weight>1</weight>");
  out.println(" </queue>");
  out.println(" <queue name=\"child2\">");
  out.println(" <weight>2</weight>");
  out.println(" </queue>");
  out.println(" <queue name=\"child3\">");
  out.println(" <weight>2</weight>");
  out.println(" </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // The steady fair shares for all queues should be updated
  assertEquals(1024, queueManager.getLeafQueue("child1", false)
      .getSteadyFairShare().getMemorySize());
  assertEquals(2048, queueManager.getLeafQueue("child2", false)
      .getSteadyFairShare().getMemorySize());
  assertEquals(2048, queueManager.getLeafQueue("child3", false)
      .getSteadyFairShare().getMemorySize());

  // Remove the node; the steady fair shares should go back to 0
  NodeRemovedSchedulerEvent nodeEvent2 = new NodeRemovedSchedulerEvent(node1);
  scheduler.handle(nodeEvent2);
  assertEquals(0, scheduler.getClusterResource().getMemorySize());
  assertEquals(0, queueManager.getLeafQueue("child1", false)
      .getSteadyFairShare().getMemorySize());
  assertEquals(0, queueManager.getLeafQueue("child2", false)
      .getSteadyFairShare().getMemorySize());
}
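The asserted shares are simply the cluster memory divided in proportion to queue weights, and they collapse back to 0 once the only node is removed. Below is a minimal sketch of that arithmetic (a hypothetical helper, not the FairScheduler computation), assuming, as the asserted numbers suggest, that the implicit root.default queue with weight 1 also takes part in the division:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: weight-proportional split of cluster memory.
public class SteadyFairShareSketch {

  static Map<String, Long> split(long clusterMemoryMb, Map<String, Double> weights) {
    double totalWeight = 0;
    for (double w : weights.values()) {
      totalWeight += w;
    }
    Map<String, Long> shares = new LinkedHashMap<>();
    for (Map.Entry<String, Double> e : weights.entrySet()) {
      shares.put(e.getKey(), (long) (clusterMemoryMb * e.getValue() / totalWeight));
    }
    return shares;
  }

  public static void main(String[] args) {
    // Before the reload: child1 and child2 with weight 1, plus the assumed root.default.
    Map<String, Double> before = new LinkedHashMap<>();
    before.put("root.default", 1.0);
    before.put("child1", 1.0);
    before.put("child2", 1.0);
    // 6144 / 3 = 2048 for each queue, matching the assertions.
    System.out.println(split(6144, before));

    // After the reload: weights 1, 2, 2, plus the assumed root.default with weight 1.
    Map<String, Double> after = new LinkedHashMap<>();
    after.put("root.default", 1.0);
    after.put("child1", 1.0);
    after.put("child2", 2.0);
    after.put("child3", 2.0);
    // 6144 * 1/6 = 1024 for child1, 6144 * 2/6 = 2048 for child2 and child3.
    System.out.println(split(6144, after));
  }
}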
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testRequestAMResourceInZeroFairShareQueue.
/**
 * The test verifies that zero-FairShare queues (because of zero/tiny
 * weight) can get resources for the AM.
 */
@Test
public void testRequestAMResourceInZeroFairShareQueue() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"queue1\">");
  out.println("<weight>0.0</weight>");
  out.println("<maxResources>4096mb,10vcores</maxResources>");
  out.println("<maxAMShare>0.5</maxAMShare>");
  out.println("</queue>");
  out.println("<queue name=\"queue2\">");
  out.println("<weight>2.0</weight>");
  out.println("</queue>");
  out.println("<queue name=\"queue3\">");
  out.println("<weight>0.000001</weight>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  RMNode node =
      MockNodes.newNodeInfo(1, Resources.createResource(8192, 20), 0, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeEvent);
  scheduler.update();

  // Create a request for the non-zero-weight queue
  createSchedulingRequest(1024, "root.queue2", "user2", 1);
  scheduler.update();
  scheduler.handle(updateEvent);

  // A managed AM that needs 3G of memory will not get resources,
  // since it requests more than the maxAMShare (4G * 0.5 = 2G).
  Resource amResource = Resource.newInstance(1024, 1);
  int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
  ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
  createApplicationWithAMResource(attId1, "root.queue1", "user1", amResource);
  createSchedulingRequestExistingApplication(3 * 1024, 1, amPriority, attId1);
  FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application 1 should not be running", 0,
      app1.getLiveContainers().size());

  // A managed AM that needs 2G of memory will get resources,
  // since it requests no more than the maxAMShare (4G * 0.5 = 2G).
  ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
  createApplicationWithAMResource(attId2, "root.queue1", "user1", amResource);
  createSchedulingRequestExistingApplication(2 * 1024, 1, amPriority, attId2);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application 2 should be running", 1,
      app2.getLiveContainers().size());

  // A managed AM that needs 1G of memory will get resources, even though its
  // fair share is 0, because its weight is tiny (0.000001).
  ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
  createApplicationWithAMResource(attId3, "root.queue3", "user1", amResource);
  createSchedulingRequestExistingApplication(1024, 1, amPriority, attId3);
  FSAppAttempt app3 = scheduler.getSchedulerApp(attId3);
  scheduler.update();
  scheduler.handle(updateEvent);
  assertEquals("Application 3 should be running", 1,
      app3.getLiveContainers().size());
}
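The admission decisions above come down to the maxAMShare cap: queue1's maxResources is 4096 MB and its maxAMShare is 0.5, so at most 2048 MB of AM containers can run in that queue at once. A minimal sketch of that gate (a hypothetical helper, not FairScheduler's own check):

// Illustrative only: the maxAMShare cap the test exercises.
public class MaxAMShareSketch {

  static boolean amFitsUnderCap(long requestedAmMemoryMb, long amMemoryInUseMb,
      long queueMaxMemoryMb, double maxAMShare) {
    long amCapMb = (long) (queueMaxMemoryMb * maxAMShare);
    return amMemoryInUseMb + requestedAmMemoryMb <= amCapMb;
  }

  public static void main(String[] args) {
    // queue1: maxResources 4096 MB, maxAMShare 0.5 -> 2048 MB cap for AM containers.
    System.out.println(amFitsUnderCap(3 * 1024, 0, 4096, 0.5)); // false: app1 stays pending
    System.out.println(amFitsUnderCap(2 * 1024, 0, 4096, 0.5)); // true: app2 runs
  }
}

The third application shows the other side of the claim: a zero (or tiny) fair share by itself does not block the AM, as long as the request fits under the queue's caps.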
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testFairShareWithZeroWeightNoneZeroMinRes.
@Test
public void testFairShareWithZeroWeightNoneZeroMinRes() throws IOException {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  // set queueA and queueB weight zero.
  // set queueA and queueB minResources 1.
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"queueA\">");
  out.println("<minResources>1 mb 1 vcores</minResources>");
  out.println("<weight>0.0</weight>");
  out.println("</queue>");
  out.println("<queue name=\"queueB\">");
  out.println("<minResources>1 mb 1 vcores</minResources>");
  out.println("<weight>0.0</weight>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Add one big node (only care about aggregate capacity)
  RMNode node1 =
      MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);

  // Queue A wants 2 * 1024.
  createSchedulingRequest(2 * 1024, "queueA", "user1");
  // Queue B wants 6 * 1024
  createSchedulingRequest(6 * 1024, "queueB", "user1");
  scheduler.update();

  FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue("queueA", false);
  // queueA's weight is 0.0 and minResources is 1,
  // so its fair share should be 1 (minShare).
  assertEquals(1, queue.getFairShare().getMemorySize());
  // queueB's weight is 0.0 and minResources is 1,
  // so its fair share should be 1 (minShare).
  queue = scheduler.getQueueManager().getLeafQueue("queueB", false);
  assertEquals(1, queue.getFairShare().getMemorySize());
}
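The assertions rely on minResources acting as a floor: a zero-weight queue receives a zero weight-proportional share, so its fair share is lifted to the configured minShare. A tiny sketch of that relationship (hypothetical helper, not the actual ComputeFairShares algorithm):

// Illustrative only: a queue's fair share never drops below its minShare.
public class ZeroWeightFairShareSketch {

  static long fairShareMb(double weight, double totalWeight, long clusterMb, long minShareMb) {
    long weighted = totalWeight == 0 ? 0 : (long) (clusterMb * weight / totalWeight);
    return Math.max(weighted, minShareMb);
  }

  public static void main(String[] args) {
    // queueA and queueB both have weight 0.0 and a 1 MB minShare.
    System.out.println(fairShareMb(0.0, 0.0, 8 * 1024, 1)); // 1, as asserted
  }
}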
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testFairShareWithNoneZeroWeightNoneZeroMinRes.
@Test
public void testFairShareWithNoneZeroWeightNoneZeroMinRes() throws IOException {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  // set queueA and queueB weight 0.5.
  // set queueA and queueB minResources 1024.
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"queueA\">");
  out.println("<minResources>1024 mb 1 vcores</minResources>");
  out.println("<weight>0.5</weight>");
  out.println("</queue>");
  out.println("<queue name=\"queueB\">");
  out.println("<minResources>1024 mb 1 vcores</minResources>");
  out.println("<weight>0.5</weight>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Add one big node (only care about aggregate capacity)
  RMNode node1 =
      MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);

  // Queue A wants 4 * 1024.
  createSchedulingRequest(4 * 1024, "queueA", "user1");
  // Queue B wants 4 * 1024
  createSchedulingRequest(4 * 1024, "queueB", "user1");
  scheduler.update();

  FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue("queueA", false);
  // queueA's weight is 0.5 and minResources is 1024,
  // so its fair share should be 4096.
  assertEquals(4096, queue.getFairShare().getMemorySize());
  // queueB's weight is 0.5 and minResources is 1024,
  // so its fair share should be 4096.
  queue = scheduler.getQueueManager().getLeafQueue("queueB", false);
  assertEquals(4096, queue.getFairShare().getMemorySize());
}
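Here the weights are equal and non-zero, so the 8192 MB node splits evenly between the two active queues and the 1024 MB minResources floor never binds. The arithmetic behind the asserted 4096 MB, again only a sketch of the proportional split rather than the scheduler's own computation:

// Illustrative only: equal non-zero weights split the cluster evenly.
public class EqualWeightFairShareSketch {

  public static void main(String[] args) {
    long clusterMb = 8 * 1024;
    double weightA = 0.5, weightB = 0.5;
    long minShareMb = 1024;

    long shareA = (long) (clusterMb * weightA / (weightA + weightB));
    long shareB = clusterMb - shareA;

    // 4096 MB each; the 1024 MB minShare floor is irrelevant here.
    System.out.println(Math.max(shareA, minShareMb));
    System.out.println(Math.max(shareB, minShareMb));
  }
}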
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testMoveWouldViolateMaxResourcesConstraints.
@Test(expected = YarnException.class)
public void testMoveWouldViolateMaxResourcesConstraints() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  QueueManager queueMgr = scheduler.getQueueManager();
  FSLeafQueue oldQueue = queueMgr.getLeafQueue("queue1", true);
  FSQueue queue2 = queueMgr.getLeafQueue("queue2", true);
  queue2.setMaxShare(Resource.newInstance(1024, 1));

  ApplicationAttemptId appAttId =
      createSchedulingRequest(1024, 1, "queue1", "user1", 3);
  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(2048, 2));
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeEvent);
  scheduler.handle(updateEvent);
  scheduler.handle(updateEvent);
  assertEquals(Resource.newInstance(2048, 2), oldQueue.getResourceUsage());

  scheduler.moveApplication(appAttId.getApplicationId(), "queue2");
}
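The move is rejected because queue2's max share (1024 MB, 1 vcore) cannot absorb an application that already holds 2048 MB and 2 vcores in queue1. A minimal sketch of the kind of check that raises the expected YarnException (plain Java with hypothetical names, not FairScheduler's own verification code):

// Illustrative only: moving an app must not push the target queue past its max share.
public class MoveConstraintSketch {

  static void checkMove(long appMemoryMb, int appVcores,
      long targetUsedMemoryMb, int targetUsedVcores,
      long targetMaxMemoryMb, int targetMaxVcores) {
    if (targetUsedMemoryMb + appMemoryMb > targetMaxMemoryMb
        || targetUsedVcores + appVcores > targetMaxVcores) {
      throw new IllegalStateException(
          "Moving the app would violate the target queue's max resources");
    }
  }

  public static void main(String[] args) {
    // queue2's max share is 1024 MB / 1 vcore; the app holds 2048 MB / 2 vcores.
    checkMove(2048, 2, 0, 0, 1024, 1); // throws, mirroring the expected YarnException
  }
}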