Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy in project hadoop by apache.
The class TestSchedulingPolicy, method testPolicyReinitilization: it reloads the allocation file with changed per-queue scheduling policies and checks which of the changes actually take effect.
@Test
public void testPolicyReinitilization() throws IOException {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"root\">");
  out.println("<schedulingPolicy>fair</schedulingPolicy>");
  out.println(" <queue name=\"child1\">");
  out.println(" <schedulingPolicy>fair</schedulingPolicy>");
  out.println(" </queue>");
  out.println(" <queue name=\"child2\">");
  out.println(" <schedulingPolicy>fair</schedulingPolicy>");
  out.println(" </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.init(conf);
  // Set child1 to 'drf' which is not allowed, then reload the allocation file
  out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"root\">");
  out.println("<schedulingPolicy>fair</schedulingPolicy>");
  out.println(" <queue name=\"child1\">");
  out.println(" <schedulingPolicy>drf</schedulingPolicy>");
  out.println(" </queue>");
  out.println(" <queue name=\"child2\">");
  out.println(" <schedulingPolicy>fifo</schedulingPolicy>");
  out.println(" </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.reinitialize(conf, null);
  FSQueue child1 = scheduler.getQueueManager().getQueue("child1");
  assertTrue("Queue 'child1' should still be 'fair' since 'drf' isn't allowed"
      + " if its parent policy is 'fair'.",
      child1.getPolicy() instanceof FairSharePolicy);
  FSQueue child2 = scheduler.getQueueManager().getQueue("child2");
  assertTrue("Queue 'child2' should still be 'fair' there is a policy"
      + " violation while reinitialization.",
      child2.getPolicy() instanceof FairSharePolicy);
  // Set both child1 and root to 'drf', then reload the allocation file
  out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"root\">");
  out.println("<schedulingPolicy>drf</schedulingPolicy>");
  out.println(" <queue name=\"child1\">");
  out.println(" <schedulingPolicy>drf</schedulingPolicy>");
  out.println(" </queue>");
  out.println(" <queue name=\"child2\">");
  out.println(" <schedulingPolicy>fifo</schedulingPolicy>");
  out.println(" </queue>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.reinitialize(conf, null);
  child1 = scheduler.getQueueManager().getQueue("child1");
  assertTrue("Queue 'child1' should be 'drf' since both 'root' and 'child1'"
      + " are 'drf'.",
      child1.getPolicy() instanceof DominantResourceFairnessPolicy);
  child2 = scheduler.getQueueManager().getQueue("child2");
  assertTrue("Queue 'child2' should still be 'fifo' there is no policy"
      + " violation while reinitialization.",
      child2.getPolicy() instanceof FifoPolicy);
}
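The rule this test exercises: a child queue may only use 'drf' if its parent also uses 'drf', and a reload containing such a violation leaves every queue's previous policy in place (which is why 'child2' stays 'fair' on the first reload even though 'fifo' on its own would be acceptable). The helper below is a minimal standalone sketch of that observed behavior, written for illustration only; it is not the Hadoop API, and the method and parameter names are invented for this example.

import java.util.Map;

// Illustrative sketch (invented names, not Hadoop code) of the reload behavior
// observed in the test above.
static Map<String, String> applyPolicyReload(String rootPolicy,
    Map<String, String> currentChildPolicies,
    Map<String, String> requestedChildPolicies) {
  for (String requested : requestedChildPolicies.values()) {
    // 'drf' on a child queue is only allowed when its parent is also 'drf';
    // a single violation makes the reload keep all previous policies.
    if ("drf".equals(requested) && !"drf".equals(rootPolicy)) {
      return currentChildPolicies;
    }
  }
  // No violation: every requested policy takes effect.
  return requestedChildPolicies;
}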
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy in project hadoop by apache.
The class TestFairScheduler, method testFifoWithinQueue: it sets FifoPolicy on a leaf queue and verifies that the first application's outstanding requests are satisfied before the second application receives a container.
@Test(timeout = 5000)
public void testFifoWithinQueue() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  RMNode node1 =
      MockNodes.newNodeInfo(1, Resources.createResource(3072, 3), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  // Even if submitted at exact same time, apps will be deterministically
  // ordered by name.
  ApplicationAttemptId attId1 =
      createSchedulingRequest(1024, "queue1", "user1", 2);
  ApplicationAttemptId attId2 =
      createSchedulingRequest(1024, "queue1", "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
  FSLeafQueue queue1 =
      scheduler.getQueueManager().getLeafQueue("queue1", true);
  queue1.setPolicy(new FifoPolicy());
  scheduler.update();
  // First two containers should go to app 1, third should go to app 2.
  // Because tests set assignmultiple to false, each heartbeat assigns a single
  // container.
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
  scheduler.handle(updateEvent);
  assertEquals(1, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());
  scheduler.handle(updateEvent);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(1, app2.getLiveContainers().size());
}
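The first example reached an equivalent configuration through the allocation file (a <schedulingPolicy>fifo</schedulingPolicy> element) rather than by calling setPolicy() on the queue object. A short sketch of that alternative for 'queue1', assuming the same test fixture (conf already pointing at ALLOC_FILE, plus scheduler and resourceManager) as the examples above:

// Write an allocation file that declares 'queue1' with the fifo policy,
// mirroring the pattern used in the first example.
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("<?xml version=\"1.0\"?>");
out.println("<allocations>");
out.println("<queue name=\"queue1\">");
out.println("<schedulingPolicy>fifo</schedulingPolicy>");
out.println("</queue>");
out.println("</allocations>");
out.close();
// Reload so the scheduler picks up the new per-queue policy.
scheduler.reinitialize(conf, resourceManager.getRMContext());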