use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
the class TestFairScheduler method testQueueMaxAMShareDefault.
@Test
public void testQueueMaxAMShareDefault() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
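// Raise the per-container vcore cap from the default of 4 to 6 so that the 6-vcore AM request made later in this test is a valid single-container allocation.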
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 6);
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("<?xml version=\"1.0\"?>");
out.println("<allocations>");
out.println("<queue name=\"queue1\">");
out.println("</queue>");
out.println("<queue name=\"queue2\">");
out.println("<maxAMShare>0.4</maxAMShare>");
out.println("</queue>");
out.println("<queue name=\"queue3\">");
out.println("<maxResources>10240 mb 4 vcores</maxResources>");
out.println("</queue>");
out.println("<queue name=\"queue4\">");
out.println("</queue>");
out.println("<queue name=\"queue5\">");
out.println("</queue>");
out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
out.println("</allocations>");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf, resourceManager.getRMContext());
RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(8192, 10), 0, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.update();
FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
assertEquals("Queue queue1's fair share should be 0", 0, queue1.getFairShare().getMemorySize());
FSLeafQueue queue2 = scheduler.getQueueManager().getLeafQueue("queue2", true);
assertEquals("Queue queue2's fair share should be 0", 0, queue2.getFairShare().getMemorySize());
FSLeafQueue queue3 = scheduler.getQueueManager().getLeafQueue("queue3", true);
assertEquals("Queue queue3's fair share should be 0", 0, queue3.getFairShare().getMemorySize());
FSLeafQueue queue4 = scheduler.getQueueManager().getLeafQueue("queue4", true);
assertEquals("Queue queue4's fair share should be 0", 0, queue4.getFairShare().getMemorySize());
FSLeafQueue queue5 = scheduler.getQueueManager().getLeafQueue("queue5", true);
assertEquals("Queue queue5's fair share should be 0", 0, queue5.getFairShare().getMemorySize());
List<String> queues = Arrays.asList("root.queue3", "root.queue4", "root.queue5");
for (String queue : queues) {
createSchedulingRequest(1 * 1024, queue, "user1");
scheduler.update();
scheduler.handle(updateEvent);
}
Resource amResource1 = Resource.newInstance(1024, 1);
int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
// The fair share is 2048 MB, and the default maxAMShare is 0.5f,
// so the AM is accepted.
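// (0.5 * 2048 MB = 1024 MB of AM headroom, exactly covering the 1024 MB AM request.)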
ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
createApplicationWithAMResource(attId1, "queue1", "test1", amResource1);
createSchedulingRequestExistingApplication(1024, 1, amPriority, attId1);
FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 1024 MB memory", 1024, app1.getAMResource().getMemorySize());
assertEquals("Application1's AM should be running", 1, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory", 1024, queue1.getAmResourceUsage().getMemorySize());
// Now the fair share is 1639 MB, and the maxAMShare is 0.4f,
// so the AM is not accepted.
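// (0.4 * 1639 MB is roughly 655 MB of AM headroom, less than the 1024 MB the AM needs.)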
ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
createApplicationWithAMResource(attId2, "queue2", "test1", amResource1);
createSchedulingRequestExistingApplication(1024, 1, amPriority, attId2);
FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM resource shouldn't be updated", 0, app2.getAMResource().getMemorySize());
assertEquals("Application2's AM should not be running", 0, app2.getLiveContainers().size());
assertEquals("Queue2's AM resource usage should be 0 MB memory", 0, queue2.getAmResourceUsage().getMemorySize());
// Remove the app2
AppAttemptRemovedSchedulerEvent appRemovedEvent2 = new AppAttemptRemovedSchedulerEvent(attId2, RMAppAttemptState.FINISHED, false);
scheduler.handle(appRemovedEvent2);
scheduler.update();
// AM3 can pass the fair share check, but it would take every available
// vcore, so AM3 is not accepted.
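// (Three 1-vcore containers plus Application1's 1-vcore AM hold 4 of the node's 10 vcores, so the 6-vcore AM request would consume all remaining vcores.)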
ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
createApplicationWithAMResource(attId3, "queue3", "test1", amResource1);
createSchedulingRequestExistingApplication(1024, 6, amPriority, attId3);
FSAppAttempt app3 = scheduler.getSchedulerApp(attId3);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application3's AM resource shouldn't be updated", 0, app3.getAMResource().getMemorySize());
assertEquals("Application3's AM should not be running", 0, app3.getLiveContainers().size());
assertEquals("Queue3's AM resource usage should be 0 MB memory", 0, queue3.getAmResourceUsage().getMemorySize());
// AM4 can pass the fair share check and does not take all of the
// available vcores, but it needs 5 vcores, which is more than
// queue3's maxResources (4 vcores). So AM4 is not accepted.
ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
createApplicationWithAMResource(attId4, "queue3", "test1", amResource1);
createSchedulingRequestExistingApplication(1024, 5, amPriority, attId4);
FSAppAttempt app4 = scheduler.getSchedulerApp(attId4);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application4's AM resource shouldn't be updated", 0, app4.getAMResource().getMemorySize());
assertEquals("Application4's AM should not be running", 0, app4.getLiveContainers().size());
assertEquals("Queue3's AM resource usage should be 0 MB memory", 0, queue3.getAmResourceUsage().getMemorySize());
}
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
the class TestFairScheduler method testSchedulerRootQueueMetrics.
@Test
public void testSchedulerRootQueueMetrics() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf, resourceManager.getRMContext());
// Add a node
RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024));
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
// Queue 1 requests full capacity of node
createSchedulingRequest(1024, "queue1", "user1", 1);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent);
// Now queue 2 requests likewise
createSchedulingRequest(1024, "queue2", "user1", 1);
scheduler.update();
scheduler.handle(updateEvent);
// Make sure reserved memory gets updated correctly
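// Node1's 1024 MB is fully held by queue1's container, so queue2's identical request cannot be placed and is reserved on node1 instead.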
assertEquals(1024, scheduler.rootMetrics.getReservedMB());
// Now another node checks in with capacity
RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024));
NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
scheduler.handle(updateEvent2);
// The old reservation should still be there...
assertEquals(1024, scheduler.rootMetrics.getReservedMB());
// ... but it should disappear when we update the first node.
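// (The reservation on node1 is only re-evaluated during node1's own heartbeat; by then queue2's request has presumably been satisfied on node2, so the reservation is released.)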
scheduler.handle(updateEvent);
assertEquals(0, scheduler.rootMetrics.getReservedMB());
}
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
the class TestFairScheduler method testDynamicMaxAssign.
/**
* Test to verify the behavior of dynamic-max-assign.
* 1. Verify that the value of maxassign does not affect the number of
* containers allocated.
* 2. Verify that the node ends up fully allocated.
*/
@Test(timeout = 3000)
public void testDynamicMaxAssign() throws Exception {
conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
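// Dynamic max assign is enabled by default: with assignmultiple on, each node heartbeat hands out at most roughly half of the node's currently unallocated resources, so (as the javadoc above notes) the explicit maxAssign values set below should not change how many containers are allocated.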
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf, resourceManager.getRMContext());
RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(8192, 8), 0, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
ApplicationAttemptId attId = createSchedulingRequest(1024, 1, "root.default", "user", 12);
FSAppAttempt app = scheduler.getSchedulerApp(attId);
// Set maxassign to a value smaller than half the remaining resources
scheduler.maxAssign = 2;
scheduler.update();
scheduler.handle(updateEvent);
// New container allocations should be floor(8/2) + 1 = 5
assertEquals("Incorrect number of containers allocated", 5, app.getLiveContainers().size());
// Set maxassign to a value larger than half the remaining resources
scheduler.maxAssign = 4;
scheduler.update();
scheduler.handle(updateEvent);
// New container allocations should be floor(3/2) + 1 = 2
assertEquals("Incorrect number of containers allocated", 7, app.getLiveContainers().size());
scheduler.update();
scheduler.handle(updateEvent);
// New container allocations should be 1
assertEquals("Incorrect number of containers allocated", 8, app.getLiveContainers().size());
}
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
the class TestFairSchedulerFairShare method createClusterWithQueuesAndOneNode.
private void createClusterWithQueuesAndOneNode(int mem, int vCores, String policy) throws IOException {
PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("<?xml version=\"1.0\"?>");
out.println("<allocations>");
out.println("<queue name=\"root\" >");
out.println(" <queue name=\"parentA\" >");
out.println(" <weight>8</weight>");
out.println(" <queue name=\"childA1\" />");
out.println(" <queue name=\"childA2\" />");
out.println(" <queue name=\"childA3\" />");
out.println(" <queue name=\"childA4\" />");
out.println(" </queue>");
out.println(" <queue name=\"parentB\" >");
out.println(" <weight>1</weight>");
out.println(" <queue name=\"childB1\" />");
out.println(" <queue name=\"childB2\" />");
out.println(" </queue>");
out.println("</queue>");
out.println("<defaultQueueSchedulingPolicy>" + policy + "</defaultQueueSchedulingPolicy>");
out.println("</allocations>");
out.close();
resourceManager = new MockRM(conf);
resourceManager.start();
scheduler = (FairScheduler) resourceManager.getResourceScheduler();
RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(mem, vCores), 1, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
}
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
the class TestSchedulingUpdate method testSchedulingUpdateOnNodeJoinLeave.
@Test(timeout = 3000)
public void testSchedulingUpdateOnNodeJoinLeave() throws InterruptedException {
verifyNoCalls();
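// Adding or removing a node should trigger exactly one immediate scheduler update; verifyExpectedCalls presumably checks both the number of update calls and the cluster resource they observed (4096 MB / 4 cores after the add, 0 / 0 after the remove).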
// Add one node
String host = "127.0.0.1";
final int memory = 4096;
final int cores = 4;
RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(memory, cores), 1, host);
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
long expectedCalls = 1;
verifyExpectedCalls(expectedCalls, memory, cores);
// Remove the node
NodeRemovedSchedulerEvent nodeEvent2 = new NodeRemovedSchedulerEvent(node1);
scheduler.handle(nodeEvent2);
expectedCalls = 2;
verifyExpectedCalls(expectedCalls, 0, 0);
}