Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testSimpleHierarchicalFairShareCalculation.
@Test
public void testSimpleHierarchicalFairShareCalculation() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  // Add one big node (only care about aggregate capacity)
  int capacity = 10 * 24;
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(capacity), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  // Have three queues which each want the entire cluster capacity
  createSchedulingRequest(10 * 1024, "parent.queue2", "user1");
  createSchedulingRequest(10 * 1024, "parent.queue3", "user1");
  createSchedulingRequest(10 * 1024, "root.default", "user1");
  scheduler.update();
  scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource());
  scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
  QueueManager queueManager = scheduler.getQueueManager();
  Collection<FSLeafQueue> queues = queueManager.getLeafQueues();
  assertEquals(3, queues.size());
  FSLeafQueue queue1 = queueManager.getLeafQueue("default", true);
  FSLeafQueue queue2 = queueManager.getLeafQueue("parent.queue2", true);
  FSLeafQueue queue3 = queueManager.getLeafQueue("parent.queue3", true);
  assertEquals(capacity / 2, queue1.getFairShare().getMemorySize());
  assertEquals(capacity / 2, queue1.getMetrics().getFairShareMB());
  assertEquals(capacity / 2, queue1.getSteadyFairShare().getMemorySize());
  assertEquals(capacity / 2, queue1.getMetrics().getSteadyFairShareMB());
  assertEquals(capacity / 4, queue2.getFairShare().getMemorySize());
  assertEquals(capacity / 4, queue2.getMetrics().getFairShareMB());
  assertEquals(capacity / 4, queue2.getSteadyFairShare().getMemorySize());
  assertEquals(capacity / 4, queue2.getMetrics().getSteadyFairShareMB());
  assertEquals(capacity / 4, queue3.getFairShare().getMemorySize());
  assertEquals(capacity / 4, queue3.getMetrics().getFairShareMB());
  assertEquals(capacity / 4, queue3.getSteadyFairShare().getMemorySize());
  assertEquals(capacity / 4, queue3.getMetrics().getSteadyFairShareMB());
}
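The expected values above follow from an even split at each level of the queue hierarchy: root.default and root.parent each receive half the cluster, and parent divides its half evenly between queue2 and queue3. A minimal arithmetic sketch of that split (the class and variable names below are illustrative only, not part of the Hadoop test):

public class HierarchicalShareSketch {
  public static void main(String[] args) {
    int clusterMemory = 10 * 24;             // aggregate capacity registered via NodeAddedSchedulerEvent
    int topLevelShare = clusterMemory / 2;   // root.default and root.parent split the root evenly
    int nestedShare = topLevelShare / 2;     // parent.queue2 and parent.queue3 split parent's share
    System.out.println("default=" + topLevelShare + ", queue2=" + nestedShare + ", queue3=" + nestedShare);
  }
}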
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testFairShareWithHighMaxResources.
/**
 * Test fair shares when max resources are set but are too high to impact
 * the shares.
 *
 * @throws IOException if scheduler reinitialization fails
 */
@Test
public void testFairShareWithHighMaxResources() throws IOException {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  // Set maxResources for queueA and queueB; their combined maxResources
  // exceed Integer.MAX_VALUE.
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"queueA\">");
  out.println("<maxResources>1073741824 mb 1000 vcores</maxResources>");
  out.println("<weight>.25</weight>");
  out.println("</queue>");
  out.println("<queue name=\"queueB\">");
  out.println("<maxResources>1073741824 mb 1000 vcores</maxResources>");
  out.println("<weight>.75</weight>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  // Add one big node (only care about aggregate capacity)
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  // Queue A wants 1 * 1024.
  createSchedulingRequest(1 * 1024, "queueA", "user1");
  // Queue B wants 6 * 1024.
  createSchedulingRequest(6 * 1024, "queueB", "user1");
  scheduler.update();
  FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue("queueA", false);
  // queueA's weight is 0.25, so its fair share should be 2 * 1024.
  assertEquals("Queue A did not get its expected fair share", 2 * 1024, queue.getFairShare().getMemorySize());
  // queueB's weight is 0.75, so its fair share should be 6 * 1024.
  queue = scheduler.getQueueManager().getLeafQueue("queueB", false);
  assertEquals("Queue B did not get its expected fair share", 6 * 1024, queue.getFairShare().getMemorySize());
}
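Because the configured maxResources (1 TB, 1000 vcores) dwarf the 8 GB node, the shares are driven purely by the weights. A minimal sketch of that weighted split (illustrative names, not part of the Hadoop test):

public class WeightedShareSketch {
  public static void main(String[] args) {
    long clusterMb = 8 * 1024;                // the single 8 GB node added above
    double weightA = 0.25, weightB = 0.75;
    long shareA = (long) (clusterMb * weightA / (weightA + weightB)); // 2048 MB
    long shareB = (long) (clusterMb * weightB / (weightA + weightB)); // 6144 MB
    System.out.println("queueA=" + shareA + " MB, queueB=" + shareB + " MB");
  }
}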
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testContainerAllocationWithContainerIdLeap.
@Test(timeout = 120000)
public void testContainerAllocationWithContainerIdLeap() throws Exception {
  conf.setFloat(FairSchedulerConfiguration.RESERVABLE_NODES, 0.50f);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  // Add two nodes
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(3072, 10), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(3072, 10), 1, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  scheduler.handle(nodeEvent2);
  ApplicationAttemptId app1 = createSchedulingRequest(2048, "queue1", "user1", 2);
  scheduler.update();
  scheduler.handle(new NodeUpdateSchedulerEvent(node1));
  scheduler.handle(new NodeUpdateSchedulerEvent(node2));
  ApplicationAttemptId app2 = createSchedulingRequest(2048, "queue1", "user1", 1);
  scheduler.update();
  scheduler.handle(new NodeUpdateSchedulerEvent(node1));
  scheduler.handle(new NodeUpdateSchedulerEvent(node2));
  assertEquals(4096, scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemorySize());
  // app2's container will be reserved at node1
  RMContainer reservedContainer1 = scheduler.getSchedulerNode(node1.getNodeID()).getReservedContainer();
  assertNotEquals(reservedContainer1, null);
  RMContainer reservedContainer2 = scheduler.getSchedulerNode(node2.getNodeID()).getReservedContainer();
  assertEquals(reservedContainer2, null);
  for (int i = 0; i < 10; i++) {
    scheduler.handle(new NodeUpdateSchedulerEvent(node1));
    scheduler.handle(new NodeUpdateSchedulerEvent(node2));
  }
  // Release app1's resources
  scheduler.handle(new AppAttemptRemovedSchedulerEvent(app1, RMAppAttemptState.KILLED, false));
  assertEquals(0, scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemorySize());
  // app2's container will be allocated at node2
  scheduler.handle(new NodeUpdateSchedulerEvent(node2));
  assertEquals(scheduler.getSchedulerApp(app2).getLiveContainers().size(), 1);
  long maxId = 0;
  for (RMContainer container : scheduler.getSchedulerApp(app2).getLiveContainers()) {
    assertTrue(container.getContainer().getNodeId().equals(node2.getNodeID()));
    if (container.getContainerId().getContainerId() > maxId) {
      maxId = container.getContainerId().getContainerId();
    }
  }
  long reservedId = reservedContainer1.getContainerId().getContainerId();
  assertEquals(reservedId + 1, maxId);
}
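The reservation above falls out of the fixture's arithmetic: after app1 places one 2048 MB container on each 3072 MB node, each node has only 1024 MB of headroom, so app2's 2048 MB request cannot be placed and is reserved instead. A minimal sketch of that headroom check (class and variable names are illustrative, not part of the test):

public class ReservationHeadroomSketch {
  public static void main(String[] args) {
    int nodeMemoryMb = 3072;                       // per-node capacity used above
    int usedByApp1Mb = 2048;                       // one app1 container per node
    int app2RequestMb = 2048;                      // app2's pending request
    int headroomMb = nodeMemoryMb - usedByApp1Mb;  // 1024 MB free per node
    boolean mustReserve = headroomMb < app2RequestMb;
    System.out.println("headroom=" + headroomMb + " MB, must reserve=" + mustReserve);
  }
}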
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testFairShareWithMinAlloc.
@Test
public void testFairShareWithMinAlloc() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"queueA\">");
  out.println("<minResources>1024mb,0vcores</minResources>");
  out.println("</queue>");
  out.println("<queue name=\"queueB\">");
  out.println("<minResources>2048mb,0vcores</minResources>");
  out.println("</queue>");
  out.println("</allocations>");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  // Add one big node (only care about aggregate capacity)
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(3 * 1024), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  createSchedulingRequest(2 * 1024, "queueA", "user1");
  createSchedulingRequest(2 * 1024, "queueB", "user1");
  scheduler.update();
  Collection<FSLeafQueue> queues = scheduler.getQueueManager().getLeafQueues();
  assertEquals(3, queues.size());
  for (FSLeafQueue p : queues) {
    if (p.getName().equals("root.queueA")) {
      assertEquals(1024, p.getFairShare().getMemorySize());
    } else if (p.getName().equals("root.queueB")) {
      assertEquals(2048, p.getFairShare().getMemorySize());
    }
  }
}
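Here both queues demand 2 GB from a 3 GB cluster, and the asserted fair shares (1024 MB and 2048 MB) match the configured minResources, which happen to sum exactly to the cluster size. A small sketch of that bookkeeping, under the assumption that the shares track the minimums in this over-subscribed case (illustrative names only):

public class MinShareSketch {
  public static void main(String[] args) {
    int clusterMb = 3 * 1024;          // the single 3 GB node added above
    int minA = 1024, minB = 2048;      // minResources for queueA and queueB
    System.out.println("queueA=" + minA + " MB, queueB=" + minB
        + " MB, allocated " + (minA + minB) + " of " + clusterMb + " MB");
  }
}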
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in project hadoop by apache.
From the class TestFairScheduler, method testFairShareAndWeightsInNestedUserQueueRule.
@Test
public void testFairShareAndWeightsInNestedUserQueueRule() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("<?xml version=\"1.0\"?>");
  out.println("<allocations>");
  out.println("<queue name=\"parentq\" type=\"parent\">");
  out.println("<minResources>1024mb,0vcores</minResources>");
  out.println("</queue>");
  out.println("<queuePlacementPolicy>");
  out.println("<rule name=\"nestedUserQueue\">");
  out.println(" <rule name=\"specified\" create=\"false\" />");
  out.println("</rule>");
  out.println("<rule name=\"default\" />");
  out.println("</queuePlacementPolicy>");
  out.println("</allocations>");
  out.close();
  RMApp rmApp1 = new MockRMApp(0, 0, RMAppState.NEW);
  RMApp rmApp2 = new MockRMApp(1, 1, RMAppState.NEW);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());
  int capacity = 16 * 1024;
  // Create a node with 16 GB of memory
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(capacity), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  // user1 and user2 submit their apps to parentq, creating user queues
  createSchedulingRequest(10 * 1024, "root.parentq", "user1");
  createSchedulingRequest(10 * 1024, "root.parentq", "user2");
  // user3 submits an app to the default queue
  createSchedulingRequest(10 * 1024, "root.default", "user3");
  scheduler.update();
  scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource());
  scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
  Collection<FSLeafQueue> leafQueues = scheduler.getQueueManager().getLeafQueues();
  for (FSLeafQueue leaf : leafQueues) {
    if (leaf.getName().equals("root.parentq.user1") || leaf.getName().equals("root.parentq.user2")) {
      // Assert that the fair share is 1/4 of node1's capacity
      assertEquals(capacity / 4, leaf.getFairShare().getMemorySize());
      // Assert that the steady fair share is 1/4 of node1's capacity
      assertEquals(capacity / 4, leaf.getSteadyFairShare().getMemorySize());
      // Assert that weights are equal for both user queues
      assertEquals(1.0, leaf.getWeights().getWeight(ResourceType.MEMORY), 0);
    }
  }
}
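The 1/4 shares follow the same hierarchical split as the first example: root.default and root.parentq each get half of the 16 GB node, and parentq divides its half evenly between the two user queues created by the nestedUserQueue rule. A minimal arithmetic sketch (illustrative names, not part of the Hadoop test):

public class NestedUserQueueShareSketch {
  public static void main(String[] args) {
    int capacity = 16 * 1024;               // the single 16 GB node added above
    int topLevelShare = capacity / 2;       // root.default and root.parentq split the root evenly
    int userQueueShare = topLevelShare / 2; // parentq.user1 and parentq.user2 split parentq's share
    System.out.println("default=" + topLevelShare + " MB, each user queue=" + userQueueShare + " MB");
  }
}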