Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in the Apache Hadoop project.
From the class TestFairScheduler, method testComputeMaxAMResource:
/**
 * Test if we compute the maximum AM resource correctly.
 *
 * <p>Covers three cases: a zero-weight queue with an explicit maxResources,
 * a zero-weight queue limited only by available cluster resources, and a
 * non-zero-weight queue limited by its fair share.
 *
 * @throws IOException if the allocation file cannot be written or scheduler
 *         reinitialization fails
 */
@Test
public void testComputeMaxAMResource() throws IOException {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  // try-with-resources guarantees the allocation file is flushed and closed
  // even if one of the println calls throws.
  try (PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE))) {
    out.println("<?xml version=\"1.0\"?>");
    out.println("<allocations>");
    out.println("<queue name=\"queueFSZeroWithMax\">");
    out.println("<weight>0</weight>");
    out.println("<maxAMShare>0.5</maxAMShare>");
    out.println("<maxResources>4096 mb 4 vcores</maxResources>");
    out.println("</queue>");
    out.println("<queue name=\"queueFSZeroWithAVL\">");
    out.println("<weight>0.0</weight>");
    out.println("<maxAMShare>0.5</maxAMShare>");
    out.println("</queue>");
    out.println("<queue name=\"queueFSNonZero\">");
    out.println("<weight>1</weight>");
    out.println("<maxAMShare>0.5</maxAMShare>");
    out.println("</queue>");
    out.println("<defaultQueueSchedulingPolicy>drf" + "</defaultQueueSchedulingPolicy>");
    out.println("</allocations>");
  }
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Single node that defines the whole cluster's capacity.
  long memCapacity = 20 * GB;
  int cpuCapacity = 20;
  RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(memCapacity, cpuCapacity), 0, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
  scheduler.handle(nodeEvent);
  scheduler.update();

  Resource amResource = Resource.newInstance(1 * GB, 1);
  int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();

  // queueFSZeroWithMax
  FSLeafQueue queueFSZeroWithMax = scheduler.getQueueManager().getLeafQueue("queueFSZeroWithMax", true);
  ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
  createApplicationWithAMResource(attId1, "queueFSZeroWithMax", "user1", amResource);
  createSchedulingRequestExistingApplication(1 * GB, 1, amPriority, attId1);
  scheduler.update();
  scheduler.handle(updateEvent);
  // queueFSZeroWithMax's weight is 0.0, so its fair share should be 0; we use
  // the min(maxShare, available resource) to compute maxAMShare, in this
  // case, we use maxShare, since it is smaller than available resource.
  assertEquals("QueueFSZeroWithMax's fair share should be zero", 0, queueFSZeroWithMax.getFairShare().getMemorySize());
  Resource expectedAMResource = Resources.multiplyAndRoundUp(queueFSZeroWithMax.getMaxShare(), queueFSZeroWithMax.getMaxAMShare());
  assertEquals("QueueFSZeroWithMax's maximum AM resource should be " + "maxShare * maxAMShare", expectedAMResource.getMemorySize(), queueFSZeroWithMax.getMetrics().getMaxAMShareMB());
  assertEquals("QueueFSZeroWithMax's maximum AM resource should be " + "maxShare * maxAMShare", expectedAMResource.getVirtualCores(), queueFSZeroWithMax.getMetrics().getMaxAMShareVCores());
  assertEquals("QueueFSZeroWithMax's AM resource usage should be the same to " + "AM resource request", amResource.getMemorySize(), queueFSZeroWithMax.getMetrics().getAMResourceUsageMB());

  // queueFSZeroWithAVL
  amResource = Resources.createResource(1 * GB, 1);
  FSLeafQueue queueFSZeroWithAVL = scheduler.getQueueManager().getLeafQueue("queueFSZeroWithAVL", true);
  ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
  createApplicationWithAMResource(attId2, "queueFSZeroWithAVL", "user1", amResource);
  createSchedulingRequestExistingApplication(1 * GB, 1, amPriority, attId2);
  scheduler.update();
  scheduler.handle(updateEvent);
  // queueFSZeroWithAVL's weight is 0.0, so its fair share is 0, and we use
  // the min(maxShare, available resource) to compute maxAMShare, in this
  // case, we use available resource since it is smaller than the
  // default maxShare.
  expectedAMResource = Resources.multiplyAndRoundUp(Resources.createResource(memCapacity - amResource.getMemorySize(), cpuCapacity - amResource.getVirtualCores()), queueFSZeroWithAVL.getMaxAMShare());
  assertEquals("QueueFSZeroWithAVL's fair share should be zero", 0, queueFSZeroWithAVL.getFairShare().getMemorySize());
  assertEquals("QueueFSZeroWithAVL's maximum AM resource should be " + " available resource * maxAMShare", expectedAMResource.getMemorySize(), queueFSZeroWithAVL.getMetrics().getMaxAMShareMB());
  assertEquals("QueueFSZeroWithAVL's maximum AM resource should be " + " available resource * maxAMShare", expectedAMResource.getVirtualCores(), queueFSZeroWithAVL.getMetrics().getMaxAMShareVCores());
  // Fixed copy-paste bug: this message previously named QueueFSZeroWithMax
  // while asserting on queueFSZeroWithAVL.
  assertEquals("QueueFSZeroWithAVL's AM resource usage should be the same to " + "AM resource request", amResource.getMemorySize(), queueFSZeroWithAVL.getMetrics().getAMResourceUsageMB());

  // queueFSNonZero
  amResource = Resources.createResource(1 * GB, 1);
  FSLeafQueue queueFSNonZero = scheduler.getQueueManager().getLeafQueue("queueFSNonZero", true);
  ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
  createApplicationWithAMResource(attId3, "queueFSNonZero", "user1", amResource);
  createSchedulingRequestExistingApplication(1 * GB, 1, amPriority, attId3);
  scheduler.update();
  scheduler.handle(updateEvent);
  // queueFSNonZero's weight is 1, so its fair share is not 0, and we use the
  // fair share to compute maxAMShare
  assertNotEquals("QueueFSNonZero's fair share shouldn't be zero", 0, queueFSNonZero.getFairShare().getMemorySize());
  expectedAMResource = Resources.multiplyAndRoundUp(queueFSNonZero.getFairShare(), queueFSNonZero.getMaxAMShare());
  assertEquals("QueueFSNonZero's maximum AM resource should be " + " fair share * maxAMShare", expectedAMResource.getMemorySize(), queueFSNonZero.getMetrics().getMaxAMShareMB());
  assertEquals("QueueFSNonZero's maximum AM resource should be " + " fair share * maxAMShare", expectedAMResource.getVirtualCores(), queueFSNonZero.getMetrics().getMaxAMShareVCores());
  assertEquals("QueueFSNonZero's AM resource usage should be the same to " + "AM resource request", amResource.getMemorySize(), queueFSNonZero.getMetrics().getAMResourceUsageMB());
}
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in the Apache Hadoop project.
From the class TestFairScheduler, method testSimpleContainerReservation:
@Test(timeout = 5000)
public void testSimpleContainerReservation() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Register a single 1 GB node.
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(node1));

  // An app in queue1 asks for the node's entire capacity...
  createSchedulingRequest(1024, "queue1", "user1", 1);
  scheduler.update();
  NodeUpdateSchedulerEvent node1Update = new NodeUpdateSchedulerEvent(node1);
  scheduler.handle(node1Update);
  // ...and should be allocated all of it.
  assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemorySize());

  // A second app, in queue2, requests the same amount.
  ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", "user1", 1);
  scheduler.update();
  scheduler.handle(node1Update);
  // The node is full, so queue2 gets nothing but holds a reservation.
  assertEquals(0, scheduler.getQueueManager().getQueue("queue2").getResourceUsage().getMemorySize());
  assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemorySize());

  // Another 1 GB node joins the cluster and heartbeats.
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2");
  scheduler.handle(new NodeAddedSchedulerEvent(node2));
  scheduler.handle(new NodeUpdateSchedulerEvent(node2));
  // queue2's request is satisfied on the new node...
  assertEquals(1024, scheduler.getQueueManager().getQueue("queue2").getResourceUsage().getMemorySize());
  // ...while the reservation on the first node remains in place...
  assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemorySize());
  // ...until the first node's next heartbeat clears it.
  scheduler.handle(node1Update);
  assertEquals(0, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemorySize());
}
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in the Apache Hadoop project.
From the class TestFairScheduler, method testMultipleNodesSingleRackRequest:
@Test(timeout = 5000)
public void testMultipleNodesSingleRackRequest() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Two nodes on rack 1 and a third node on rack 2; only the first two are
  // registered with the scheduler.
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
  RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2");
  RMNode node3 = MockNodes.newNodeInfo(2, Resources.createResource(1024), 3, "127.0.0.3");
  scheduler.handle(new NodeAddedSchedulerEvent(node1));
  scheduler.handle(new NodeAddedSchedulerEvent(node2));

  ApplicationAttemptId attemptId = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
  createMockRMApp(attemptId);
  scheduler.addApplication(attemptId.getApplicationId(), "queue1", "user1", false);
  scheduler.addApplicationAttempt(attemptId, false, false);

  // One request covering two hosts on the same rack, plus a request for a
  // host on a different rack.
  List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
  requests.add(createResourceRequest(1024, node1.getHostName(), 1, 1, true));
  requests.add(createResourceRequest(1024, node2.getHostName(), 1, 1, true));
  requests.add(createResourceRequest(1024, node3.getHostName(), 1, 1, true));
  requests.add(createResourceRequest(1024, node1.getRackName(), 1, 1, true));
  requests.add(createResourceRequest(1024, node3.getRackName(), 1, 1, true));
  requests.add(createResourceRequest(1024, ResourceRequest.ANY, 1, 2, true));
  scheduler.allocate(attemptId, requests, new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);

  // Node 1 heartbeats: expect a node-local assignment.
  scheduler.update();
  scheduler.handle(new NodeUpdateSchedulerEvent(node1));
  assertEquals(1, scheduler.getSchedulerApp(attemptId).getLiveContainers().size());

  // Node 2 heartbeats: expect a rack-local assignment.
  scheduler.update();
  scheduler.handle(new NodeUpdateSchedulerEvent(node2));
  assertEquals(2, scheduler.getSchedulerApp(attemptId).getLiveContainers().size());
}
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in the Apache Hadoop project.
From the class TestFairScheduler, method testFairShareWithLowMaxResources:
/**
 * Test fair shares when max resources are set and are low enough to impact
 * the shares.
 *
 * @throws IOException if the allocation file cannot be written or scheduler
 *         reinitialization fails
 */
@Test
public void testFairShareWithLowMaxResources() throws IOException {
  // try-with-resources guarantees the allocation file is flushed and closed
  // even if one of the println calls throws.
  try (PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE))) {
    out.println("<?xml version=\"1.0\"?>");
    out.println("<allocations>");
    out.println(" <queue name=\"queueA\">");
    out.println(" <maxResources>1024 mb 1 vcores</maxResources>");
    out.println(" <weight>0.75</weight>");
    out.println(" </queue>");
    out.println(" <queue name=\"queueB\">");
    out.println(" <maxResources>3072 mb 3 vcores</maxResources>");
    out.println(" <weight>0.25</weight>");
    out.println(" </queue>");
    out.println("</allocations>");
  }
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // Add one big node (only care about aggregate capacity)
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  ApplicationAttemptId attId1 = createSchedulingRequest(1024, 1, "queueA", "user1", 2);
  ApplicationAttemptId attId2 = createSchedulingRequest(1024, 1, "queueB", "user1", 4);
  scheduler.update();

  FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue("queueA", false);
  // queueA's weight is 0.75, so its fair share of the 8GB cluster should be
  // 6GB, but it's capped at 1GB by its maxResources.
  assertEquals("Queue A did not get its expected fair share", 1 * 1024, queue.getFairShare().getMemorySize());
  // queueB's weight is 0.25, so its fair share should be 2GB, but since
  // queueA is capped at 1GB, the remaining 7GB falls to queueB, which is in
  // turn capped at 3GB by its own maxResources.
  queue = scheduler.getQueueManager().getLeafQueue("queueB", false);
  assertEquals("Queue B did not get its expected fair share", 3 * 1024, queue.getFairShare().getMemorySize());

  // Six heartbeats: with assignmultiple false, each assigns at most one
  // container, enough to place every container the caps permit.
  NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
  scheduler.handle(updateEvent);
  scheduler.handle(updateEvent);
  scheduler.handle(updateEvent);
  scheduler.handle(updateEvent);
  scheduler.handle(updateEvent);
  scheduler.handle(updateEvent);

  // App 1 should be running with 1 container
  assertEquals("App 1 is not running with the correct number of containers", 1, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
  // App 2 should be running with 3 containers
  assertEquals("App 2 is not running with the correct number of containers", 3, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
}
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent in the Apache Hadoop project.
From the class TestFairScheduler, method testFifoWithinQueue:
@Test(timeout = 5000)
public void testFifoWithinQueue() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, resourceManager.getRMContext());

  // A node with room for exactly three 1 GB / 1-vcore containers.
  RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(3072, 3), 1, "127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(node1));

  // Even if submitted at the exact same time, apps are deterministically
  // ordered by name. Each app asks for two containers.
  ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 2);
  ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1", "user1", 2);
  FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
  FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
  FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
  queue1.setPolicy(new FifoPolicy());
  scheduler.update();

  // These tests set assignmultiple to false, so each heartbeat hands out a
  // single container. Under FIFO, app1's two requests are served before
  // app2 gets anything.
  NodeUpdateSchedulerEvent heartbeat = new NodeUpdateSchedulerEvent(node1);
  scheduler.handle(heartbeat);
  assertEquals(1, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());

  scheduler.handle(heartbeat);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(0, app2.getLiveContainers().size());

  scheduler.handle(heartbeat);
  assertEquals(2, app1.getLiveContainers().size());
  assertEquals(1, app2.getLiveContainers().size());
}
Aggregations