
Example 16 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestFairScheduler, method testMultipleContainersWaitingForReservation.

@Test(timeout = 5000)
public void testMultipleContainersWaitingForReservation() throws IOException {
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    // Add a node
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
    scheduler.handle(nodeEvent1);
    // Request full capacity of node
    createSchedulingRequest(1024, "queue1", "user1", 1);
    scheduler.update();
    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
    scheduler.handle(updateEvent);
    ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue2", "user2", 1);
    ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue3", "user3", 1);
    scheduler.update();
    scheduler.handle(updateEvent);
    // One container should get reservation and the other should get nothing
    assertEquals(1024, scheduler.getSchedulerApp(attId1).getCurrentReservation().getMemorySize());
    assertEquals(0, scheduler.getSchedulerApp(attId2).getCurrentReservation().getMemorySize());
}
Also used: NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Test (org.junit.Test)
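
A note on the helper used throughout these tests: createSchedulingRequest is defined elsewhere in TestFairScheduler and is not part of this excerpt. As a rough, hypothetical sketch of what such a helper does (the appIdCounter field, the choice of event constructors, and the overall shape are assumptions, not the actual Hadoop code; ApplicationId, Priority, Resources, and the App*AddedSchedulerEvent classes are the standard YARN records and scheduler events), it registers an application and an attempt with the scheduler and submits a single off-switch ResourceRequest:

// Hypothetical sketch only; not the real TestFairScheduler helper.
private ApplicationAttemptId createSchedulingRequest(
        int memory, String queueId, String userId, int numContainers) {
    // appIdCounter is an assumed per-test counter that keeps application ids unique.
    ApplicationId appId =
            ApplicationId.newInstance(System.currentTimeMillis(), appIdCounter++);
    ApplicationAttemptId attId = ApplicationAttemptId.newInstance(appId, 1);
    // Register the application and its attempt with the scheduler.
    scheduler.handle(new AppAddedSchedulerEvent(appId, queueId, userId));
    scheduler.handle(new AppAttemptAddedSchedulerEvent(attId, false));
    // Ask for numContainers containers of the given memory size on any node.
    ResourceRequest request = ResourceRequest.newInstance(Priority.newInstance(1),
            ResourceRequest.ANY, Resources.createResource(memory), numContainers);
    scheduler.allocate(attId, Collections.singletonList(request),
            Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
    return attId;
}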

Example 17 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestFairScheduler, method testChildMaxResources.

/**
   * Test the child max resource settings.
   *
   * @throws IOException if scheduler reinitialization fails
   */
@Test
public void testChildMaxResources() throws IOException {
    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
    out.println("<?xml version=\"1.0\"?>");
    out.println("<allocations>");
    out.println("  <queue name=\"queueA\" type=\"parent\">");
    out.println("    <maxChildResources>2048mb,2vcores</maxChildResources>");
    out.println("  </queue>");
    out.println("</allocations>");
    out.close();
    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    // Add one big node (only care about aggregate capacity)
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
    scheduler.handle(nodeEvent1);
    ApplicationAttemptId attId1 = createSchedulingRequest(1024, 1, "queueA.queueB", "user1", 8);
    ApplicationAttemptId attId2 = createSchedulingRequest(1024, 1, "queueA.queueC", "user1", 8);
    scheduler.update();
    NodeUpdateSchedulerEvent nodeEvent = new NodeUpdateSchedulerEvent(node1);
    // Drive eight node heartbeats so both apps can be allocated containers up to
    // the 2048 MB / 2 vcore maxChildResources cap on their child queues.
    for (int i = 0; i < 8; i++) {
        scheduler.handle(nodeEvent);
    }
    // Each app should be running with 2 containers
    assertEquals("App 1 is not running with the correct number of containers", 2, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
    assertEquals("App 2 is not running with the correct number of containers", 2, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
    out = new PrintWriter(new FileWriter(ALLOC_FILE));
    out.println("<?xml version=\"1.0\"?>");
    out.println("<allocations>");
    out.println("  <queue name=\"queueA\" type=\"parent\">");
    out.println("    <maxChildResources>3072mb,3vcores</maxChildResources>");
    out.println("  </queue>");
    out.println("</allocations>");
    out.close();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    scheduler.update();
    // Four more heartbeats so each app can grow to the new 3072 MB cap.
    for (int i = 0; i < 4; i++) {
        scheduler.handle(nodeEvent);
    }
    // Each app should now be running with 3 containers
    assertEquals("App 1 is not running with the correct number of containers", 3, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
    assertEquals("App 2 is not running with the correct number of containers", 3, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
    out = new PrintWriter(new FileWriter(ALLOC_FILE));
    out.println("<?xml version=\"1.0\"?>");
    out.println("<allocations>");
    out.println("  <queue name=\"queueA\" type=\"parent\">");
    out.println("    <maxChildResources>1024mb,1vcores</maxChildResources>");
    out.println("  </queue>");
    out.println("</allocations>");
    out.close();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    scheduler.update();
    scheduler.handle(nodeEvent);
    // Each app should still be running with 3 containers because we don't preempt
    assertEquals("App 1 is not running with the correct number of containers", 3, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
    assertEquals("App 2 is not running with the correct number of containers", 3, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
}
Also used: NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), FileWriter (java.io.FileWriter), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), PrintWriter (java.io.PrintWriter), Test (org.junit.Test)
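
The test rewrites ALLOC_FILE three times with only the maxChildResources value changing. A small helper along these lines (hypothetical, not part of the original test) keeps that repetition in one place:

// Hypothetical helper: write an allocation file whose parent queueA caps each
// child queue at the given resource string, e.g. "2048mb,2vcores".
private void writeMaxChildResources(String maxChildResources) throws IOException {
    try (PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE))) {
        out.println("<?xml version=\"1.0\"?>");
        out.println("<allocations>");
        out.println("  <queue name=\"queueA\" type=\"parent\">");
        out.println("    <maxChildResources>" + maxChildResources + "</maxChildResources>");
        out.println("  </queue>");
        out.println("</allocations>");
    }
}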

Example 18 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestFairScheduler, method testReservationThresholdWithAssignMultiple.

@Test(timeout = 5000)
public void testReservationThresholdWithAssignMultiple() throws Exception {
    // Set reservable-nodes to 0 so any reservation would exceed the threshold and be skipped
    conf.setFloat(FairSchedulerConfiguration.RESERVABLE_NODES, 0f);
    conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
    conf.setBoolean(FairSchedulerConfiguration.DYNAMIC_MAX_ASSIGN, false);
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    // Add two nodes
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(4096, 4), 1, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
    scheduler.handle(nodeEvent1);
    RMNode node2 = MockNodes.newNodeInfo(2, Resources.createResource(4096, 4), 1, "127.0.0.2");
    NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
    scheduler.handle(nodeEvent2);
    // Create one request and assign containers
    ApplicationAttemptId attId = createSchedulingRequest(1024, "queue1", "user1", 10);
    scheduler.update();
    scheduler.handle(new NodeUpdateSchedulerEvent(node1));
    scheduler.update();
    scheduler.handle(new NodeUpdateSchedulerEvent(node2));
    // Verify capacity allocation
    assertEquals(8192, scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemorySize());
    // Verify that no reservations remain
    assertEquals(0, scheduler.getSchedulerApp(attId).getNumReservations(null, true));
}
Also used: NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Test (org.junit.Test)
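
The 8192 MB assertion follows from simple arithmetic once assignmultiple is on and dynamic max assign is off: each node heartbeat can place as many 1024 MB containers as fit on that node. A sketch of the expected numbers (illustrative only; the constants mirror the test setup above):

// Each 4096 MB node can hold four 1024 MB containers, and one heartbeat per
// node is enough to fill it when assignmultiple is enabled.
int containerMb = 1024;
int nodeMb = 4096;
int containersPerNode = nodeMb / containerMb;              // 4
int expectedUsageMb = 2 * containersPerNode * containerMb; // 8192 MB across both nodes
assertEquals(expectedUsageMb, scheduler.getQueueManager().getQueue("queue1")
        .getResourceUsage().getMemorySize());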

Example 19 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestFairScheduler, method testBlacklistNodes.

@Test
public void testBlacklistNodes() throws Exception {
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    final int GB = 1024;
    String host = "127.0.0.1";
    RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(16 * GB, 16), 0, host);
    NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
    scheduler.handle(nodeEvent);
    ApplicationAttemptId appAttemptId = createSchedulingRequest(GB, "root.default", "user", 1);
    FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
    // Verify the blacklist can be updated independently of requesting containers
    scheduler.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), Collections.singletonList(host), null, NULL_UPDATE_REQUESTS);
    assertTrue(app.isPlaceBlacklisted(host));
    scheduler.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), null, Collections.singletonList(host), NULL_UPDATE_REQUESTS);
    assertFalse(scheduler.getSchedulerApp(appAttemptId).isPlaceBlacklisted(host));
    List<ResourceRequest> update = Arrays.asList(createResourceRequest(GB, node.getHostName(), 1, 0, true));
    // Verify a container does not actually get placed on the blacklisted host
    scheduler.allocate(appAttemptId, update, Collections.<ContainerId>emptyList(), Collections.singletonList(host), null, NULL_UPDATE_REQUESTS);
    assertTrue(app.isPlaceBlacklisted(host));
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Incorrect number of containers allocated", 0, app.getLiveContainers().size());
    // Verify a container is allocated once the host is removed from the blacklist
    scheduler.allocate(appAttemptId, update, Collections.<ContainerId>emptyList(), null, Collections.singletonList(host), NULL_UPDATE_REQUESTS);
    assertFalse(app.isPlaceBlacklisted(host));
    createSchedulingRequest(GB, "root.default", "user", 1);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Incorrect number of containers allocated", 1, app.getLiveContainers().size());
}
Also used: NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), Test (org.junit.Test)
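
Outside the scheduler test, the same add/remove blacklist semantics are what an application master drives through the client API. A minimal sketch, assuming org.apache.hadoop.yarn.client.api.AMRMClient and a YarnConfiguration; registration and the allocate loop are omitted:

// Minimal sketch: an AM blacklisting and later un-blacklisting a host.
AMRMClient<AMRMClient.ContainerRequest> amClient = AMRMClient.createAMRMClient();
amClient.init(new YarnConfiguration());
amClient.start();
// registerApplicationMaster(...) would normally be called before allocating.
// Add the host to the blacklist: subsequent requests avoid it.
amClient.updateBlacklist(Collections.singletonList("127.0.0.1"), null);
// Later, remove it again so containers may be placed there.
amClient.updateBlacklist(null, Collections.singletonList("127.0.0.1"));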

Example 20 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestFairScheduler, method testDecreaseQueueSettingOnTheFlyInternal.

private void testDecreaseQueueSettingOnTheFlyInternal(String allocBefore, String allocAfter) throws Exception {
    // Set max running apps
    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
    out.println(allocBefore);
    out.close();
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    // Add a node
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8192, 8), 1, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
    scheduler.handle(nodeEvent1);
    // Request for app 1
    ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 1);
    scheduler.update();
    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
    scheduler.handle(updateEvent);
    // App 1 should be running
    assertEquals(1, scheduler.getSchedulerApp(attId1).getLiveContainers().size());
    ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1", "user1", 1);
    scheduler.update();
    scheduler.handle(updateEvent);
    ApplicationAttemptId attId3 = createSchedulingRequest(1024, "queue1", "user1", 1);
    scheduler.update();
    scheduler.handle(updateEvent);
    ApplicationAttemptId attId4 = createSchedulingRequest(1024, "queue1", "user1", 1);
    scheduler.update();
    scheduler.handle(updateEvent);
    // App 2 should be running
    assertEquals(1, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
    // App 3 should be running
    assertEquals(1, scheduler.getSchedulerApp(attId3).getLiveContainers().size());
    // App 4 should not be running
    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
    out = new PrintWriter(new FileWriter(ALLOC_FILE));
    out.println(allocAfter);
    out.close();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    scheduler.update();
    scheduler.handle(updateEvent);
    // App 2 should still be running
    assertEquals(1, scheduler.getSchedulerApp(attId2).getLiveContainers().size());
    scheduler.update();
    scheduler.handle(updateEvent);
    // App 3 should still be running
    assertEquals(1, scheduler.getSchedulerApp(attId3).getLiveContainers().size());
    scheduler.update();
    scheduler.handle(updateEvent);
    // App 4 should not be running
    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
    // Now remove app 1
    AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(attId1, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent1);
    scheduler.update();
    scheduler.handle(updateEvent);
    // App 4 should not be running
    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
    // Now remove app 2
    appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(attId2, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent1);
    scheduler.update();
    scheduler.handle(updateEvent);
    // App 4 should not be running
    assertEquals(0, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
    // Now remove app 3
    appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(attId3, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent1);
    scheduler.update();
    scheduler.handle(updateEvent);
    // App 4 should be running now
    assertEquals(1, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
}
Also used: NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), FileWriter (java.io.FileWriter), AppAttemptRemovedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), PrintWriter (java.io.PrintWriter)
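
The allocBefore and allocAfter strings are supplied by the callers of this method and are not part of this excerpt. Judging from the assertions (an assumption, not the actual caller code), they are allocation files that lower a maxRunningApps limit from 3 to 1, along these lines:

// Hypothetical allocBefore: up to three apps may run at once, so apps 1-3 start
// and app 4 waits.
String allocBefore = "<?xml version=\"1.0\"?>"
        + "<allocations>"
        + "  <queue name=\"queue1\">"
        + "    <maxRunningApps>3</maxRunningApps>"
        + "  </queue>"
        + "</allocations>";
// Hypothetical allocAfter: only one app may run; already-running apps keep their
// containers, so app 4 starts only after apps 1-3 finish.
String allocAfter = "<?xml version=\"1.0\"?>"
        + "<allocations>"
        + "  <queue name=\"queue1\">"
        + "    <maxRunningApps>1</maxRunningApps>"
        + "  </queue>"
        + "</allocations>";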

Aggregations

ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 514
Test (org.junit.Test): 362
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 222
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 170
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 109
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 104
Configuration (org.apache.hadoop.conf.Configuration): 87
Resource (org.apache.hadoop.yarn.api.records.Resource): 82
ArrayList (java.util.ArrayList): 75
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 74
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 65
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 63
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent): 60
Path (org.apache.hadoop.fs.Path): 55
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 53
Priority (org.apache.hadoop.yarn.api.records.Priority): 52
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 51
Container (org.apache.hadoop.yarn.api.records.Container): 50
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 49
HashMap (java.util.HashMap): 42