
Example 21 with ResourceRequest

Use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.

In class TestFifoScheduler, method testNodeLocalAssignment.

@Test(timeout = 2000)
public void testNodeLocalAssignment() throws Exception {
    AsyncDispatcher dispatcher = new InlineDispatcher();
    Configuration conf = new Configuration();
    RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf);
    containerTokenSecretManager.rollMasterKey();
    NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf);
    nmTokenSecretManager.rollMasterKey();
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    FifoScheduler scheduler = new FifoScheduler();
    RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
    rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
    rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class));
    ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
    scheduler.setRMContext(rmContext);
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(new Configuration(), rmContext);
    RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(1024 * 64), 1, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
    scheduler.handle(nodeEvent1);
    int _appId = 1;
    int _appAttemptId = 1;
    ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId);
    createMockRMApp(appAttemptId, rmContext);
    AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", "user1");
    scheduler.handle(appEvent);
    AppAttemptAddedSchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    scheduler.handle(attemptEvent);
    int memory = 64;
    int nConts = 3;
    int priority = 20;
    List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
    ResourceRequest nodeLocal = createResourceRequest(memory, node0.getHostName(), priority, nConts);
    ResourceRequest rackLocal = createResourceRequest(memory, node0.getRackName(), priority, nConts);
    ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority, nConts);
    ask.add(nodeLocal);
    ask.add(rackLocal);
    ask.add(any);
    scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);
    NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
    // Before the node update event, there are 3 local requests outstanding
    Assert.assertEquals(3, nodeLocal.getNumContainers());
    scheduler.handle(node0Update);
    // After the node update event, check that there are no more local requests outstanding
    Assert.assertEquals(0, nodeLocal.getNumContainers());
    // Also check that the containers were scheduled
    SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
    Assert.assertEquals(3, info.getLiveContainers().size());
    scheduler.stop();
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) NodeUpdateSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent) NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) CapacitySchedulerConfiguration(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) ArrayList(java.util.ArrayList) InlineDispatcher(org.apache.hadoop.yarn.event.InlineDispatcher) RMApplicationHistoryWriter(org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) NMTokenSecretManagerInRM(org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) SystemMetricsPublisher(org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) RMContainerTokenSecretManager(org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) UpdateNodeResourceRequest(org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) RMContextImpl(org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl) SchedulerAppReport(org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport) Test(org.junit.Test)
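
Note: createResourceRequest(memory, resourceName, priority, nConts) is a private helper of TestFifoScheduler and is not shown in this excerpt. A minimal sketch of what such a helper typically looks like, built on the public ResourceRequest.newInstance factory (the single vcore and the wrapper class name are assumptions made for illustration):

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public final class ResourceRequestFactory {
    // Builds one request for a given resource name: a host, a rack, or ResourceRequest.ANY.
    public static ResourceRequest createResourceRequest(int memory, String resourceName, int priority, int numContainers) {
        return ResourceRequest.newInstance(
                Priority.newInstance(priority),   // request priority
                resourceName,                     // host name, rack name, or "*"
                Resource.newInstance(memory, 1),  // memory in MB, 1 vcore assumed
                numContainers);                   // containers requested at this locality
    }
}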

Example 22 with ResourceRequest

Use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.

In class TestFifoScheduler, method testAllocateContainerOnNodeWithoutOffSwitchSpecified.

@Test(timeout = 60000)
public void testAllocateContainerOnNodeWithoutOffSwitchSpecified() throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.DEBUG);
    MockRM rm = new MockRM(conf);
    rm.start();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    RMApp app1 = rm.submitApp(2048);
    // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();
    // add request for containers
    List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
    requests.add(am1.createResourceReq("127.0.0.1", 1 * GB, 1, 1));
    requests.add(am1.createResourceReq("/default-rack", 1 * GB, 1, 1));
    // send the request
    am1.allocate(requests, null);
    try {
        // kick the schedule
        nm1.nodeHeartbeat(true);
    } catch (NullPointerException e) {
        Assert.fail("NPE when allocating container on node but " + "forget to set off-switch request should be handled");
    }
    rm.stop();
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) MockNM(org.apache.hadoop.yarn.server.resourcemanager.MockNM) ArrayList(java.util.ArrayList) MockAM(org.apache.hadoop.yarn.server.resourcemanager.MockAM) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) UpdateNodeResourceRequest(org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) Logger(org.apache.log4j.Logger) Test(org.junit.Test)
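
For context, the failure mode this test guards against is an ask that names a host and a rack but never the off-switch level. A sketch of what a complete three-level ask looks like when built directly with the public ResourceRequest factory (the priority, sizes, and class name here are illustrative, not taken from the test):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public final class CompleteAskExample {
    // A well-formed ask covers node-local, rack-local, and off-switch (ANY) levels;
    // the test above deliberately omits the ANY entry to exercise the NPE path.
    public static List<ResourceRequest> completeAsk() {
        Priority priority = Priority.newInstance(1);
        Resource oneGb = Resource.newInstance(1024, 1);
        List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
        ask.add(ResourceRequest.newInstance(priority, "127.0.0.1", oneGb, 1));         // node-local
        ask.add(ResourceRequest.newInstance(priority, "/default-rack", oneGb, 1));     // rack-local
        ask.add(ResourceRequest.newInstance(priority, ResourceRequest.ANY, oneGb, 1)); // off-switch
        return ask;
    }
}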

Example 23 with ResourceRequest

Use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.

In class TestAMRestart, method testAMRestartNotLostContainerCompleteMsg.

@Test(timeout = 30000)
public void testAMRestartNotLostContainerCompleteMsg() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
    MockRM rm1 = new MockRM(conf);
    rm1.start();
    RMApp app1 = rm1.submitApp(200, "name", "user", new HashMap<ApplicationAccessType, String>(), false, "default", -1, null, "MAPREDUCE", false, true);
    MockNM nm1 = new MockNM("127.0.0.1:1234", 10240, rm1.getResourceTrackerService());
    nm1.registerNode();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    allocateContainers(nm1, am1, 1);
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
    ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
    // container complete
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.COMPLETE);
    rm1.waitForState(nm1, containerId2, RMContainerState.COMPLETED);
    // the AM may crash before this message reaches it
    while (true) {
        AllocateResponse response = am1.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>());
        List<ContainerStatus> containerStatuses = response.getCompletedContainersStatuses();
        if (!isContainerIdInContainerStatus(containerStatuses, containerId2)) {
            Thread.sleep(100);
            continue;
        }
        // containerId should no longer be in the attempt's justFinishedContainers
        containerStatuses = app1.getCurrentAppAttempt().getJustFinishedContainers();
        if (isContainerIdInContainerStatus(containerStatuses, containerId2)) {
            Assert.fail();
        }
        break;
    }
    // fail the AM by sending CONTAINER_FINISHED event without registering.
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
    rm1.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
    // wait for app to start a new attempt.
    rm1.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
    // assert this is a new AM.
    ApplicationAttemptId newAttemptId = app1.getCurrentAppAttempt().getAppAttemptId();
    Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId()));
    // launch the new AM
    RMAppAttempt attempt2 = app1.getCurrentAppAttempt();
    MockAM am2 = rm1.launchAndRegisterAM(app1, rm1, nm1);
    // check whether the new AM receives the container-complete message
    AllocateResponse allocateResponse = am2.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>());
    List<ContainerStatus> containerStatuses = allocateResponse.getCompletedContainersStatuses();
    if (!isContainerIdInContainerStatus(containerStatuses, containerId2)) {
        Assert.fail();
    }
    containerStatuses = attempt2.getJustFinishedContainers();
    if (isContainerIdInContainerStatus(containerStatuses, containerId2)) {
        Assert.fail();
    }
    // the second allocate should not get container complete msg
    allocateResponse = am2.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>());
    containerStatuses = allocateResponse.getCompletedContainersStatuses();
    if (isContainerIdInContainerStatus(containerStatuses, containerId2)) {
        Assert.fail();
    }
    rm1.stop();
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) MockNM(org.apache.hadoop.yarn.server.resourcemanager.MockNM) ArrayList(java.util.ArrayList) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) AllocateResponse(org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse) NMContainerStatus(org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ApplicationAccessType(org.apache.hadoop.yarn.api.records.ApplicationAccessType) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) MockAM(org.apache.hadoop.yarn.server.resourcemanager.MockAM) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) Test(org.junit.Test)
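
isContainerIdInContainerStatus is another private helper of the test class that is not shown in this excerpt. A plausible implementation, assuming it simply scans the reported statuses for the given container id:

import java.util.List;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;

final class ContainerStatusChecks {
    // Returns true if any of the reported statuses belongs to the given container.
    static boolean isContainerIdInContainerStatus(List<ContainerStatus> statuses, ContainerId containerId) {
        for (ContainerStatus status : statuses) {
            if (status.getContainerId().equals(containerId)) {
                return true;
            }
        }
        return false;
    }
}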

Example 24 with ResourceRequest

Use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.

In class TestCapacityScheduler, method testApplicationHeadRoom.

// Verifies headroom passed to ApplicationMaster has been updated in
// RMAppAttemptMetrics
@Test
public void testApplicationHeadRoom() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MockRM rm = new MockRM(conf);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    RMAppAttemptMetrics attemptMetric = new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
    RMAppImpl app = mock(RMAppImpl.class);
    when(app.getApplicationId()).thenReturn(appId);
    RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
    Container container = mock(Container.class);
    when(attempt.getMasterContainer()).thenReturn(container);
    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
    when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
    when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
    when(app.getCurrentAppAttempt()).thenReturn(attempt);
    rm.getRMContext().getRMApps().put(appId, app);
    SchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appId, "default", "user");
    cs.handle(addAppEvent);
    SchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    cs.handle(addAttemptEvent);
    Allocation allocate = cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
    Assert.assertNotNull(attempt);
    Assert.assertEquals(Resource.newInstance(0, 0), allocate.getResourceLimit());
    Assert.assertEquals(Resource.newInstance(0, 0), attemptMetric.getApplicationAttemptHeadroom());
    // Add a node to cluster
    Resource newResource = Resource.newInstance(4 * GB, 1);
    RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
    cs.handle(new NodeAddedSchedulerEvent(node));
    allocate = cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
    // All resources should be sent as headroom
    Assert.assertEquals(newResource, allocate.getResourceLimit());
    Assert.assertEquals(newResource, attemptMetric.getApplicationAttemptHeadroom());
    rm.stop();
}
Also used : RMAppImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl) RMAppAttemptMetrics(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics) NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) Resource(org.apache.hadoop.yarn.api.records.Resource) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) SchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent) NodeRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent) AppAttemptRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent) ContainerExpiredSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) NodeUpdateSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) Allocation(org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) RMAppAttemptImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) UpdateNodeResourceRequest(org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
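
The test reads the headroom from the Allocation object returned by the scheduler and from RMAppAttemptMetrics. From an application master's point of view the same figure arrives in the AllocateResponse; a small sketch using the MockAM helper seen in the earlier examples (the wrapper class name is illustrative):

import java.util.ArrayList;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.MockAM;

final class HeadroomProbe {
    // Issues an empty allocate call and returns the headroom reported by the RM,
    // which is the value the scheduler also records as the attempt's headroom.
    static Resource currentHeadroom(MockAM am) throws Exception {
        AllocateResponse response = am.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>());
        return response.getAvailableResources();
    }
}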

Example 25 with ResourceRequest

Use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.

In class TestApplicationLimitsByPartition, method testHeadroom.

@Test
public void testHeadroom() throws Exception {
    /*
     * Test Case: Verify that the headroom calculated is the sum of the headrooms
     * for each partition requested. Submit an app with requests for the default
     * partition and the 'y' partition; the total headroom for the user should be
     * the sum of the headrooms for both labels.
     */
    simpleNodeLabelMappingToManager();
    CapacitySchedulerConfiguration csConf = (CapacitySchedulerConfiguration) TestUtils.getComplexConfigurationWithQueueLabels(conf);
    final String A1 = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
    final String B2 = CapacitySchedulerConfiguration.ROOT + ".b" + ".b2";
    csConf.setUserLimit(A1, 25);
    csConf.setUserLimit(B2, 25);
    YarnConfiguration conf = new YarnConfiguration();
    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
    when(csContext.getConfiguration()).thenReturn(csConf);
    when(csContext.getConf()).thenReturn(conf);
    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB));
    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
    RMContext rmContext = TestUtils.getMockRMContext();
    RMContext spyRMContext = spy(rmContext);
    when(spyRMContext.getNodeLabelManager()).thenReturn(mgr);
    when(csContext.getRMContext()).thenReturn(spyRMContext);
    // default label
    mgr.activateNode(NodeId.newInstance("h0", 0), Resource.newInstance(160 * GB, 16));
    // label x
    mgr.activateNode(NodeId.newInstance("h1", 0), Resource.newInstance(160 * GB, 16));
    // label y
    mgr.activateNode(NodeId.newInstance("h2", 0), Resource.newInstance(160 * GB, 16));
    // Say the cluster has 160 GB in total (e.g. 10 nodes of 16 GB each)
    Resource clusterResource = Resources.createResource(160 * GB);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, "root", queues, queues, TestUtils.spyHook);
    ResourceUsage queueResUsage = rootQueue.getQueueResourceUsage();
    when(csContext.getClusterResourceUsage()).thenReturn(queueResUsage);
    // Manipulate queue 'a'
    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get("b2"));
    queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("h0", rack_0, 0, 160 * GB);
    FiCaSchedulerNode node_1 = TestUtils.getMockNode("h1", rack_0, 0, 160 * GB);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    ResourceRequest amResourceRequest = mock(ResourceRequest.class);
    Resource amResource = Resources.createResource(0, 0);
    when(amResourceRequest.getCapability()).thenReturn(amResource);
    when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
    when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(rmAppAttempt);
    when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    Mockito.doReturn(true).when(spyApps).containsKey((ApplicationId) Matchers.any());
    Priority priority_1 = TestUtils.createMockPriority(1);
    // Submit first application with some resource-requests from user_0,
    // and check headroom
    final ApplicationAttemptId appAttemptId_0_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_0, user_0);
    List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
    app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_0.updateResourceRequests(app_0_0_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // head room = queue capacity * user limit = 50% * 90% * 160 GB * 0.25 (UL) = 18 GB
    Resource expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // Submit second application from user_0, check headroom
    final ApplicationAttemptId appAttemptId_0_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_1, user_0);
    List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_1.updateResourceRequests(app_0_1_requests);
    app_0_1_requests.clear();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_0_1.updateResourceRequests(app_0_1_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    queue.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // no change
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // head room for default label + head room for y partition
    // head room for y partition = 0.25 * 50% (b queue capacity) * 160 GB = 20 GB
    Resource expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    // Submit first application from user_1, check for new headroom
    final ApplicationAttemptId appAttemptId_1_0 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_1_0, user_1);
    List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_1_0.updateResourceRequests(app_1_0_requests);
    app_1_0_requests.clear();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_1_0.updateResourceRequests(app_1_0_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // head room = 50% * 90% * 160 GB * 0.25 (UL) = 18 GB for each of the 2 users
    expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    // head room for default label + head room for y partition
    // head room for y partition = 0.25 * 50% (b queue capacity) * 160 GB = 20 GB
    expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_1_0.getHeadroom());
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RecordFactory(org.apache.hadoop.yarn.factories.RecordFactory) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
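
The headroom comments in this last example are easiest to follow as plain arithmetic. A small standalone check of the numbers that expectedHeadroom and expectedHeadroomWithReqInY encode, assuming GB is 1024 MB as in the test utilities:

public final class HeadroomArithmetic {
    private static final int GB = 1024; // MB, matching the tests' convention

    public static void main(String[] args) {
        // Default-partition headroom: 0.5 * 0.9 * 160 GB * 0.25 (user limit) = 18 GB
        int defaultHeadroomMb = (int) (0.5 * 0.9 * 160 * 0.25) * GB;
        // 'y'-partition headroom: 0.25 * 0.5 * 160 GB = 20 GB
        int yPartitionHeadroomMb = (int) (0.25 * 0.5 * 160) * GB;
        System.out.println("default partition headroom: " + defaultHeadroomMb + " MB");    // 18432
        System.out.println("y partition headroom:       " + yPartitionHeadroomMb + " MB"); // 20480
        System.out.println("combined headroom:          " + (defaultHeadroomMb + yPartitionHeadroomMb) + " MB"); // 38912
    }
}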

Aggregations

ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 144 usages
Test (org.junit.Test): 69
ArrayList (java.util.ArrayList): 63
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 61
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 42
Resource (org.apache.hadoop.yarn.api.records.Resource): 42
Container (org.apache.hadoop.yarn.api.records.Container): 38
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 35
Priority (org.apache.hadoop.yarn.api.records.Priority): 28
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 25
RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt): 21
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 20
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 19
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 19
AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse): 17
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 17
HashMap (java.util.HashMap): 16
AllocateRequest (org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest): 15
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 14
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM): 14