Example 11 with RMAppImpl

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in project hadoop by apache.

From the class TestRMAppLogAggregationStatus, method testGetLogAggregationStatusForAppReport.

@Test(timeout = 10000)
public void testGetLogAggregationStatusForAppReport() {
    YarnConfiguration conf = new YarnConfiguration();
    // Disable the log aggregation
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
    RMAppImpl rmApp = (RMAppImpl) createRMApp(conf);
    // The log aggregation status should be DISABLED.
    Assert.assertEquals(LogAggregationStatus.DISABLED, rmApp.getLogAggregationStatusForAppReport());
    // Enable the log aggregation
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    rmApp = (RMAppImpl) createRMApp(conf);
    // If we do not know of any NodeManagers for this application and
    // log aggregation is enabled, the log aggregation status will
    // return NOT_START
    Assert.assertEquals(LogAggregationStatus.NOT_START, rmApp.getLogAggregationStatusForAppReport());
    NodeId nodeId1 = NodeId.newInstance("localhost", 1111);
    NodeId nodeId2 = NodeId.newInstance("localhost", 2222);
    NodeId nodeId3 = NodeId.newInstance("localhost", 3333);
    NodeId nodeId4 = NodeId.newInstance("localhost", 4444);
    // If the log aggregation status for all NMs is NOT_START,
    // the log aggregation status for this app will return NOT_START
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    Assert.assertEquals(LogAggregationStatus.NOT_START, rmApp.getLogAggregationStatusForAppReport());
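    // If the log aggregation status for at least one NM is RUNNING and the
    // others are NOT_START or SUCCEEDED (the app is not yet finished), the
    // log aggregation status for this app will return RUNNING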
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING, rmApp.getLogAggregationStatusForAppReport());
    rmApp.handle(new RMAppEvent(rmApp.getApplicationId(), RMAppEventType.KILL));
    Assert.assertTrue(RMAppImpl.isAppInFinalState(rmApp));
    // If the log aggregation status for at least one NM is TIME_OUT and
    // the others are SUCCEEDED, the log aggregation status for this app
    // will return TIME_OUT
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.TIME_OUT, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    Assert.assertEquals(LogAggregationStatus.TIME_OUT, rmApp.getLogAggregationStatusForAppReport());
    // If the log aggregation status for all NMs is SUCCEEDED and the
    // application is in its final state, the log aggregation status for
    // this app will return SUCCEEDED
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    Assert.assertEquals(LogAggregationStatus.SUCCEEDED, rmApp.getLogAggregationStatusForAppReport());
    rmApp = (RMAppImpl) createRMApp(conf);
    // If the log aggregation status for at least one NM is RUNNING,
    // the log aggregation status for this app will return RUNNING
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING, rmApp.getLogAggregationStatusForAppReport());
    // If the log aggregation status for at least one NM is
    // RUNNING_WITH_FAILURE, the log aggregation status
    // for this app will return RUNNING_WITH_FAILURE
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING_WITH_FAILURE, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING_WITH_FAILURE, rmApp.getLogAggregationStatusForAppReport());
    // For node4, the previous log aggregation status is RUNNING_WITH_FAILURE;
    // it will not change even if a new log aggregation status of RUNNING
    // is reported
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING_WITH_FAILURE, rmApp.getLogAggregationStatusForAppReport());
    rmApp.handle(new RMAppEvent(rmApp.getApplicationId(), RMAppEventType.KILL));
    Assert.assertTrue(RMAppImpl.isAppInFinalState(rmApp));
    // If the log aggregation status for at least one NM is FAILED, the
    // others are either SUCCEEDED or TIME_OUT, and this application is
    // in its final state, the log aggregation status for this app
    // will return FAILED
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.TIME_OUT, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.FAILED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.FAILED, ""));
    Assert.assertEquals(LogAggregationStatus.FAILED, rmApp.getLogAggregationStatusForAppReport());
}
Also used : RMAppImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl) RMAppEvent(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) NodeId(org.apache.hadoop.yarn.api.records.NodeId) Test(org.junit.Test)
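
The four aggregateLogReport calls in each scenario above differ only in the per-node status. A minimal sketch of a hypothetical helper (not part of the original test class) that would factor out that repetition, using only the aggregateLogReport and LogAggregationReport.newInstance calls already shown in the test:

// Hypothetical helper (illustrative only): wraps the repeated report
// construction used throughout the test above.
private static void aggregate(RMAppImpl app, NodeId node, LogAggregationStatus status) {
    app.aggregateLogReport(node,
        LogAggregationReport.newInstance(app.getApplicationId(), status, ""));
}

With it, each step reduces to calls such as aggregate(rmApp, nodeId2, LogAggregationStatus.RUNNING).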

Example 12 with RMAppImpl

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in project hadoop by apache.

From the class TestCapacityScheduler, method testBlackListNodes.

@Test
public void testBlackListNodes() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MockRM rm = new MockRM(conf);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    String host = "127.0.0.1";
    RMNode node = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, host);
    cs.handle(new NodeAddedSchedulerEvent(node));
    ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    RMAppAttemptMetrics attemptMetric = new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
    RMAppImpl app = mock(RMAppImpl.class);
    when(app.getApplicationId()).thenReturn(appId);
    RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
    Container container = mock(Container.class);
    when(attempt.getMasterContainer()).thenReturn(container);
    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
    when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
    when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
    when(app.getCurrentAppAttempt()).thenReturn(attempt);
    rm.getRMContext().getRMApps().put(appId, app);
    SchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appId, "default", "user");
    cs.handle(addAppEvent);
    SchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    cs.handle(addAttemptEvent);
    // Verify the blacklist can be updated independent of requesting containers
    cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), Collections.singletonList(host), null, NULL_UPDATE_REQUESTS);
    Assert.assertTrue(cs.getApplicationAttempt(appAttemptId).isPlaceBlacklisted(host));
    cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), null, Collections.singletonList(host), NULL_UPDATE_REQUESTS);
    Assert.assertFalse(cs.getApplicationAttempt(appAttemptId).isPlaceBlacklisted(host));
    rm.stop();
}
Also used : RMAppImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl) RMAppAttemptMetrics(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics) NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent) NodeRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent) AppAttemptRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent) ContainerExpiredSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) NodeUpdateSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) RMAppAttemptImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)

Example 13 with RMAppImpl

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in project hadoop by apache.

From the class TestCapacityScheduler, method appHelper.

private ApplicationAttemptId appHelper(MockRM rm, CapacityScheduler cs, int clusterTs, int appId, String queue, String user) {
    ApplicationId appId1 = BuilderUtils.newApplicationId(clusterTs, appId);
    ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(appId1, appId);
    RMAppAttemptMetrics attemptMetric1 = new RMAppAttemptMetrics(appAttemptId1, rm.getRMContext());
    RMAppImpl app1 = mock(RMAppImpl.class);
    when(app1.getApplicationId()).thenReturn(appId1);
    RMAppAttemptImpl attempt1 = mock(RMAppAttemptImpl.class);
    Container container = mock(Container.class);
    when(attempt1.getMasterContainer()).thenReturn(container);
    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
    when(attempt1.getSubmissionContext()).thenReturn(submissionContext);
    when(attempt1.getAppAttemptId()).thenReturn(appAttemptId1);
    when(attempt1.getRMAppAttemptMetrics()).thenReturn(attemptMetric1);
    when(app1.getCurrentAppAttempt()).thenReturn(attempt1);
    rm.getRMContext().getRMApps().put(appId1, app1);
    SchedulerEvent addAppEvent1 = new AppAddedSchedulerEvent(appId1, queue, user);
    cs.handle(addAppEvent1);
    SchedulerEvent addAttemptEvent1 = new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
    cs.handle(addAttemptEvent1);
    return appAttemptId1;
}
Also used : RMAppImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl) RMAppAttemptMetrics(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) RMAppAttemptImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) SchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent) NodeRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent) AppAttemptRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent) ContainerExpiredSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent) NodeUpdateSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent)
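
A sketch of how appHelper might be invoked, assuming a started MockRM named rm and its CapacityScheduler cs as in the surrounding examples; the cluster timestamp, app id, queue, and user values are illustrative:

// Hypothetical usage of the helper above: registers an application and its
// attempt with the scheduler and returns the attempt id for later
// allocate(...) calls or scheduler-event assertions.
ApplicationAttemptId attemptId = appHelper(rm, cs, 100, 1, "default", "user");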

Example 14 with RMAppImpl

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in project hadoop by apache.

From the class TestCapacityScheduler, method testHeadRoomCalculationWithDRC.

@Test
public void testHeadRoomCalculationWithDRC() throws Exception {
    // Test with a total cluster resource of 20GB memory and 20 vcores.
    // The queue where the two apps run has a user limit factor of 0.8.
    // Allocate 10GB memory and 1 vcore to app 1.
    // App 1 should have headroom
    // 20GB * 0.8 - 10GB = 6GB memory available and 15 vcores.
    // Allocate 1GB memory and 1 vcore to app 2.
    // App 2 should have headroom 20GB - 10 - 1 = 9GB memory,
    // and 20 * 0.8 - 1 = 15 vcores.
    CapacitySchedulerConfiguration csconf = new CapacitySchedulerConfiguration();
    csconf.setResourceComparator(DominantResourceCalculator.class);
    YarnConfiguration conf = new YarnConfiguration(csconf);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MockRM rm = new MockRM(conf);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    LeafQueue qb = (LeafQueue) cs.getQueue("default");
    qb.setUserLimitFactor((float) 0.8);
    // add app 1
    ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    RMAppAttemptMetrics attemptMetric = new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
    RMAppImpl app = mock(RMAppImpl.class);
    when(app.getApplicationId()).thenReturn(appId);
    RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
    Container container = mock(Container.class);
    when(attempt.getMasterContainer()).thenReturn(container);
    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
    when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
    when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
    when(app.getCurrentAppAttempt()).thenReturn(attempt);
    rm.getRMContext().getRMApps().put(appId, app);
    SchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appId, "default", "user1");
    cs.handle(addAppEvent);
    SchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    cs.handle(addAttemptEvent);
    // add app 2
    ApplicationId appId2 = BuilderUtils.newApplicationId(100, 2);
    ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(appId2, 1);
    RMAppAttemptMetrics attemptMetric2 = new RMAppAttemptMetrics(appAttemptId2, rm.getRMContext());
    RMAppImpl app2 = mock(RMAppImpl.class);
    when(app2.getApplicationId()).thenReturn(appId2);
    RMAppAttemptImpl attempt2 = mock(RMAppAttemptImpl.class);
    when(attempt2.getMasterContainer()).thenReturn(container);
    when(attempt2.getSubmissionContext()).thenReturn(submissionContext);
    when(attempt2.getAppAttemptId()).thenReturn(appAttemptId2);
    when(attempt2.getRMAppAttemptMetrics()).thenReturn(attemptMetric2);
    when(app2.getCurrentAppAttempt()).thenReturn(attempt2);
    rm.getRMContext().getRMApps().put(appId2, app2);
    addAppEvent = new AppAddedSchedulerEvent(appId2, "default", "user2");
    cs.handle(addAppEvent);
    addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
    cs.handle(addAttemptEvent);
    // Add nodes to the cluster, so the cluster has 20GB and 20 vcores
    Resource newResource = Resource.newInstance(10 * GB, 10);
    RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
    cs.handle(new NodeAddedSchedulerEvent(node));
    Resource newResource2 = Resource.newInstance(10 * GB, 10);
    RMNode node2 = MockNodes.newNodeInfo(0, newResource2, 1, "127.0.0.2");
    cs.handle(new NodeAddedSchedulerEvent(node2));
    FiCaSchedulerApp fiCaApp1 = cs.getSchedulerApplications().get(app.getApplicationId()).getCurrentAppAttempt();
    FiCaSchedulerApp fiCaApp2 = cs.getSchedulerApplications().get(app2.getApplicationId()).getCurrentAppAttempt();
    Priority u0Priority = TestUtils.createMockPriority(1);
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    // allocate container for app1 with 10GB memory and 1 vcore
    fiCaApp1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 10 * GB, 1, true, u0Priority, recordFactory)));
    cs.handle(new NodeUpdateSchedulerEvent(node));
    cs.handle(new NodeUpdateSchedulerEvent(node2));
    assertEquals(6 * GB, fiCaApp1.getHeadroom().getMemorySize());
    assertEquals(15, fiCaApp1.getHeadroom().getVirtualCores());
    // allocate container for app2 with 1GB memory and 1 vcore
    fiCaApp2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, u0Priority, recordFactory)));
    cs.handle(new NodeUpdateSchedulerEvent(node));
    cs.handle(new NodeUpdateSchedulerEvent(node2));
    assertEquals(9 * GB, fiCaApp2.getHeadroom().getMemorySize());
    assertEquals(15, fiCaApp2.getHeadroom().getVirtualCores());
}
Also used : RMAppImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl) RMAppAttemptMetrics(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics) NodeUpdateSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent) NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent) NodeRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent) AppAttemptRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent) ContainerExpiredSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) RecordFactory(org.apache.hadoop.yarn.factories.RecordFactory) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) RMAppAttemptImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
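
To make the arithmetic in the leading comment explicit, a small stand-alone sketch (plain Java, not part of the test) of the expected headroom numbers, assuming headroom is bounded both by the 0.8 user limit factor and by the memory still unallocated in the cluster:

// Cluster: 20 GB memory, 20 vcores; user limit factor 0.8.
final long clusterMemGB = 20, clusterVcores = 20;
final double userLimitFactor = 0.8;

// App 1 (user1) holds 10 GB / 1 vcore.
long app1MemHeadroomGB = (long) (clusterMemGB * userLimitFactor) - 10;   // 16 - 10 = 6
long app1VcoreHeadroom = (long) (clusterVcores * userLimitFactor) - 1;   // 16 - 1  = 15

// App 2 (user2) holds 1 GB / 1 vcore; its memory headroom is capped by the
// unallocated cluster memory (20 - 10 - 1) rather than by the user limit.
long app2MemHeadroomGB = clusterMemGB - 10 - 1;                          // 9
long app2VcoreHeadroom = (long) (clusterVcores * userLimitFactor) - 1;   // 15

These values match the assertEquals checks above: 6 GB / 15 vcores for app 1 and 9 GB / 15 vcores for app 2.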

Example 15 with RMAppImpl

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in project hadoop by apache.

From the class TestCapacityScheduler, method testApplicationHeadRoom.

// Verifies that the headroom passed to the ApplicationMaster has been
// updated in RMAppAttemptMetrics
@Test
public void testApplicationHeadRoom() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MockRM rm = new MockRM(conf);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    RMAppAttemptMetrics attemptMetric = new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
    RMAppImpl app = mock(RMAppImpl.class);
    when(app.getApplicationId()).thenReturn(appId);
    RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
    Container container = mock(Container.class);
    when(attempt.getMasterContainer()).thenReturn(container);
    ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
    when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
    when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
    when(app.getCurrentAppAttempt()).thenReturn(attempt);
    rm.getRMContext().getRMApps().put(appId, app);
    SchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appId, "default", "user");
    cs.handle(addAppEvent);
    SchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    cs.handle(addAttemptEvent);
    Allocation allocate = cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
    Assert.assertNotNull(attempt);
    Assert.assertEquals(Resource.newInstance(0, 0), allocate.getResourceLimit());
    Assert.assertEquals(Resource.newInstance(0, 0), attemptMetric.getApplicationAttemptHeadroom());
    // Add a node to cluster
    Resource newResource = Resource.newInstance(4 * GB, 1);
    RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
    cs.handle(new NodeAddedSchedulerEvent(node));
    allocate = cs.allocate(appAttemptId, Collections.<ResourceRequest>emptyList(), Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
    // All resources should be sent as headroom
    Assert.assertEquals(newResource, allocate.getResourceLimit());
    Assert.assertEquals(newResource, attemptMetric.getApplicationAttemptHeadroom());
    rm.stop();
}
Also used : RMAppImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl) RMAppAttemptMetrics(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics) NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) Resource(org.apache.hadoop.yarn.api.records.Resource) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent) NodeRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent) AppAttemptRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent) ContainerExpiredSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) NodeUpdateSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) Allocation(org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) RMAppAttemptImpl(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl) ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) UpdateNodeResourceRequest(org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
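
For context, a minimal sketch of how the same headroom is typically observed on the ApplicationMaster side, assuming an already registered AMRMClient named amRMClient (none of that client setup appears in the test above):

// Hypothetical AM-side counterpart of the RM-side assertions: the headroom
// is carried in the allocate response as the available resources.
AllocateResponse response = amRMClient.allocate(0.0f);
Resource headroom = response.getAvailableResources();
System.out.println("Headroom: " + headroom.getMemorySize() + " MB, "
    + headroom.getVirtualCores() + " vcores");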

Aggregations

RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl): 22
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 15
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 12
Test (org.junit.Test): 12
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 11
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 10
RMAppAttemptImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl): 10
Container (org.apache.hadoop.yarn.api.records.Container): 9
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM): 9
RMAppAttemptMetrics (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics): 9
AppAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent): 9
AppAttemptAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent): 9
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 8
AppAttemptRemovedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent): 8
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 8
SchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent): 8
ContainerExpiredSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent): 7
NodeRemovedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent): 7
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent): 7
Configuration (org.apache.hadoop.conf.Configuration): 6