Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl in project hadoop by apache.
The class TestCapacityScheduler, method testHeadRoomCalculationWithDRC.
@Test
public void testHeadRoomCalculationWithDRC() throws Exception {
// test with a total cluster resource of 20GB memory and 20 vcores.
// the queue where the two apps run has a user limit factor of 0.8
// allocate 10GB memory and 1 vcore to app 1.
// app 1 should have headroom
// 20GB * 0.8 - 10GB = 6GB memory available and 15 vcores.
// allocate 1GB memory and 1 vcore to app 2.
// app 2 should have headroom 20GB - 10 - 1 = 9GB memory,
// and 20 * 0.8 - 1 = 15 vcores.
CapacitySchedulerConfiguration csconf = new CapacitySchedulerConfiguration();
csconf.setResourceComparator(DominantResourceCalculator.class);
YarnConfiguration conf = new YarnConfiguration(csconf);
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
MockRM rm = new MockRM(conf);
rm.start();
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
LeafQueue qb = (LeafQueue) cs.getQueue("default");
qb.setUserLimitFactor((float) 0.8);
// add app 1
ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
RMAppAttemptMetrics attemptMetric = new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
RMAppImpl app = mock(RMAppImpl.class);
when(app.getApplicationId()).thenReturn(appId);
RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
Container container = mock(Container.class);
when(attempt.getMasterContainer()).thenReturn(container);
ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
when(attempt.getSubmissionContext()).thenReturn(submissionContext);
when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
when(app.getCurrentAppAttempt()).thenReturn(attempt);
rm.getRMContext().getRMApps().put(appId, app);
SchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appId, "default", "user1");
cs.handle(addAppEvent);
SchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
cs.handle(addAttemptEvent);
// add app 2
ApplicationId appId2 = BuilderUtils.newApplicationId(100, 2);
ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(appId2, 1);
RMAppAttemptMetrics attemptMetric2 = new RMAppAttemptMetrics(appAttemptId2, rm.getRMContext());
RMAppImpl app2 = mock(RMAppImpl.class);
when(app2.getApplicationId()).thenReturn(appId2);
RMAppAttemptImpl attempt2 = mock(RMAppAttemptImpl.class);
when(attempt2.getMasterContainer()).thenReturn(container);
when(attempt2.getSubmissionContext()).thenReturn(submissionContext);
when(attempt2.getAppAttemptId()).thenReturn(appAttemptId2);
when(attempt2.getRMAppAttemptMetrics()).thenReturn(attemptMetric2);
when(app2.getCurrentAppAttempt()).thenReturn(attempt2);
rm.getRMContext().getRMApps().put(appId2, app2);
addAppEvent = new AppAddedSchedulerEvent(appId2, "default", "user2");
cs.handle(addAppEvent);
addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId2, false);
cs.handle(addAttemptEvent);
// add nodes to the cluster, so the cluster has 20GB memory and 20 vcores
Resource newResource = Resource.newInstance(10 * GB, 10);
RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
cs.handle(new NodeAddedSchedulerEvent(node));
Resource newResource2 = Resource.newInstance(10 * GB, 10);
RMNode node2 = MockNodes.newNodeInfo(0, newResource2, 1, "127.0.0.2");
cs.handle(new NodeAddedSchedulerEvent(node2));
FiCaSchedulerApp fiCaApp1 = cs.getSchedulerApplications().get(app.getApplicationId()).getCurrentAppAttempt();
FiCaSchedulerApp fiCaApp2 = cs.getSchedulerApplications().get(app2.getApplicationId()).getCurrentAppAttempt();
Priority u0Priority = TestUtils.createMockPriority(1);
RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
// allocate container for app1 with 10GB memory and 1 vcore
fiCaApp1.updateResourceRequests(Collections.singletonList(
    TestUtils.createResourceRequest(ResourceRequest.ANY, 10 * GB, 1, true,
        u0Priority, recordFactory)));
cs.handle(new NodeUpdateSchedulerEvent(node));
cs.handle(new NodeUpdateSchedulerEvent(node2));
assertEquals(6 * GB, fiCaApp1.getHeadroom().getMemorySize());
assertEquals(15, fiCaApp1.getHeadroom().getVirtualCores());
// allocate container for app2 with 1GB memory and 1 vcore
fiCaApp2.updateResourceRequests(Collections.singletonList(
    TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true,
        u0Priority, recordFactory)));
cs.handle(new NodeUpdateSchedulerEvent(node));
cs.handle(new NodeUpdateSchedulerEvent(node2));
assertEquals(9 * GB, fiCaApp2.getHeadroom().getMemorySize());
assertEquals(15, fiCaApp2.getHeadroom().getVirtualCores());
}
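The arithmetic in the leading comment can be reproduced outside the scheduler with the org.apache.hadoop.yarn.util.resource.Resources helper class. The lines below are an illustrative sketch, not part of the test: they assume the headroom is simply the user limit (the cluster total scaled by the user limit factor) minus what app 1 already consumes, computed component-wise, which matches the values the test asserts. The sketch assumes the same static imports as the test plus Resources.

// Sketch only: reproduces the expected app 1 headroom from the comment above.
Resource clusterTotal = Resource.newInstance(20 * 1024, 20); // 20GB, 20 vcores
Resource userLimit = Resources.multiply(clusterTotal, 0.8);  // 16GB, 16 vcores
Resource app1Used = Resource.newInstance(10 * 1024, 1);      // 10GB, 1 vcore
Resource headroom = Resources.subtract(userLimit, app1Used); // 6GB, 15 vcores
assertEquals(6 * 1024, headroom.getMemorySize());
assertEquals(15, headroom.getVirtualCores());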
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl in project hadoop by apache.
The class TestCapacityScheduler, method testApplicationHeadRoom.
// Verifies that the headroom passed to the ApplicationMaster has been
// updated in RMAppAttemptMetrics
@Test
public void testApplicationHeadRoom() throws Exception {
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
MockRM rm = new MockRM(conf);
rm.start();
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
RMAppAttemptMetrics attemptMetric = new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
RMAppImpl app = mock(RMAppImpl.class);
when(app.getApplicationId()).thenReturn(appId);
RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
Container container = mock(Container.class);
when(attempt.getMasterContainer()).thenReturn(container);
ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
when(attempt.getSubmissionContext()).thenReturn(submissionContext);
when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
when(app.getCurrentAppAttempt()).thenReturn(attempt);
rm.getRMContext().getRMApps().put(appId, app);
SchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appId, "default", "user");
cs.handle(addAppEvent);
SchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
cs.handle(addAttemptEvent);
Allocation allocate = cs.allocate(appAttemptId,
    Collections.<ResourceRequest>emptyList(),
    Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
Assert.assertNotNull(attempt);
Assert.assertEquals(Resource.newInstance(0, 0), allocate.getResourceLimit());
Assert.assertEquals(Resource.newInstance(0, 0), attemptMetric.getApplicationAttemptHeadroom());
// Add a node to cluster
Resource newResource = Resource.newInstance(4 * GB, 1);
RMNode node = MockNodes.newNodeInfo(0, newResource, 1, "127.0.0.1");
cs.handle(new NodeAddedSchedulerEvent(node));
allocate = cs.allocate(appAttemptId,
    Collections.<ResourceRequest>emptyList(),
    Collections.<ContainerId>emptyList(), null, null, NULL_UPDATE_REQUESTS);
// All resources should be sent as headroom
Assert.assertEquals(newResource, allocate.getResourceLimit());
Assert.assertEquals(newResource, attemptMetric.getApplicationAttemptHeadroom());
rm.stop();
}
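As a hypothetical follow-up (not in the Hadoop source), the lines below could be placed just before rm.stop() in the test above to mirror the allocation pattern of the first snippet: after a resource request and a node heartbeat, the headroom reported to the AM should shrink by whatever the scheduler hands out. The expectation in the final comment is an assumption, not an assertion from the original test.

// Hypothetical continuation; would go right before rm.stop() above.
FiCaSchedulerApp fiCaApp = cs.getSchedulerApplications()
    .get(appId).getCurrentAppAttempt();
fiCaApp.updateResourceRequests(Collections.singletonList(
    TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true,
        TestUtils.createMockPriority(1),
        RecordFactoryProvider.getRecordFactory(null))));
cs.handle(new NodeUpdateSchedulerEvent(node));
// With 4GB/1 vcore in the cluster and 1GB/1 vcore now allocated, the next
// allocate() call would be expected to report a smaller headroom.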
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl in project hadoop by apache.
The class TestCapacityScheduler, method testMoveAttemptNotAdded.
@Test(timeout = 60000)
public void testMoveAttemptNotAdded() throws Exception {
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
MockRM rm = new MockRM(getCapacityConfiguration(conf));
rm.start();
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
ApplicationId appId = BuilderUtils.newApplicationId(100, 1);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
RMAppAttemptMetrics attemptMetric = new RMAppAttemptMetrics(appAttemptId, rm.getRMContext());
RMAppImpl app = mock(RMAppImpl.class);
when(app.getApplicationId()).thenReturn(appId);
RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
Container container = mock(Container.class);
when(attempt.getMasterContainer()).thenReturn(container);
ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
when(attempt.getSubmissionContext()).thenReturn(submissionContext);
when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
when(app.getCurrentAppAttempt()).thenReturn(attempt);
rm.getRMContext().getRMApps().put(appId, app);
SchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appId, "a1", "user");
try {
cs.moveApplication(appId, "b1");
fail("Move should throw exception app not available");
} catch (YarnException e) {
assertEquals("App to be moved application_100_0001 not found.", e.getMessage());
}
cs.handle(addAppEvent);
cs.moveApplication(appId, "b1");
SchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
cs.handle(addAttemptEvent);
CSQueue rootQ = cs.getRootQueue();
CSQueue queueB = cs.getQueue("b");
CSQueue queueA = cs.getQueue("a");
CSQueue queueA1 = cs.getQueue("a1");
CSQueue queueB1 = cs.getQueue("b1");
Assert.assertEquals(1, rootQ.getNumApplications());
Assert.assertEquals(0, queueA.getNumApplications());
Assert.assertEquals(1, queueB.getNumApplications());
Assert.assertEquals(0, queueA1.getNumApplications());
Assert.assertEquals(1, queueB1.getNumApplications());
rm.close();
}
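The test relies on a getCapacityConfiguration(conf) helper, defined elsewhere in TestCapacityScheduler, that builds the a/a1 and b/b1 queue hierarchy the assertions refer to. Below is a minimal sketch of such a configuration; the method name and the queue capacities are illustrative assumptions, not the values from the Hadoop source.

// Sketch only: a capacity-scheduler configuration with root -> {a, b},
// a -> {a1}, b -> {b1}. Capacities are illustrative.
private Configuration getCapacityConfigurationSketch(Configuration conf) {
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(conf);
  csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { "a", "b" });
  csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a", 50f);
  csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".b", 50f);
  csConf.setQueues(CapacitySchedulerConfiguration.ROOT + ".a", new String[] { "a1" });
  csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a.a1", 100f);
  csConf.setQueues(CapacitySchedulerConfiguration.ROOT + ".b", new String[] { "b1" });
  csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".b.b1", 100f);
  return csConf;
}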
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl in project hadoop by apache.
The class TestClientRMService, method testGetApplicationResourceUsageReportDummy.
@Test
public void testGetApplicationResourceUsageReportDummy() throws YarnException, IOException {
ApplicationAttemptId attemptId = getApplicationAttemptId(1);
YarnScheduler yarnScheduler = mockYarnScheduler();
RMContext rmContext = mock(RMContext.class);
mockRMContext(yarnScheduler, rmContext);
when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler<Event>() {
public void handle(Event event) {
}
});
ApplicationSubmissionContext asContext = mock(ApplicationSubmissionContext.class);
YarnConfiguration config = new YarnConfiguration();
RMAppAttemptImpl rmAppAttemptImpl = new RMAppAttemptImpl(attemptId, rmContext, yarnScheduler, null, asContext, config, null, null);
ApplicationResourceUsageReport report = rmAppAttemptImpl.getApplicationResourceUsageReport();
assertEquals(report, RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT);
}
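Since org.apache.hadoop.yarn.event.EventHandler declares a single handle(Event) method, the anonymous no-op handler above could also be written as a lambda on Java 8 or later; this equivalent form is shown only for readability and does not change behavior.

// Equivalent no-op event handler as a lambda (illustrative only).
when(rmContext.getDispatcher().getEventHandler())
    .thenReturn((EventHandler<Event>) event -> { });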
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl in project hadoop by apache.
The class FairSchedulerTestBase, method createMockRMApp.
protected RMApp createMockRMApp(ApplicationAttemptId attemptId) {
RMApp app = mock(RMAppImpl.class);
when(app.getApplicationId()).thenReturn(attemptId.getApplicationId());
RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
when(attempt.getAppAttemptId()).thenReturn(attemptId);
RMAppAttemptMetrics attemptMetric = mock(RMAppAttemptMetrics.class);
when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
when(app.getCurrentAppAttempt()).thenReturn(attempt);
ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
when(submissionContext.getUnmanagedAM()).thenReturn(false);
when(attempt.getSubmissionContext()).thenReturn(submissionContext);
resourceManager.getRMContext().getRMApps().put(attemptId.getApplicationId(), app);
return app;
}
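A hypothetical caller of this helper from a Fair Scheduler test is sketched below (the ids are arbitrary example values). The helper registers the mock app in the RM context, so later scheduler events can resolve it by application id.

// Hypothetical usage; ids are illustrative.
ApplicationId appId = ApplicationId.newInstance(100L, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
RMApp app = createMockRMApp(attemptId);
// app is now present in resourceManager.getRMContext().getRMApps(), so an
// AppAddedSchedulerEvent for appId will find this mock.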