Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
The class Application, method submit().
@SuppressWarnings("deprecation")
public synchronized void submit() throws IOException, YarnException {
  ApplicationSubmissionContext context =
      recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
  context.setApplicationId(this.applicationId);
  context.setQueue(this.queue);
  // Set up the container launch context for the application master
  ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
  context.setAMContainerSpec(amContainer);
  context.setResource(Resources.createResource(
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB));
  SubmitApplicationRequest request =
      recordFactory.newRecordInstance(SubmitApplicationRequest.class);
  request.setApplicationSubmissionContext(context);
  final ResourceScheduler scheduler = resourceManager.getResourceScheduler();
  resourceManager.getClientRMService().submitApplication(request);
  // Drive the RMApp state machine through START -> APP_NEW_SAVED -> APP_ACCEPTED
  RMAppEvent event = new RMAppEvent(this.applicationId, RMAppEventType.START);
  resourceManager.getRMContext().getRMApps().get(applicationId).handle(event);
  event = new RMAppEvent(this.applicationId, RMAppEventType.APP_NEW_SAVED);
  resourceManager.getRMContext().getRMApps().get(applicationId).handle(event);
  event = new RMAppEvent(this.applicationId, RMAppEventType.APP_ACCEPTED);
  resourceManager.getRMContext().getRMApps().get(applicationId).handle(event);
  // Notify the scheduler of the new application and its first attempt
  AppAddedSchedulerEvent addAppEvent =
      new AppAddedSchedulerEvent(this.applicationId, this.queue, "user");
  scheduler.handle(addAppEvent);
  AppAttemptAddedSchedulerEvent addAttemptEvent =
      new AppAttemptAddedSchedulerEvent(this.applicationAttemptId, false);
  scheduler.handle(addAttemptEvent);
}
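For contrast, the test method above bypasses the public client API and drives the ResourceManager's internal services and event handlers directly. A rough sketch of what the equivalent submission looks like through the public YarnClient API; the application name and queue below are illustrative placeholders, not values from the test:

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Records;

public class YarnClientSubmitSketch {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    try {
      // Ask the RM for a new application id and an empty submission context.
      YarnClientApplication app = yarnClient.createApplication();
      ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
      appContext.setApplicationName("sketch-app");   // illustrative name
      appContext.setQueue("default");                // illustrative queue
      appContext.setAMContainerSpec(Records.newRecord(ContainerLaunchContext.class));
      appContext.setResource(Resource.newInstance(1024, 1));
      // Unlike the test above, submitApplication triggers the RMApp and scheduler
      // events on the server side; the client does not raise them itself.
      System.out.println("Submitted " + yarnClient.submitApplication(appContext));
    } finally {
      yarnClient.stop();
    }
  }
}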
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
The class TestAppManager, method testRMAppSubmitMaxAppAttempts().
@Test(timeout = 30000)
public void testRMAppSubmitMaxAppAttempts() throws Exception {
  int[] globalMaxAppAttempts = new int[] { 10, 1 };
  int[][] individualMaxAppAttempts = new int[][] {
      new int[] { 9, 10, 11, 0 },
      new int[] { 1, 10, 0, -1 } };
  int[][] expectedNums = new int[][] {
      new int[] { 9, 10, 10, 10 },
      new int[] { 1, 1, 1, 1 } };
  for (int i = 0; i < globalMaxAppAttempts.length; ++i) {
    // Iterate over all individual settings for this global value (matches the
    // i * 4 + j application-id indexing below).
    for (int j = 0; j < individualMaxAppAttempts[i].length; ++j) {
      ResourceScheduler scheduler = mockResourceScheduler();
      Configuration conf = new Configuration();
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, globalMaxAppAttempts[i]);
      ApplicationMasterService masterService =
          new ApplicationMasterService(rmContext, scheduler);
      TestRMAppManager appMonitor = new TestRMAppManager(rmContext,
          new ClientToAMTokenSecretManagerInRM(), scheduler, masterService,
          new ApplicationACLsManager(conf), conf);
      ApplicationId appID = MockApps.newAppID(i * 4 + j + 1);
      asContext.setApplicationId(appID);
      if (individualMaxAppAttempts[i][j] != 0) {
        asContext.setMaxAppAttempts(individualMaxAppAttempts[i][j]);
      }
      appMonitor.submitApplication(asContext, "test");
      RMApp app = rmContext.getRMApps().get(appID);
      Assert.assertEquals("max application attempts doesn't match",
          expectedNums[i][j], app.getMaxAppAttempts());
      // wait for the submission event to be processed
      int timeoutSecs = 0;
      while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
        Thread.sleep(1000);
      }
      setAppEventType(RMAppEventType.KILL);
    }
  }
}
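The expectedNums table encodes the clamping rule the test asserts: an application's own max-attempts setting is honored only when it is positive and does not exceed the global RM_AM_MAX_ATTEMPTS; otherwise the global value applies. A minimal, hypothetical helper expressing that relationship (illustration only, not RMAppManager code):

// Hypothetical illustration of the clamping behavior the test expects.
static int effectiveMaxAppAttempts(int individual, int global) {
  // individual <= 0 means "not set"; values above the global cap are clamped.
  if (individual <= 0 || individual > global) {
    return global;
  }
  return individual;
}

// e.g. effectiveMaxAppAttempts(11, 10) == 10, effectiveMaxAppAttempts(0, 10) == 10,
//      effectiveMaxAppAttempts(10, 1) == 1, matching the expectedNums table above.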
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
The class TestApplicationACLs, method setup().
@BeforeClass
public static void setup() throws InterruptedException, IOException {
  RMStateStore store = RMStateStoreFactory.getStore(conf);
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  AccessControlList adminACL = new AccessControlList("");
  adminACL.addGroup(SUPER_GROUP);
  conf.set(YarnConfiguration.YARN_ADMIN_ACL, adminACL.getAclString());
  resourceManager = new MockRM(conf) {

    @Override
    protected QueueACLsManager createQueueACLsManager(ResourceScheduler scheduler,
        Configuration conf) {
      QueueACLsManager mockQueueACLsManager = mock(QueueACLsManager.class);
      when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),
          any(QueueACL.class), any(RMApp.class), any(String.class), any()))
          .thenAnswer(new Answer() {
            public Object answer(InvocationOnMock invocation) {
              return isQueueUser;
            }
          });
      return mockQueueACLsManager;
    }

    @Override
    protected ClientRMService createClientRMService() {
      return new ClientRMService(getRMContext(), this.scheduler, this.rmAppManager,
          this.applicationACLsManager, this.queueACLsManager, null);
    }
  };
  new Thread() {
    public void run() {
      UserGroupInformation.createUserForTesting(ENEMY, new String[] {});
      UserGroupInformation.createUserForTesting(FRIEND, new String[] { FRIENDLY_GROUP });
      UserGroupInformation.createUserForTesting(SUPER_USER, new String[] { SUPER_GROUP });
      resourceManager.start();
    }
  }.start();
  int waitCount = 0;
  while (resourceManager.getServiceState() == STATE.INITED && waitCount++ < 60) {
    LOG.info("Waiting for RM to start...");
    Thread.sleep(1500);
  }
  if (resourceManager.getServiceState() != STATE.STARTED) {
    // RM could have failed.
    throw new IOException("ResourceManager failed to start. Final state is "
        + resourceManager.getServiceState());
  }
  UserGroupInformation owner = UserGroupInformation.createRemoteUser(APP_OWNER);
  rmClient = owner.doAs(new PrivilegedExceptionAction<ApplicationClientProtocol>() {
    @Override
    public ApplicationClientProtocol run() throws Exception {
      return (ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class,
          rmAddress, conf);
    }
  });
}
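After setup completes, rmClient is an ApplicationClientProtocol proxy running as APP_OWNER; the ACL tests in this class can then call it (or an equivalent proxy created under a different user) and check what each user is allowed to see. A hedged sketch of one such call; the application id is a placeholder supplied by the caller:

// Sketch only: fetch a report for a previously submitted application through the proxy.
// ApplicationClientProtocol, GetApplicationReportRequest, ApplicationReport and
// ApplicationId come from org.apache.hadoop.yarn.api and its protocolrecords/records packages.
static ApplicationReport fetchReport(ApplicationClientProtocol client,
    ApplicationId appId) throws Exception {
  GetApplicationReportRequest request = GetApplicationReportRequest.newInstance(appId);
  return client.getApplicationReport(request).getApplicationReport();
}
// Whether fields such as diagnostics or the tracking URL are populated depends on
// the caller's view ACLs, which is what the surrounding test class exercises.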
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
The class TestCapacityScheduler, method verifyAMLimitForLeafQueue().
private void verifyAMLimitForLeafQueue(CapacitySchedulerConfiguration config)
    throws Exception {
  MockRM rm = setUpMove(config);
  rm.registerNode("127.0.0.1:1234", 2 * GB);
  String queueName = "a1";
  String userName = "user_0";
  ResourceScheduler scheduler = rm.getRMContext().getScheduler();
  LeafQueue queueA = (LeafQueue) ((CapacityScheduler) scheduler).getQueue(queueName);
  Resource amResourceLimit = queueA.getAMResourceLimit();
  Resource amResource1 = Resource.newInstance(
      amResourceLimit.getMemorySize() + 1024, amResourceLimit.getVirtualCores() + 1);
  Resource amResource2 = Resource.newInstance(
      amResourceLimit.getMemorySize() + 2048, amResourceLimit.getVirtualCores() + 1);
  rm.submitApp(amResource1, "app-1", userName, null, queueName);
  rm.submitApp(amResource2, "app-2", userName, null, queueName);
  // When the AM limit is exceeded, only one application is activated; the
  // remaining applications stay pending.
  Assert.assertEquals("PendingApplications should be 1", 1,
      queueA.getNumPendingApplications());
  Assert.assertEquals("Active applications should be 1", 1,
      queueA.getNumActiveApplications());
  Assert.assertEquals("User PendingApplications should be 1", 1,
      queueA.getUser(userName).getPendingApplications());
  Assert.assertEquals("User Active applications should be 1", 1,
      queueA.getUser(userName).getActiveApplications());
  rm.stop();
}
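This helper expects the caller to supply a CapacitySchedulerConfiguration. A hypothetical driver might look like the sketch below; the resource-calculator setting is an assumption for illustration, not taken from the snippet above:

// Hypothetical caller of the helper above; the real test builds its own configuration.
@Test(timeout = 60000)
public void testAMLimitForLeafQueueSketch() throws Exception {
  CapacitySchedulerConfiguration config = new CapacitySchedulerConfiguration();
  // Assumption: use the memory-only DefaultResourceCalculator so the AM limit is
  // driven purely by memory.
  config.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DefaultResourceCalculator.class.getName());
  verifyAMLimitForLeafQueue(config);
}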
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler in project hadoop by apache.
The class TestCapacityScheduler, method testAMUsedResource().
// Test verifies the AM used resource for a LeafQueue when the AM ResourceRequest
// is less than the minimum allocation
@Test(timeout = 30000)
public void testAMUsedResource() throws Exception {
  MockRM rm = setUpMove();
  rm.registerNode("127.0.0.1:1234", 4 * GB);
  Configuration conf = rm.getConfig();
  int minAllocMb = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
  int amMemory = 50;
  assertTrue("AM memory is greater than or equal to minAllocation",
      amMemory < minAllocMb);
  Resource minAllocResource = Resource.newInstance(minAllocMb, 1);
  String queueName = "a1";
  RMApp rmApp = rm.submitApp(amMemory, "app-1", "user_0", null, queueName);
  assertEquals("RMApp does not contain the minimum allocation", minAllocResource,
      rmApp.getAMResourceRequest().getCapability());
  ResourceScheduler scheduler = rm.getRMContext().getScheduler();
  LeafQueue queueA = (LeafQueue) ((CapacityScheduler) scheduler).getQueue(queueName);
  assertEquals("Minimum Resource for AM is incorrect", minAllocResource,
      queueA.getUser("user_0").getResourceUsage().getAMUsed());
  rm.stop();
}
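The assertions hold because the scheduler normalizes every request upward to the configured minimum allocation (and, in the memory-only case, to a multiple of it), so a 50 MB AM request is accounted against the queue as one full minimum allocation. A back-of-the-envelope illustration of that rounding, not the scheduler's implementation:

// Illustration only: round a memory request up to a multiple of the minimum allocation.
static long normalizeMemoryMb(long requestedMb, long minAllocMb) {
  long multiples = Math.max(1, (requestedMb + minAllocMb - 1) / minAllocMb);
  return multiples * minAllocMb;
}

// normalizeMemoryMb(50, 1024) == 1024, which is why the 50 MB AM request above
// shows up as the full minimum allocation in the queue's AM-used accounting.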