Use of org.apache.hadoop.yarn.server.resourcemanager.MockAM in project hadoop by apache.
From class TestApplicationLimitsByPartition, method testUserAMResourceLimitWithLabels.
@Test(timeout = 120000)
public void testUserAMResourceLimitWithLabels() throws Exception {
    /*
     * Test Case:
     * Verify the user-level AM resource limit. This test case is run with
     * two users, and the per-partition am-resource-limit is 0.4, which
     * internally amounts to 4GB. Hence 2GB is available to each user for
     * its AM resource.
     *
     * The test then creates a scenario where the per-partition AM resource
     * limit is not yet reached, but the user-level am-resource limit is.
     * Hence the app will be pending.
     */
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    simpleNodeLabelMappingToManager();
    CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration) TestUtils.getConfigurationWithQueueLabels(conf);
    // After getting the queue conf, configure the AM resource percent for
    // queue A1 as 0.4 (label X). Also set the user limit to 50% for this
    // queue, so with two users submitting applications, each user gets 50%
    // of the AM resource available in this partition.
    final String A1 = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
    config.setMaximumAMResourcePercentPerPartition(A1, "x", 0.4f);
    config.setUserLimit(A1, 50);
    // Now inject the node label manager with this updated config
    MockRM rm1 = new MockRM(config) {
        @Override
        public RMNodeLabelsManager createNodeLabelManager() {
            return mgr;
        }
    };
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    // label = x
    MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB);
    // label = y
    rm1.registerNode("h2:1234", 10 * GB);
    // label = <empty>
    rm1.registerNode("h3:1234", 10 * GB);
    // Submit app1 with a 1GB AM to queue A1, label X, for user_0
    RMApp app1 = rm1.submitApp(GB, "app", user_0, null, "a1", "x");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    // Place a few allocate requests to make it an active application
    am1.allocate("*", 1 * GB, 15, new ArrayList<ContainerId>(), "");
    // Now submit a 2nd app to queue A1, label X, for user_1
    RMApp app2 = rm1.submitApp(GB, "app", user_1, null, "a1", "x");
    MockRM.launchAndRegisterAM(app2, rm1, nm1);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
    Assert.assertNotNull(leafQueue);
    // Verify the active application count in this queue.
    Assert.assertEquals(2, leafQueue.getNumActiveApplications());
    Assert.assertEquals(1, leafQueue.getNumActiveApplications(user_0));
    Assert.assertEquals(0, leafQueue.getNumPendingApplications());
    // Submit a 3rd app to queue A1, label X, for user_1. Now user_1 has
    // 2 applications (2GB of AM resource) and user_0 has one app (1GB).
    RMApp app3 = rm1.submitApp(GB, "app", user_1, null, "a1", "x");
    MockAM am2 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
    // Place a few allocate requests to make it an active application. This
    // ensures that user_1 and user_0 are active users.
    am2.allocate("*", 1 * GB, 10, new ArrayList<ContainerId>(), "");
    // Submit a final app to queue A1 for label X. Since it is submitted for
    // user_1, that user would need 3GB for AMs.
    // 4GB -> 40% of label "X" in queue A1
    // With 2 users, 50% of 4GB (2GB) is the maximum for each user. user_1
    // has already reached this 2GB limit, hence this app will be pending.
    RMApp pendingApp = rm1.submitApp(GB, "app", user_1, null, "a1", "x");
    // Verify active application counts per user and at the queue level.
    Assert.assertEquals(3, leafQueue.getNumActiveApplications());
    Assert.assertEquals(1, leafQueue.getNumActiveApplications(user_0));
    Assert.assertEquals(2, leafQueue.getNumActiveApplications(user_1));
    Assert.assertEquals(1, leafQueue.getNumPendingApplications(user_1));
    Assert.assertEquals(1, leafQueue.getNumPendingApplications());
    // Verify diagnostic messages
    Assert.assertTrue("AM diagnostics not set properly", pendingApp.getDiagnostics().toString().contains(AMState.INACTIVATED.getDiagnosticMessage()));
    Assert.assertTrue("AM diagnostics not set properly", pendingApp.getDiagnostics().toString().contains(CSAMContainerLaunchDiagnosticsConstants.USER_AM_RESOURCE_LIMIT_EXCEED));
    rm1.close();
}
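The per-user AM limit arithmetic this test relies on can be reproduced in isolation. Below is a minimal, self-contained sketch; the class and method names (AmLimitMath, partitionAmLimitMb, perUserAmLimitMb) are illustrative and not part of the YARN API.

public class AmLimitMath {
    // Per-partition AM limit = partition capacity * am-resource-percent.
    static long partitionAmLimitMb(long partitionMb, double amResourcePercent) {
        return (long) (partitionMb * amResourcePercent);
    }

    // Per-user share of that limit, given the queue's user-limit percentage.
    static long perUserAmLimitMb(long amLimitMb, int userLimitPercent) {
        return amLimitMb * userLimitPercent / 100;
    }

    public static void main(String[] args) {
        long partitionMb = 10 * 1024;                           // h1 registers 10GB under label "x"
        long amLimitMb = partitionAmLimitMb(partitionMb, 0.4);  // 4096MB (4GB)
        long perUserMb = perUserAmLimitMb(amLimitMb, 50);       // 2048MB (2GB) per user
        System.out.println(amLimitMb + "MB total, " + perUserMb + "MB per user");
        // user_1 already runs two 1GB AMs (2GB), so a third AM for user_1
        // would exceed its 2GB share; that app stays pending.
    }
}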
Use of org.apache.hadoop.yarn.server.resourcemanager.MockAM in project hadoop by apache.
From class TestApplicationPriority, method testApplicationPriorityAllocation.
@Test
public void testApplicationPriorityAllocation() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    // Set the maximum application priority to 10
    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
    MockRM rm = new MockRM(conf);
    rm.start();
    Priority appPriority1 = Priority.newInstance(5);
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * GB);
    RMApp app1 = rm.submitApp(1 * GB, appPriority1);
    // kick the scheduler: 1GB given to AM1, 15GB remaining on nm1
    MockAM am1 = MockRM.launchAM(app1, rm, nm1);
    am1.registerAppAttempt();
    // allocate 7 containers for App1
    List<Container> allocated1 = am1.allocateAndWaitForContainers("127.0.0.1", 7, 2 * GB, nm1);
    Assert.assertEquals(7, allocated1.size());
    Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
    // check node report: 15GB used (1 AM and 7 containers), 1GB available
    SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemorySize());
    // Submit the second app, App2, with priority 8 (higher than App1)
    Priority appPriority2 = Priority.newInstance(8);
    RMApp app2 = rm.submitApp(1 * GB, appPriority2);
    // kick the scheduler: the 1GB that was free is given to App2's AM
    MockAM am2 = MockRM.launchAM(app2, rm, nm1);
    am2.registerAppAttempt();
    // check node report: 16GB used, 0GB available
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
    // get the scheduler
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    // get the scheduler app
    FiCaSchedulerApp schedulerAppAttempt = cs.getSchedulerApplications().get(app1.getApplicationId()).getCurrentAppAttempt();
    // kill 2 of App1's containers to free up some space
    int counter = 0;
    for (Container c : allocated1) {
        if (++counter > 2) {
            break;
        }
        cs.markContainerForKillable(schedulerAppAttempt.getRMContainer(c.getId()));
    }
    // check node report: 12GB used, 4GB available
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemorySize());
    // send an updated request for App1
    am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList<ContainerId>());
    // kick the scheduler: since App2's priority is higher than App1's, App2
    // gets the remaining cluster space.
    List<Container> allocated2 = am2.allocateAndWaitForContainers("127.0.0.1", 2, 2 * GB, nm1);
    // App2 now has 2 containers.
    Assert.assertEquals(2, allocated2.size());
    // check node report: 16GB used, 0GB available
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
    rm.stop();
}
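The allocation order in this test follows from a simple rule: when capacity frees up, the higher-priority application is served first. Here is a minimal sketch of that ordering, assuming a plain PriorityQueue stand-in (SchedulableApp is an illustrative class, not a YARN type, and the real scheduler's tie-breaking by application id is omitted).

import java.util.Comparator;
import java.util.PriorityQueue;

public class PriorityOrderingSketch {
    static class SchedulableApp {
        final String id;
        final int priority;
        SchedulableApp(String id, int priority) {
            this.id = id;
            this.priority = priority;
        }
    }

    public static void main(String[] args) {
        // Highest priority first, mirroring the preference the test asserts.
        PriorityQueue<SchedulableApp> order = new PriorityQueue<>(
                Comparator.comparingInt((SchedulableApp a) -> a.priority).reversed());
        order.add(new SchedulableApp("app1", 5));
        order.add(new SchedulableApp("app2", 8));
        // The 4GB freed by killing App1's containers is offered to app2 first.
        System.out.println(order.poll().id); // app2
    }
}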
Use of org.apache.hadoop.yarn.server.resourcemanager.MockAM in project hadoop by apache.
From class TestApplicationPriority, method testApplicationPriorityAllocationWithChangeInPriority.
@Test
public void testApplicationPriorityAllocationWithChangeInPriority() throws Exception {
    Configuration conf = new Configuration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    // Set the maximum application priority to 10
    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
    MockRM rm = new MockRM(conf);
    rm.start();
    Priority appPriority1 = Priority.newInstance(5);
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 16 * GB);
    RMApp app1 = rm.submitApp(1 * GB, appPriority1);
    // kick the scheduler: 1GB given to AM1, 15GB remaining on nm1
    MockAM am1 = MockRM.launchAM(app1, rm, nm1);
    am1.registerAppAttempt();
    // add a request for containers and wait for them to be allocated
    int NUM_CONTAINERS = 7;
    List<Container> allocated1 = am1.allocateAndWaitForContainers("127.0.0.1", NUM_CONTAINERS, 2 * GB, nm1);
    Assert.assertEquals(7, allocated1.size());
    Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
    // check node report: 15GB used (1 AM and 7 containers), 1GB available
    SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemorySize());
    // Submit the second app, App2, with priority 8 (higher than App1)
    Priority appPriority2 = Priority.newInstance(8);
    RMApp app2 = rm.submitApp(1 * GB, appPriority2);
    // kick the scheduler: the 1GB that was free is given to App2's AM
    MockAM am2 = MockRM.launchAM(app2, rm, nm1);
    am2.registerAppAttempt();
    // check node report: 16GB used, 0GB available
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
    // get the scheduler
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    // get the scheduler app
    FiCaSchedulerApp schedulerAppAttemptApp1 = cs.getSchedulerApplications().get(app1.getApplicationId()).getCurrentAppAttempt();
    // kill 2 containers to free up some space
    int counter = 0;
    for (Iterator<Container> iterator = allocated1.iterator(); iterator.hasNext(); ) {
        Container c = iterator.next();
        if (++counter > 2) {
            break;
        }
        cs.markContainerForKillable(schedulerAppAttemptApp1.getRMContainer(c.getId()));
        iterator.remove();
    }
    // check node report: 12GB used, 4GB available
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemorySize());
    // add a request for containers for App1
    am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList<ContainerId>());
    // add a request for containers for App2 and wait for them to be allocated
    List<Container> allocated2 = am2.allocateAndWaitForContainers("127.0.0.1", 2, 2 * GB, nm1);
    Assert.assertEquals(2, allocated2.size());
    // check node report: 16GB used, 0GB available
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
    // kill 1 more
    counter = 0;
    for (Iterator<Container> iterator = allocated1.iterator(); iterator.hasNext(); ) {
        Container c = iterator.next();
        if (++counter > 1) {
            break;
        }
        cs.markContainerForKillable(schedulerAppAttemptApp1.getRMContainer(c.getId()));
        iterator.remove();
    }
    // check node report: 14GB used, 2GB available
    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(14 * GB, report_nm1.getUsedResource().getMemorySize());
    Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemorySize());
    // Lower App2's priority to 3 (below App1's priority of 5)
    Priority appPriority3 = Priority.newInstance(3);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(app2.getUser());
    cs.updateApplicationPriority(appPriority3, app2.getApplicationId(), null, ugi);
    // add a request for containers for App2
    am2.allocate("127.0.0.1", 2 * GB, 3, new ArrayList<ContainerId>());
    // add a request for containers for App1 and wait for an allocation;
    // since App1's priority is now the higher one, App1 gets a container.
    List<Container> allocated3 = am1.allocateAndWaitForContainers("127.0.0.1", 1, 2 * GB, nm1);
    Assert.assertEquals(1, allocated3.size());
    // App1 now has 5 containers plus 1 AM; App2 has 2 containers.
    Assert.assertEquals(6, schedulerAppAttemptApp1.getLiveContainers().size());
    rm.stop();
}
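The second half of the test hinges on re-ordering after a priority update. In the PriorityQueue stand-in from the previous sketch, an update means remove-and-reinsert, since the queue does not observe in-place mutations; this is a sketch of the idea only, not the CapacityScheduler's actual mechanism, and all names are illustrative.

import java.util.Comparator;
import java.util.PriorityQueue;

public class PriorityUpdateSketch {
    static class SchedulableApp {
        final String id;
        int priority;
        SchedulableApp(String id, int priority) {
            this.id = id;
            this.priority = priority;
        }
    }

    public static void main(String[] args) {
        PriorityQueue<SchedulableApp> order = new PriorityQueue<>(
                Comparator.comparingInt((SchedulableApp a) -> a.priority).reversed());
        SchedulableApp app1 = new SchedulableApp("app1", 5);
        SchedulableApp app2 = new SchedulableApp("app2", 8);
        order.add(app1);
        order.add(app2);
        // Lower app2's priority to 3: remove, mutate, re-insert so the
        // queue re-evaluates its position.
        order.remove(app2);
        app2.priority = 3;
        order.add(app2);
        // app1 (priority 5) now outranks app2 (priority 3) and receives the
        // next freed container, matching the test's final assertion.
        System.out.println(order.poll().id); // app1
    }
}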
Use of org.apache.hadoop.yarn.server.resourcemanager.MockAM in project hadoop by apache.
From class TestCapacityScheduler, method testSchedulingOnRemovedNode.
@Test
public void testSchedulingOnRemovedNode() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    conf.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false);
    MockRM rm = new MockRM(conf);
    rm.start();
    RMApp app = rm.submitApp(100);
    rm.drainEvents();
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 10240, 10);
    MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1);
    // register nm2; removing nm2 (rather than nm1) later keeps the AM alive
    MockNM nm2 = rm.registerNode("127.0.0.1:1235", 10240, 10);
    am.allocate(ResourceRequest.ANY, 2048, 1, null);
    CapacityScheduler scheduler = (CapacityScheduler) rm.getRMContext().getScheduler();
    FiCaSchedulerNode node = (FiCaSchedulerNode) scheduler.getNodeTracker().getNode(nm2.getNodeId());
    scheduler.handle(new NodeRemovedSchedulerEvent(rm.getRMContext().getRMNodes().get(nm2.getNodeId())));
    // the scheduler node has been removed; try to allocate a container on it
    scheduler.allocateContainersToNode(new SimplePlacementSet<>(node), true);
    AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(am.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent1);
    rm.stop();
}
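What the test asserts, implicitly, is that allocating against an already-removed node is a harmless no-op rather than an exception. A minimal sketch of that defensive pattern, assuming a map-based tracker; RemovedNodeSketch and allocateOn are illustrative names, not scheduler API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RemovedNodeSketch {
    private final Map<String, Integer> availableMbByNode = new ConcurrentHashMap<>();

    void addNode(String nodeId, int mb) { availableMbByNode.put(nodeId, mb); }
    void removeNode(String nodeId) { availableMbByNode.remove(nodeId); }

    // Returns true only if the node is still tracked and has room.
    boolean allocateOn(String nodeId, int mb) {
        Integer avail = availableMbByNode.get(nodeId);
        if (avail == null || avail < mb) {
            return false; // node removed (or full): skip quietly, no exception
        }
        availableMbByNode.put(nodeId, avail - mb);
        return true;
    }

    public static void main(String[] args) {
        RemovedNodeSketch tracker = new RemovedNodeSketch();
        tracker.addNode("127.0.0.1:1235", 10240);
        tracker.removeNode("127.0.0.1:1235");
        System.out.println(tracker.allocateOn("127.0.0.1:1235", 2048)); // false, no crash
    }
}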
Use of org.apache.hadoop.yarn.server.resourcemanager.MockAM in project hadoop by apache.
From class TestCapacityScheduler, method testQueueHierarchyPendingResourceUpdate.
@Test
public void testQueueHierarchyPendingResourceUpdate() throws Exception {
    Configuration conf = TestUtils.getConfigurationWithQueueLabels(new Configuration(false));
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
    final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
    mgr.init(conf);
    mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
    MemoryRMStateStore memStore = new MemoryRMStateStore();
    memStore.init(conf);
    MockRM rm = new MockRM(conf, memStore) {
        protected RMNodeLabelsManager createNodeLabelManager() {
            return mgr;
        }
    };
    rm.start();
    // label = x
    MockNM nm1 = new MockNM("h1:1234", 200 * GB, rm.getResourceTrackerService());
    nm1.registerNode();
    // label = ""
    MockNM nm2 = new MockNM("h2:1234", 200 * GB, rm.getResourceTrackerService());
    nm2.registerNode();
    // Launch app1 in queue=a1
    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "a1");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    // Launch app2 in queue=b1
    RMApp app2 = rm.submitApp(8 * GB, "app", "user", null, "b1");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm2);
    // am1 asks for 8 * 1GB containers with no label
    am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(1 * GB), 8)), null);
    checkPendingResource(rm, "a1", 8 * GB, null);
    checkPendingResource(rm, "a", 8 * GB, null);
    checkPendingResource(rm, "root", 8 * GB, null);
    // am2 asks for 8 * 1GB containers with no label
    am2.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(1 * GB), 8)), null);
    checkPendingResource(rm, "a1", 8 * GB, null);
    checkPendingResource(rm, "a", 8 * GB, null);
    checkPendingResource(rm, "b1", 8 * GB, null);
    checkPendingResource(rm, "b", 8 * GB, null);
    // root = a + b
    checkPendingResource(rm, "root", 16 * GB, null);
    // am2 asks for 8 * 1GB containers at another priority with no label
    am2.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(2), "*", Resources.createResource(1 * GB), 8)), null);
    checkPendingResource(rm, "a1", 8 * GB, null);
    checkPendingResource(rm, "a", 8 * GB, null);
    checkPendingResource(rm, "b1", 16 * GB, null);
    checkPendingResource(rm, "b", 16 * GB, null);
    // root = a + b
    checkPendingResource(rm, "root", 24 * GB, null);
    // am1 asks for one 4GB container instead of 8 * 1GB at priority=1
    am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(4 * GB), 1)), null);
    checkPendingResource(rm, "a1", 4 * GB, null);
    checkPendingResource(rm, "a", 4 * GB, null);
    checkPendingResource(rm, "b1", 16 * GB, null);
    checkPendingResource(rm, "b", 16 * GB, null);
    // root = a + b
    checkPendingResource(rm, "root", 20 * GB, null);
    // am1 asks for one 8GB container with label=x
    am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(2), "*", Resources.createResource(8 * GB), 1, true, "x")), null);
    checkPendingResource(rm, "a1", 4 * GB, null);
    checkPendingResource(rm, "a", 4 * GB, null);
    checkPendingResource(rm, "a1", 8 * GB, "x");
    checkPendingResource(rm, "a", 8 * GB, "x");
    checkPendingResource(rm, "b1", 16 * GB, null);
    checkPendingResource(rm, "b", 16 * GB, null);
    // root = a + b
    checkPendingResource(rm, "root", 20 * GB, null);
    checkPendingResource(rm, "root", 8 * GB, "x");
    // some containers are allocated for am1; pending resource should decrease
    ContainerId containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
    Assert.assertTrue(rm.waitForState(nm1, containerId, RMContainerState.ALLOCATED));
    containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
    Assert.assertTrue(rm.waitForState(nm2, containerId, RMContainerState.ALLOCATED));
    checkPendingResource(rm, "a1", 0 * GB, null);
    checkPendingResource(rm, "a", 0 * GB, null);
    checkPendingResource(rm, "a1", 0 * GB, "x");
    checkPendingResource(rm, "a", 0 * GB, "x");
    // some containers could be allocated for am2 while we allocate containers
    // for am1, so just check that the pending resource of b1/b/root is > 0
    checkPendingResourceGreaterThanZero(rm, "b1", null);
    checkPendingResourceGreaterThanZero(rm, "b", null);
    // root = a + b
    checkPendingResourceGreaterThanZero(rm, "root", null);
    checkPendingResource(rm, "root", 0 * GB, "x");
    // complete am2; pending resource should now be 0
    AppAttemptRemovedSchedulerEvent appRemovedEvent = new AppAttemptRemovedSchedulerEvent(am2.getApplicationAttemptId(), RMAppAttemptState.FINISHED, false);
    rm.getResourceScheduler().handle(appRemovedEvent);
    checkPendingResource(rm, "a1", 0 * GB, null);
    checkPendingResource(rm, "a", 0 * GB, null);
    checkPendingResource(rm, "a1", 0 * GB, "x");
    checkPendingResource(rm, "a", 0 * GB, "x");
    checkPendingResource(rm, "b1", 0 * GB, null);
    checkPendingResource(rm, "b", 0 * GB, null);
    checkPendingResource(rm, "root", 0 * GB, null);
    checkPendingResource(rm, "root", 0 * GB, "x");
}
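The invariant checked throughout is that pending resource aggregates up the queue hierarchy per node label, so root always equals a + b. Here is a minimal sketch of that propagation, assuming an illustrative QueueNode class (not the CSQueue API):

import java.util.HashMap;
import java.util.Map;

public class PendingHierarchySketch {
    static class QueueNode {
        final QueueNode parent;
        final Map<String, Long> pendingMbByLabel = new HashMap<>();
        QueueNode(QueueNode parent) { this.parent = parent; }

        // Any change to a leaf's pending resource is applied to every
        // ancestor as well, so root always equals the sum of its children.
        void addPending(String label, long deltaMb) {
            pendingMbByLabel.merge(label, deltaMb, Long::sum);
            if (parent != null) {
                parent.addPending(label, deltaMb);
            }
        }
    }

    public static void main(String[] args) {
        QueueNode root = new QueueNode(null);
        QueueNode a = new QueueNode(root), b = new QueueNode(root);
        QueueNode a1 = new QueueNode(a), b1 = new QueueNode(b);
        a1.addPending("", 8L * 1024);   // am1 asks for 8 * 1GB, no label
        b1.addPending("", 8L * 1024);   // am2 asks for 8 * 1GB, no label
        a1.addPending("x", 8L * 1024);  // am1 asks for 8GB with label=x
        // root's no-label pending is 16GB (a + b); its label-x pending is 8GB.
        System.out.println(root.pendingMbByLabel); // {=16384, x=8192}
    }
}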