Use of org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager in project hadoop by apache.
From the class TestLeafQueue, method testApplicationQueuePercent.
@Test
public void testApplicationQueuePercent() throws Exception {
  Resource res = Resource.newInstance(10 * 1024, 10);
  CapacityScheduler scheduler = mock(CapacityScheduler.class);
  when(scheduler.getClusterResource()).thenReturn(res);
  when(scheduler.getResourceCalculator()).thenReturn(new DefaultResourceCalculator());
  ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getEpoch()).thenReturn(3L);
  when(rmContext.getScheduler()).thenReturn(scheduler);
  when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<ApplicationId, RMApp>());
  RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class);
  when(nlm.getResourceByLabel(any(), any())).thenReturn(res);
  when(rmContext.getNodeLabelManager()).thenReturn(nlm);
  // Queue "test" consumes 100% of the cluster, so its capacity and absolute
  // capacity are both 1.0f.
  Queue queue = createQueue("test", null, 1.0f, 1.0f);
  final String user = "user1";
  FiCaSchedulerApp app = new FiCaSchedulerApp(appAttId, user, queue,
      queue.getAbstractUsersManager(), rmContext);
  // Resource request
  Resource requestedResource = Resource.newInstance(1536, 2);
  app.getAppAttemptResourceUsage().incUsed(requestedResource);
  // In the "test" queue, 1536 MB used is 15% of both the queue and the cluster.
  assertEquals(15.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
  assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
  // Queue "test2" is a child of root and its capacity is 50% of root. As a
  // child of root, its absolute capacity is also 50%.
  queue = createQueue("test2", null, 0.5f, 0.5f);
  app = new FiCaSchedulerApp(appAttId, user, queue,
      queue.getAbstractUsersManager(), rmContext);
  app.getAppAttemptResourceUsage().incUsed(requestedResource);
  // In the "test2" queue, 1536 MB used is 30% of "test2" and 15% of the cluster.
  assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
  assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
  // Queue "test2.1" is 50% of queue "test2", which is 50% of the cluster.
  // Therefore, "test2.1" capacity is 50% and its absolute capacity is 25%.
  AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f);
  app = new FiCaSchedulerApp(appAttId, user, qChild,
      qChild.getAbstractUsersManager(), rmContext);
  app.getAppAttemptResourceUsage().incUsed(requestedResource);
  // In the "test2.1" queue, 1536 MB used is 60% of "test2.1" and 15% of the cluster.
  assertEquals(60.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
  assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
}
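The asserted percentages follow directly from dividing the application's used memory by the queue's absolute share of the 10240 MB cluster. A minimal sketch of that arithmetic (the helper names below are hypothetical, not part of the Hadoop code):

// Hypothetical helpers illustrating the arithmetic behind the assertions above.
// With a 10240 MB cluster: "test" (absolute capacity 1.0) -> 1536 / 10240 = 15%,
// "test2" (0.5) -> 1536 / 5120 = 30%, "test2.1" (0.25) -> 1536 / 2560 = 60%.
static float queueUsagePercentage(long usedMb, long clusterMb, float absoluteCapacity) {
  return usedMb * 100.0f / (clusterMb * absoluteCapacity);
}

static float clusterUsagePercentage(long usedMb, long clusterMb) {
  return usedMb * 100.0f / clusterMb;
}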
Use of org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager in project hadoop by apache.
From the class TestNodeLabelContainerAllocation, method testContainerAllocationWithSingleUserLimits.
@Test(timeout = 300000)
public void testContainerAllocationWithSingleUserLimits() throws Exception {
  final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
  mgr.init(conf);
  // set node -> label
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"),
      NodeId.newInstance("h2", 0), toSet("y")));
  // inject node label manager
  MockRM rm1 = new MockRM(TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {

    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  // label = x
  MockNM nm1 = rm1.registerNode("h1:1234", 8000);
  // label = y
  rm1.registerNode("h2:1234", 8000);
  // label = <empty>
  MockNM nm3 = rm1.registerNode("h3:1234", 8000);
  // launch an app in queue a1 (label = x), and check that all of its containers
  // are allocated on h1
  RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  // Queue a1 has only 10% of the default (empty-label) partition, so it can
  // allocate just one container with label = <empty>
  ContainerId containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
  Assert.assertTrue(rm1.waitForState(nm3, containerId, RMContainerState.ALLOCATED));
  // Cannot allocate a 2nd label = <empty> container
  containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
  am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "");
  Assert.assertFalse(rm1.waitForState(nm3, containerId, RMContainerState.ALLOCATED));
  // On h1 (label = x) we can allocate floor(8000 / 1024) = 7 containers in total;
  // one of them is the AM container, so 6 more label = x containers fit here.
  for (int id = 3; id <= 8; id++) {
    containerId = ContainerId.newContainerId(am1.getApplicationAttemptId(), id);
    am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x");
    Assert.assertTrue(rm1.waitForState(nm1, containerId, RMContainerState.ALLOCATED));
  }
  rm1.close();
}
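The container counts in the loop above rest on simple arithmetic: the 8000 MB node h1 holds floor(8000 / 1024) = 7 containers, and the AM (its 200 MB request normalized up to the minimum allocation) occupies one of them. A small sketch of that check, assuming the default 1024 MB minimum allocation:

// Hypothetical sanity check for the counts used above; assumes the default
// 1024 MB minimum allocation, so the 200 MB AM request occupies a full
// 1024 MB container on h1.
static int maxContainersOnNode(int nodeMemoryMb, int containerMemoryMb) {
  return nodeMemoryMb / containerMemoryMb; // floor(8000 / 1024) = 7 for h1
}
// 7 containers on h1 in total: the AM container plus the 6 label = x
// containers requested as ids 3..8 in the loop.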
Use of org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager in project hadoop by apache.
From the class TestQueueParsing, method testQueueParsingWhenLabelsInheritedNotExistedInNodeLabelManager.
@Test
public void testQueueParsingWhenLabelsInheritedNotExistedInNodeLabelManager() throws IOException {
  YarnConfiguration conf = new YarnConfiguration();
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(conf);
  setupQueueConfigurationWithLabelsInherit(csConf);
  CapacityScheduler capacityScheduler = new CapacityScheduler();
  RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null,
      new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf),
      new ClientToAMTokenSecretManagerInRM(), null);
  RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager();
  nodeLabelsManager.init(conf);
  nodeLabelsManager.start();
  rmContext.setNodeLabelManager(nodeLabelsManager);
  capacityScheduler.setConf(csConf);
  capacityScheduler.setRMContext(rmContext);
  capacityScheduler.init(csConf);
  capacityScheduler.start();
  ServiceOperations.stopQuietly(capacityScheduler);
  ServiceOperations.stopQuietly(nodeLabelsManager);
}
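This test and the two variants that follow share the same wiring: build a bare RMContextImpl with only the token secret managers, attach a started NullRMNodeLabelsManager, then point a CapacityScheduler at the CapacitySchedulerConfiguration and start it. A sketch of that shared setup, with a hypothetical helper name:

// Hypothetical helper capturing the setup shared by the three parsing tests;
// the method name is illustrative and not part of TestQueueParsing.
private static CapacityScheduler startSchedulerWithNullLabelManager(
    YarnConfiguration conf, CapacitySchedulerConfiguration csConf) {
  RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null,
      new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf),
      new ClientToAMTokenSecretManagerInRM(), null);
  RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager();
  nodeLabelsManager.init(conf);
  nodeLabelsManager.start();
  rmContext.setNodeLabelManager(nodeLabelsManager);
  CapacityScheduler capacityScheduler = new CapacityScheduler();
  capacityScheduler.setConf(csConf);
  capacityScheduler.setRMContext(rmContext);
  capacityScheduler.init(csConf);
  capacityScheduler.start();
  // Callers remain responsible for stopping both services, as the tests do with
  // ServiceOperations.stopQuietly (the label manager is reachable through the
  // scheduler's RMContext).
  return capacityScheduler;
}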
Use of org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager in project hadoop by apache.
From the class TestQueueParsing, method testSingleLevelQueueParsingWhenLabelsNotExistedInNodeLabelManager.
@Test
public void testSingleLevelQueueParsingWhenLabelsNotExistedInNodeLabelManager() throws IOException {
  YarnConfiguration conf = new YarnConfiguration();
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(conf);
  setupQueueConfigurationWithSingleLevel(csConf);
  CapacityScheduler capacityScheduler = new CapacityScheduler();
  RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null,
      new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf),
      new ClientToAMTokenSecretManagerInRM(), null);
  RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager();
  nodeLabelsManager.init(conf);
  nodeLabelsManager.start();
  rmContext.setNodeLabelManager(nodeLabelsManager);
  capacityScheduler.setConf(csConf);
  capacityScheduler.setRMContext(rmContext);
  capacityScheduler.init(csConf);
  capacityScheduler.start();
  ServiceOperations.stopQuietly(capacityScheduler);
  ServiceOperations.stopQuietly(nodeLabelsManager);
}
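The only difference between these parsing tests is the queue fixture each setup method builds. The sketch below shows roughly what such a fixture can look like; the queue names, capacities, and the "red" label are purely illustrative and not the actual setupQueueConfigurationWithSingleLevel fixture:

// Illustrative-only sketch of a single-level queue fixture that references a
// label ("red") the NullRMNodeLabelsManager never registers, which is the
// situation these parsing tests exercise.
private void setupIllustrativeSingleLevelQueues(CapacitySchedulerConfiguration csConf) {
  String a = CapacitySchedulerConfiguration.ROOT + ".a";
  String b = CapacitySchedulerConfiguration.ROOT + ".b";
  csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { "a", "b" });
  csConf.setAccessibleNodeLabels(CapacitySchedulerConfiguration.ROOT, ImmutableSet.of("red"));
  csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "red", 100);
  csConf.setCapacity(a, 50);
  csConf.setCapacity(b, 50);
  csConf.setAccessibleNodeLabels(a, ImmutableSet.of("red"));
  csConf.setCapacityByLabel(a, "red", 100);
}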
Use of org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager in project hadoop by apache.
From the class TestQueueParsing, method testQueueParsingWhenLabelsNotExistedInNodeLabelManager.
@Test
public void testQueueParsingWhenLabelsNotExistedInNodeLabelManager() throws IOException {
  YarnConfiguration conf = new YarnConfiguration();
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(conf);
  setupQueueConfigurationWithLabels(csConf);
  CapacityScheduler capacityScheduler = new CapacityScheduler();
  RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null,
      new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf),
      new ClientToAMTokenSecretManagerInRM(), null);
  RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager();
  nodeLabelsManager.init(conf);
  nodeLabelsManager.start();
  rmContext.setNodeLabelManager(nodeLabelsManager);
  capacityScheduler.setConf(csConf);
  capacityScheduler.setRMContext(rmContext);
  capacityScheduler.init(csConf);
  capacityScheduler.start();
  ServiceOperations.stopQuietly(capacityScheduler);
  ServiceOperations.stopQuietly(nodeLabelsManager);
}
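All three parsing tests tear down with ServiceOperations.stopQuietly, which catches and logs a failing stop() instead of rethrowing, so cleanup problems cannot mask the parsing behavior under test. A sketch of the same sequence with the cleanup moved into a finally block, so the services are stopped even if init or start throws; this is a variant of the pattern above, not the original test code:

// Variant sketch: same objects as above, with teardown guaranteed by finally.
capacityScheduler.setConf(csConf);
capacityScheduler.setRMContext(rmContext);
try {
  capacityScheduler.init(csConf);
  capacityScheduler.start();
} finally {
  ServiceOperations.stopQuietly(capacityScheduler);
  ServiceOperations.stopQuietly(nodeLabelsManager);
}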