Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
The class TestLeafQueue, method testDRFUserLimits.
@Test
public void testDRFUserLimits() throws Exception {
  setUpWithDominantResourceCalculator();
  // Mock the queue
  LeafQueue b = stubLeafQueue((LeafQueue) queues.get(B));
  // "Unset" maxCapacity by raising it to 100% of the cluster
  b.setMaxCapacity(1.0f);
  // Users
  final String user0 = "user_0";
  final String user1 = "user_1";
  // Submit applications
  final ApplicationAttemptId appAttemptId0 =
      TestUtils.getMockApplicationAttemptId(0, 0);
  FiCaSchedulerApp app0 = new FiCaSchedulerApp(appAttemptId0, user0, b,
      b.getAbstractUsersManager(), spyRMContext);
  b.submitApplicationAttempt(app0, user0);
  final ApplicationAttemptId appAttemptId2 =
      TestUtils.getMockApplicationAttemptId(2, 0);
  FiCaSchedulerApp app2 = new FiCaSchedulerApp(appAttemptId2, user1, b,
      b.getAbstractUsersManager(), spyRMContext);
  b.submitApplicationAttempt(app2, user1);
  // Set up some nodes
  String host0 = "127.0.0.1";
  FiCaSchedulerNode node0 =
      TestUtils.getMockNode(host0, DEFAULT_RACK, 0, 8 * GB, 100);
  String host1 = "127.0.0.2";
  FiCaSchedulerNode node1 =
      TestUtils.getMockNode(host1, DEFAULT_RACK, 0, 8 * GB, 100);
  Map<NodeId, FiCaSchedulerNode> nodes =
      ImmutableMap.of(node0.getNodeID(), node0, node1.getNodeID(), node1);
  Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
      app0.getApplicationAttemptId(), app0,
      app2.getApplicationAttemptId(), app2);
  int numNodes = 2;
  Resource clusterResource =
      Resources.createResource(numNodes * (8 * GB), numNodes * 100);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  when(csContext.getClusterResource()).thenReturn(clusterResource);
  // Set up resource requests so that one application is memory-dominant
  // and the other is vcores-dominant
  Priority priority = TestUtils.createMockPriority(1);
  app0.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 40, 10,
          true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
  app2.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 10, 10,
          true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
  /**
   * Start testing...
   */
  // Set user-limit
  b.setUserLimit(50);
  b.setUserLimitFactor(2);
  User queueUser0 = b.getUser(user0);
  User queueUser1 = b.getUser(user1);
  assertEquals("There should be 2 active users!", 2,
      b.getAbstractUsersManager().getNumActiveUsers());
  // Fill both nodes as far as we can
  CSAssignment assign;
  do {
    assign = b.assignContainers(clusterResource, node0,
        new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    LOG.info(assign.toString());
    applyCSAssignment(clusterResource, assign, b, nodes, apps);
  } while (assign.getResource().getMemorySize() > 0
      && assign.getAssignmentInformation().getNumReservations() == 0);
  do {
    assign = b.assignContainers(clusterResource, node1,
        new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assign, b, nodes, apps);
  } while (assign.getResource().getMemorySize() > 0
      && assign.getAssignmentInformation().getNumReservations() == 0);
  //LOG.info("user_0: " + queueUser0.getUsed());
  //LOG.info("user_1: " + queueUser1.getUsed());
  assertTrue("Verify user_0 got resources",
      queueUser0.getUsed().getMemorySize() > 0);
  assertTrue("Verify user_1 got resources",
      queueUser1.getUsed().getMemorySize() > 0);
  assertTrue("Expected AbsoluteUsedCapacity > 0.95, got: "
      + b.getAbsoluteUsedCapacity(), b.getAbsoluteUsedCapacity() > 0.95);
  // Verify that the consumed ratio is based on dominant resources:
  // user_0 is vcores-dominant, user_1 is memory-dominant.
  float expectedRatio =
      queueUser0.getUsed().getVirtualCores() / (numNodes * 100.0f)
          + queueUser1.getUsed().getMemorySize() / (numNodes * 8.0f * GB);
  assertEquals(expectedRatio, b.getUsersManager().getUsageRatio(""), 0.001);
  // Add another node and make sure the consumed ratio is adjusted
  // accordingly.
  numNodes = 3;
  clusterResource =
      Resources.createResource(numNodes * (8 * GB), numNodes * 100);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  root.updateClusterResource(clusterResource,
      new ResourceLimits(clusterResource));
  expectedRatio =
      queueUser0.getUsed().getVirtualCores() / (numNodes * 100.0f)
          + queueUser1.getUsed().getMemorySize() / (numNodes * 8.0f * GB);
  assertEquals(expectedRatio, b.getUsersManager().getUsageRatio(""), 0.001);
}
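
As an aside on the final assertions above: with the DominantResourceCalculator, each user's usage is driven by whichever resource is their dominant share, which is why the expected ratio adds user_0's vcore share (its 1GB/40-vcore containers are vcores-dominant) to user_1's memory share (its 2GB/10-vcore containers are memory-dominant). Below is a minimal, self-contained sketch of that arithmetic; the usage numbers are illustrative rather than the test's actual allocations.

public class DominantShareSketch {
    // A user's dominant share is the larger of their memory share and
    // their vcore share of the cluster.
    static float dominantShare(long usedMemMB, int usedVcores,
                               long clusterMemMB, int clusterVcores) {
        float memShare = (float) usedMemMB / clusterMemMB;
        float vcoreShare = (float) usedVcores / clusterVcores;
        return Math.max(memShare, vcoreShare);
    }

    public static void main(String[] args) {
        long clusterMemMB = 2 * 8 * 1024; // 2 nodes x 8GB, as in the test
        int clusterVcores = 2 * 100;      // 2 nodes x 100 vcores
        // Illustrative vcores-dominant user: 4GB / 160 vcores used
        System.out.println(dominantShare(4 * 1024, 160, clusterMemMB, clusterVcores));  // 0.8
        // Illustrative memory-dominant user: 10GB / 50 vcores used
        System.out.println(dominantShare(10 * 1024, 50, clusterMemMB, clusterVcores));  // 0.625
    }
}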
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
The class TestLeafQueue, method testUserQueueAcl.
@Test
public void testUserQueueAcl() throws Exception {
  // Manipulate queue 'd'
  LeafQueue d = stubLeafQueue((LeafQueue) queues.get(D));
  // Users
  final String user_d = "user_d";
  // Submit applications
  final ApplicationAttemptId appAttemptId_0 =
      TestUtils.getMockApplicationAttemptId(0, 1);
  FiCaSchedulerApp app_0 =
      new FiCaSchedulerApp(appAttemptId_0, user_d, d, null, spyRMContext);
  d.submitApplicationAttempt(app_0, user_d);
  // Submit a second attempt of the same application, same user
  final ApplicationAttemptId appAttemptId_1 =
      TestUtils.getMockApplicationAttemptId(0, 2);
  FiCaSchedulerApp app_1 =
      new FiCaSchedulerApp(appAttemptId_1, user_d, d, null, spyRMContext);
  d.submitApplicationAttempt(app_1, user_d);
}
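
For context, the two mock attempt IDs above differ only in their attempt number; both belong to the same application. A small sketch of that relationship using the public YARN records API (not the test's mock helper):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AttemptIdSketch {
    public static void main(String[] args) {
        // One application...
        ApplicationId appId =
            ApplicationId.newInstance(System.currentTimeMillis(), 0);
        // ...with two attempts, mirroring getMockApplicationAttemptId(0, 1)
        // and getMockApplicationAttemptId(0, 2) in the test above
        ApplicationAttemptId attempt1 = ApplicationAttemptId.newInstance(appId, 1);
        ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);
        System.out.println(attempt1); // e.g. appattempt_<timestamp>_0000_000001
        System.out.println(attempt2); // e.g. appattempt_<timestamp>_0000_000002
    }
}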
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
The class TestLeafQueue, method testGetTotalPendingResourcesConsideringUserLimitTwoUsers.
@Test
public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
    throws Exception {
  // Manipulate queue 'e'
  LeafQueue e = stubLeafQueue((LeafQueue) queues.get(E));
  // Allow queue 'e' to use 100% of cluster resources (max capacity).
  e.setMaxCapacity(1.0f);
  // When used queue resources go above capacity (in this case, 1%), the user
  // resource limit (used in calculating headroom) is calculated in small
  // increments to ensure that user-limit-percent can be met for all users in
  // a queue. Take user-limit-percent out of the equation so that the user
  // resource limit will always be calculated to its max possible value.
  e.setUserLimit(1000);
  final String user_0 = "user_0";
  final String user_1 = "user_1";
  // Submit 2 applications for user_0
  final ApplicationAttemptId appAttemptId_0 =
      TestUtils.getMockApplicationAttemptId(0, 0);
  FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, e,
      mock(ActiveUsersManager.class), spyRMContext);
  e.submitApplicationAttempt(app_0, user_0);
  final ApplicationAttemptId appAttemptId_1 =
      TestUtils.getMockApplicationAttemptId(1, 0);
  FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, e,
      mock(ActiveUsersManager.class), spyRMContext);
  e.submitApplicationAttempt(app_1, user_0);
  // Submit 2 applications for user_1
  final ApplicationAttemptId appAttemptId_2 =
      TestUtils.getMockApplicationAttemptId(2, 0);
  FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, e,
      mock(ActiveUsersManager.class), spyRMContext);
  e.submitApplicationAttempt(app_2, user_1);
  final ApplicationAttemptId appAttemptId_3 =
      TestUtils.getMockApplicationAttemptId(3, 0);
  FiCaSchedulerApp app_3 = new FiCaSchedulerApp(appAttemptId_3, user_1, e,
      mock(ActiveUsersManager.class), spyRMContext);
  e.submitApplicationAttempt(app_3, user_1);
  Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
      app_0.getApplicationAttemptId(), app_0,
      app_1.getApplicationAttemptId(), app_1,
      app_2.getApplicationAttemptId(), app_2,
      app_3.getApplicationAttemptId(), app_3);
  // Set up 1 node with 100GB of memory resources.
  String host_0 = "127.0.0.1";
  FiCaSchedulerNode node_0 =
      TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 100 * GB);
  Map<NodeId, FiCaSchedulerNode> nodes =
      ImmutableMap.of(node_0.getNodeID(), node_0);
  final int numNodes = 1;
  Resource clusterResource =
      Resources.createResource(numNodes * (100 * GB), numNodes * 128);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  // Pending resource requests for user_0: app_0 and app_1 total 3GB.
  Priority priority = TestUtils.createMockPriority(1);
  app_0.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true,
          priority, recordFactory)));
  app_1.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true,
          priority, recordFactory)));
  // Pending resource requests for user_1: app_2 and app_3 total 3GB.
  priority = TestUtils.createMockPriority(1);
  app_2.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true,
          priority, recordFactory)));
  app_3.updateResourceRequests(Collections.singletonList(
      TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true,
          priority, recordFactory)));
  // Start testing...
  // With queue capacity set at 1% of 100GB and user-limit-factor set to 1.0,
  // queue 'e' should be able to consume 1GB per user.
  assertEquals(2 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // None of the apps have assigned resources yet.
  // user_0's apps:
  assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
  assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
  // user_1's apps:
  assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
  assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
  // Assign 1st container of 1GB
  applyCSAssignment(clusterResource,
      e.assignContainers(clusterResource, node_0,
          new ResourceLimits(clusterResource),
          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
  // The first container was assigned to user_0's app_0. The queue's total
  // headroom has 1GB left for user_1.
  assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // user_0's apps:
  assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
  assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
  // user_1's apps:
  assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
  assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
  // Assign 2nd container of 1GB
  applyCSAssignment(clusterResource,
      e.assignContainers(clusterResource, node_0,
          new ResourceLimits(clusterResource),
          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
  // user_0 has no headroom due to the user-limit-factor of 1.0. However, the
  // capacity scheduler will assign one container beyond the user limit, so
  // this container went to user_0's app_1, and the headroom for queue 'e' is
  // still 1GB for user_1.
  assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // user_0's apps:
  assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
  assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
  // user_1's apps:
  assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
  assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
  // Assign 3rd container.
  applyCSAssignment(clusterResource,
      e.assignContainers(clusterResource, node_0,
          new ResourceLimits(clusterResource),
          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
  // The container was allocated to user_1's app_2, since user_1 still had
  // headroom. Now no headroom is left.
  assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // user_0's apps:
  assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
  assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
  // user_1's apps:
  assertEquals(1 * GB, app_2.getCurrentConsumption().getMemorySize());
  assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
  // Assign 4th container.
  applyCSAssignment(clusterResource,
      e.assignContainers(clusterResource, node_0,
          new ResourceLimits(clusterResource),
          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
  // Allocated to user_1's app_2, since the scheduler allocates one container
  // above the user resource limit. Available headroom is still 0.
  assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // user_0's apps:
  long app_0_consumption = app_0.getCurrentConsumption().getMemorySize();
  assertEquals(1 * GB, app_0_consumption);
  long app_1_consumption = app_1.getCurrentConsumption().getMemorySize();
  assertEquals(1 * GB, app_1_consumption);
  // user_1's apps:
  long app_2_consumption = app_2.getCurrentConsumption().getMemorySize();
  assertEquals(2 * GB, app_2_consumption);
  long app_3_consumption = app_3.getCurrentConsumption().getMemorySize();
  assertEquals(0 * GB, app_3_consumption);
  // Attempt to assign a 5th container. Will be a no-op.
  applyCSAssignment(clusterResource,
      e.assignContainers(clusterResource, node_0,
          new ResourceLimits(clusterResource),
          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
  // Cannot allocate the 5th container because both users are above their
  // allowed user resource limit. Values should be the same as before.
  assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // user_0's apps:
  assertEquals(app_0_consumption, app_0.getCurrentConsumption().getMemorySize());
  assertEquals(app_1_consumption, app_1.getCurrentConsumption().getMemorySize());
  // user_1's apps:
  assertEquals(app_2_consumption, app_2.getCurrentConsumption().getMemorySize());
  assertEquals(app_3_consumption, app_3.getCurrentConsumption().getMemorySize());
  // Increase user-limit-factor from 1.0 to 10.0, raising the effective user
  // resource limit from 1GB to 10GB (1% * 10 * 100GB = 10GB). Pending for
  // both user_0 and user_1 is still 1GB each, so the user limit is no longer
  // the limiting factor.
  e.setUserLimitFactor(10.0f);
  applyCSAssignment(clusterResource,
      e.assignContainers(clusterResource, node_0,
          new ResourceLimits(clusterResource),
          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
  // Next container goes to user_0's app_1, since it still wanted 1GB.
  assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // user_0's apps:
  assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
  assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
  // user_1's apps:
  assertEquals(2 * GB, app_2.getCurrentConsumption().getMemorySize());
  assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
  applyCSAssignment(clusterResource,
      e.assignContainers(clusterResource, node_0,
          new ResourceLimits(clusterResource),
          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
  // Last container goes to user_1's app_3, since it still wanted 1GB.
  assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(
      clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
  // user_0's apps:
  assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
  assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
  // user_1's apps:
  assertEquals(2 * GB, app_2.getCurrentConsumption().getMemorySize());
  assertEquals(1 * GB, app_3.getCurrentConsumption().getMemorySize());
  // Release each container from app_0
  for (RMContainer rmContainer : app_0.getLiveContainers()) {
    e.completedContainer(clusterResource, app_0, node_0, rmContainer,
        ContainerStatus.newInstance(rmContainer.getContainerId(),
            ContainerState.COMPLETE, "",
            ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
        RMContainerEventType.KILL, null, true);
  }
  // Release each container from app_1
  for (RMContainer rmContainer : app_1.getLiveContainers()) {
    e.completedContainer(clusterResource, app_1, node_0, rmContainer,
        ContainerStatus.newInstance(rmContainer.getContainerId(),
            ContainerState.COMPLETE, "",
            ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
        RMContainerEventType.KILL, null, true);
  }
}
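
The headroom arithmetic the comments above walk through boils down to: effective per-user limit ≈ queue capacity x user-limit-factor, with the scheduler allowed to place one container beyond that limit. The following is a rough model of that calculation only, a sketch; the real UsersManager also factors in minimum-user-limit-percent, the number of active users, and rounding.

public class UserLimitSketch {
    // Rough effective per-user limit in MB: cluster size x queue capacity
    // x user-limit-factor. Deliberately ignores the scheduler's finer points.
    static long effectiveUserLimitMB(long clusterMB, float queueCapacity,
                                     float userLimitFactor) {
        return (long) (clusterMB * queueCapacity * userLimitFactor);
    }

    public static void main(String[] args) {
        long clusterMB = 100 * 1024; // one 100GB node, as in the test
        float queueCapacity = 0.01f; // queue 'e' is 1% of the cluster
        // user-limit-factor 1.0: ~1GB per user (1024 MB)
        System.out.println(effectiveUserLimitMB(clusterMB, queueCapacity, 1.0f));
        // user-limit-factor 10.0: ~10GB per user, so each user's 1GB of
        // pending requests becomes the limiting factor instead
        System.out.println(effectiveUserLimitMB(clusterMB, queueCapacity, 10.0f));
    }
}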
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
The class TestLeafQueue, method testApplicationQueuePercent.
@Test
public void testApplicationQueuePercent() throws Exception {
  Resource res = Resource.newInstance(10 * 1024, 10);
  CapacityScheduler scheduler = mock(CapacityScheduler.class);
  when(scheduler.getClusterResource()).thenReturn(res);
  when(scheduler.getResourceCalculator())
      .thenReturn(new DefaultResourceCalculator());
  ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getEpoch()).thenReturn(3L);
  when(rmContext.getScheduler()).thenReturn(scheduler);
  when(rmContext.getRMApps())
      .thenReturn(new ConcurrentHashMap<ApplicationId, RMApp>());
  RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class);
  when(nlm.getResourceByLabel(any(), any())).thenReturn(res);
  when(rmContext.getNodeLabelManager()).thenReturn(nlm);
  // Queue "test" consumes 100% of the cluster, so its capacity and absolute
  // capacity are both 1.0f.
  Queue queue = createQueue("test", null, 1.0f, 1.0f);
  final String user = "user1";
  FiCaSchedulerApp app = new FiCaSchedulerApp(appAttId, user, queue,
      queue.getAbstractUsersManager(), rmContext);
  // Resource request
  Resource requestedResource = Resource.newInstance(1536, 2);
  app.getAppAttemptResourceUsage().incUsed(requestedResource);
  // In the "test" queue, 1536MB used is 15% of both the queue and the cluster
  assertEquals(15.0f,
      app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
  assertEquals(15.0f,
      app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
  // Queue "test2" is a child of root and its capacity is 50% of root. As a
  // child of root, its absolute capacity is also 50%.
  queue = createQueue("test2", null, 0.5f, 0.5f);
  app = new FiCaSchedulerApp(appAttId, user, queue,
      queue.getAbstractUsersManager(), rmContext);
  app.getAppAttemptResourceUsage().incUsed(requestedResource);
  // In the "test2" queue, 1536MB used is 30% of "test2" and 15% of the cluster.
  assertEquals(30.0f,
      app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
  assertEquals(15.0f,
      app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
  // Queue "test2.1" is 50% of queue "test2", which is 50% of the cluster.
  // Therefore, "test2.1" capacity is 50% and its absolute capacity is 25%.
  AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f);
  app = new FiCaSchedulerApp(appAttId, user, qChild,
      qChild.getAbstractUsersManager(), rmContext);
  app.getAppAttemptResourceUsage().incUsed(requestedResource);
  // In the "test2.1" queue, 1536MB used is 60% of "test2.1" and 15% of the
  // cluster.
  assertEquals(60.0f,
      app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
  assertEquals(15.0f,
      app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
}
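
The three assertion groups above all follow from the same arithmetic: usage percentage = 100 x used / (absolute capacity x cluster resource). A tiny standalone check of the numbers, assuming the test's 10GB cluster and 1536MB of usage:

public class UsagePercentSketch {
    static float percent(long usedMB, double capacityMB) {
        return (float) (100.0 * usedMB / capacityMB);
    }

    public static void main(String[] args) {
        long clusterMB = 10 * 1024; // 10GB cluster, as in the test
        long usedMB = 1536;
        System.out.println(percent(usedMB, clusterMB));        // 15.0: queue at 100% of cluster
        System.out.println(percent(usedMB, clusterMB * 0.5));  // 30.0: queue at 50% absolute capacity
        System.out.println(percent(usedMB, clusterMB * 0.25)); // 60.0: queue at 25% absolute capacity
    }
}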
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
The class TestLeafQueue, method testAppAttemptMetrics.
@Test
public void testAppAttemptMetrics() throws Exception {
  // Manipulate queue 'b' (held in the local variable 'a')
  LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
  // Users
  final String user_0 = "user_0";
  // Submit applications
  final ApplicationAttemptId appAttemptId_0 =
      TestUtils.getMockApplicationAttemptId(0, 1);
  AppAddedSchedulerEvent addAppEvent = new AppAddedSchedulerEvent(
      appAttemptId_0.getApplicationId(), a.getQueueName(), user_0);
  cs.handle(addAppEvent);
  AppAttemptAddedSchedulerEvent addAttemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId_0, false);
  cs.handle(addAttemptEvent);
  AppAttemptRemovedSchedulerEvent event = new AppAttemptRemovedSchedulerEvent(
      appAttemptId_0, RMAppAttemptState.FAILED, false);
  cs.handle(event);
  assertEquals(0, a.getMetrics().getAppsPending());
  assertEquals(0, a.getMetrics().getAppsFailed());
  // Attempt the same application again
  final ApplicationAttemptId appAttemptId_1 =
      TestUtils.getMockApplicationAttemptId(0, 2);
  FiCaSchedulerApp app_1 =
      new FiCaSchedulerApp(appAttemptId_1, user_0, a, null, spyRMContext);
  app_1.setAMResource(Resource.newInstance(100, 1));
  // Same user
  a.submitApplicationAttempt(app_1, user_0);
  assertEquals(1, a.getMetrics().getAppsSubmitted());
  assertEquals(1, a.getMetrics().getAppsPending());
  assertEquals(1, a.getUser(user_0).getActiveApplications());
  assertEquals(app_1.getAMResource().getMemorySize(),
      a.getMetrics().getUsedAMResourceMB());
  assertEquals(app_1.getAMResource().getVirtualCores(),
      a.getMetrics().getUsedAMResourceVCores());
  event = new AppAttemptRemovedSchedulerEvent(appAttemptId_0,
      RMAppAttemptState.FINISHED, false);
  cs.handle(event);
  AppRemovedSchedulerEvent rEvent = new AppRemovedSchedulerEvent(
      appAttemptId_0.getApplicationId(), RMAppState.FINISHED);
  cs.handle(rEvent);
  assertEquals(1, a.getMetrics().getAppsSubmitted());
  assertEquals(0, a.getMetrics().getAppsPending());
  assertEquals(0, a.getMetrics().getAppsFailed());
  assertEquals(1, a.getMetrics().getAppsCompleted());
  QueueMetrics userMetrics = a.getMetrics().getUserMetrics(user_0);
  assertEquals(1, userMetrics.getAppsSubmitted());
}
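
Note the pattern in this last test: rather than calling queue methods directly, it drives the CapacityScheduler through the same scheduler events the ResourceManager's dispatcher would deliver. Stripped to its core, the lifecycle for one attempt looks like the sketch below, assuming a configured CapacityScheduler cs and reusing only constructors that appear in the test above (the queue name "b" and user "user_0" are placeholders):

// App is added to the queue, an attempt is added, then both are removed.
ApplicationAttemptId attemptId = TestUtils.getMockApplicationAttemptId(0, 1);
cs.handle(new AppAddedSchedulerEvent(
    attemptId.getApplicationId(), "b", "user_0"));
cs.handle(new AppAttemptAddedSchedulerEvent(attemptId, false));
cs.handle(new AppAttemptRemovedSchedulerEvent(
    attemptId, RMAppAttemptState.FINISHED, false));
cs.handle(new AppRemovedSchedulerEvent(
    attemptId.getApplicationId(), RMAppState.FINISHED));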