Example 41 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestLeafQueue, method testGetTotalPendingResourcesConsideringUserLimitTwoUsers.

@Test
public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers() throws Exception {
    // Manipulate queue 'e'
    LeafQueue e = stubLeafQueue((LeafQueue) queues.get(E));
    // Allow queue 'e' to use 100% of cluster resources (max capacity).
    e.setMaxCapacity(1.0f);
    // When a queue's used resources go above its capacity (in this case, 1%),
    // the user resource limit (used in calculating headroom) is calculated in
    // small increments to ensure that user-limit-percent can be met for all
    // users in a queue. Take user-limit-percent out of the equation so that
    // the user resource limit is always calculated to its maximum possible value.
    e.setUserLimit(1000);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit 2 applications for user_0
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_1, user_0);
    // Submit 2 applications for user_1
    final ApplicationAttemptId appAttemptId_2 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_2 = new FiCaSchedulerApp(appAttemptId_2, user_1, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_2, user_1);
    final ApplicationAttemptId appAttemptId_3 = TestUtils.getMockApplicationAttemptId(3, 0);
    FiCaSchedulerApp app_3 = new FiCaSchedulerApp(appAttemptId_3, user_1, e, mock(ActiveUsersManager.class), spyRMContext);
    e.submitApplicationAttempt(app_3, user_1);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1, app_2.getApplicationAttemptId(), app_2, app_3.getApplicationAttemptId(), app_3);
    // Setup 1 node with 100GB of memory resources.
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 100 * GB);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
    final int numNodes = 1;
    Resource clusterResource = Resources.createResource(numNodes * (100 * GB), numNodes * 128);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Pending resource requests for user_0: app_0 and app_1 total 3GB.
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    // Pending resource requests for user_1: app_2 and app_3 total 3GB.
    priority = TestUtils.createMockPriority(1);
    app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)));
    // Start testing...
    // With queue capacity set at 1% of 100GB and user-limit-factor set to 1.0,
    // queue 'e' should be able to consume 1GB per user, 2GB in total.
    assertEquals(2 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // None of the apps have assigned resources
    // user_0's apps:
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 1st Container of 1GB
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // The first container was assigned to user_0's app_0. The queue's total
    // headroom still has 1GB left for user_1.
    assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 2nd container of 1GB
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // user_0 has no headroom due to the user-limit-factor of 1.0. However, the
    // capacity scheduler will assign one container beyond the user limit, so
    // this container went to user_0's app_1. Headroom for queue 'e' is still
    // 1GB for user_1.
    assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(0 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 3rd container.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // The container was allocated to user_1's app_2 since user_1 still had
    // headroom. Now, no headroom is left.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(1 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Assign 4th container.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Allocated to user_1's app_2 since the scheduler allocates one container
    // above the user resource limit. Available headroom is still 0.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    long app_0_consumption = app_0.getCurrentConsumption().getMemorySize();
    assertEquals(1 * GB, app_0_consumption);
    long app_1_consumption = app_1.getCurrentConsumption().getMemorySize();
    assertEquals(1 * GB, app_1_consumption);
    // user_1's apps:
    long app_2_consumption = app_2.getCurrentConsumption().getMemorySize();
    assertEquals(2 * GB, app_2_consumption);
    long app_3_consumption = app_3.getCurrentConsumption().getMemorySize();
    assertEquals(0 * GB, app_3_consumption);
    // Attempt to assign 5th container. Will be a no-op.
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Cannot allocate a 5th container because both users are above their
    // allowed user resource limit. Values should be the same as before.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(app_0_consumption, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(app_1_consumption, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(app_2_consumption, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(app_3_consumption, app_3.getCurrentConsumption().getMemorySize());
    // Increase user-limit-factor from 1.0 to 10.0, raising each user's limit
    // from 1GB to 10GB (1% * 10 * 100GB = 10GB). Pending for both user_0 and
    // user_1 is still 1GB each, so the user limit is no longer the limiting
    // factor.
    e.setUserLimitFactor(10.0f);
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Next container goes to user_0's app_1, since it still wanted 1GB.
    assertEquals(1 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(2 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_3.getCurrentConsumption().getMemorySize());
    applyCSAssignment(clusterResource, e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), e, nodes, apps);
    // Last container goes to user_1's app_3, since it still wanted 1GB.
    assertEquals(0 * GB, e.getTotalPendingResourcesConsideringUserLimit(clusterResource, RMNodeLabelsManager.NO_LABEL, false).getMemorySize());
    // user_0's apps:
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    // user_1's apps:
    assertEquals(2 * GB, app_2.getCurrentConsumption().getMemorySize());
    assertEquals(1 * GB, app_3.getCurrentConsumption().getMemorySize());
    // Release each container from app_0
    for (RMContainer rmContainer : app_0.getLiveContainers()) {
        e.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
    // Release each container from app_1
    for (RMContainer rmContainer : app_1.getLiveContainers()) {
        e.completedContainer(clusterResource, app_1, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    }
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) Test(org.junit.Test)
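
The user-limit arithmetic these assertions exercise can be sketched in isolation. The snippet below is a simplified model inferred from the test's expected values, not the CapacityScheduler's actual user-limit code; UserLimitSketch and computeUserLimit are hypothetical names, and the formula (queue capacity scaled by user-limit-factor, floored at one minimum allocation) is an assumption.

// A minimal sketch, assuming the user limit is the queue's guaranteed
// capacity scaled by user-limit-factor, floored at one minimum allocation.
// The real logic lives in LeafQueue/UsersManager and is more involved.
public class UserLimitSketch {

    static final long GB = 1024; // MB per GB, matching the test's units

    static long computeUserLimit(long queueCapacityMb, float userLimitFactor,
                                 long minAllocationMb) {
        return Math.max((long) (queueCapacityMb * userLimitFactor), minAllocationMb);
    }

    public static void main(String[] args) {
        long queueCapacityMb = 1 * GB; // 1% of the 100GB cluster above
        long minAllocationMb = 1 * GB;

        // user-limit-factor 1.0: each user may hold about 1GB (plus the one
        // extra container the scheduler grants past the limit).
        System.out.println(computeUserLimit(queueCapacityMb, 1.0f, minAllocationMb)); // 1024

        // user-limit-factor 10.0: the limit grows to 10GB, so the 1GB still
        // pending per user, not the factor, becomes the binding constraint.
        System.out.println(computeUserLimit(queueCapacityMb, 10.0f, minAllocationMb)); // 10240
    }
}

Under this model, the 2GB/1GB/0GB totals asserted above fall out of capping each user's pending requests at the user limit minus that user's current consumption.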

Example 42 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestLeafQueue, method testApplicationQueuePercent.

@Test
public void testApplicationQueuePercent() throws Exception {
    Resource res = Resource.newInstance(10 * 1024, 10);
    CapacityScheduler scheduler = mock(CapacityScheduler.class);
    when(scheduler.getClusterResource()).thenReturn(res);
    when(scheduler.getResourceCalculator()).thenReturn(new DefaultResourceCalculator());
    ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getEpoch()).thenReturn(3L);
    when(rmContext.getScheduler()).thenReturn(scheduler);
    when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class);
    when(nlm.getResourceByLabel(any(), any())).thenReturn(res);
    when(rmContext.getNodeLabelManager()).thenReturn(nlm);
    // Queue "test" consumes 100% of the cluster, so its capacity and absolute
    // capacity are both 1.0f.
    Queue queue = createQueue("test", null, 1.0f, 1.0f);
    final String user = "user1";
    FiCaSchedulerApp app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    // Resource request
    Resource requestedResource = Resource.newInstance(1536, 2);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test" queue, 1536 used is 15% of both the queue and the cluster
    assertEquals(15.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2" is a child of root and its capacity is 50% of root. As a
    // child of root, its absolute capaicty is also 50%.
    queue = createQueue("test2", null, 0.5f, 0.5f);
    app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test2" queue, 1536 used is 30% of "test2" and 15% of the cluster.
    assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2.1" is 50% of queue "test2", which is 50% of the cluster.
    // Therefore, "test2.1" capacity is 50% and absolute capacity is 25%.
    AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f);
    app = new FiCaSchedulerApp(appAttId, user, qChild, qChild.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test2.1" queue, 1536 used is 60% of "test2.1" and 15% of the cluster.
    assertEquals(60.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) DefaultResourceCalculator(org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Queue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue) RMNodeLabelsManager(org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager) Test(org.junit.Test)
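
The percentage math behind these assertions can be reproduced standalone. The sketch below is inferred from the expected values rather than copied from the scheduler's reporting code; UsagePercentSketch and its method names are hypothetical.

// A minimal sketch: usage percentages relative to the queue's absolute
// share of the cluster, and relative to the whole cluster.
public class UsagePercentSketch {

    static float queueUsagePercent(long usedMb, long clusterMb, float absCapacity) {
        // The queue's absolute capacity is its fraction of the whole cluster.
        return 100f * usedMb / (clusterMb * absCapacity);
    }

    static float clusterUsagePercent(long usedMb, long clusterMb) {
        return 100f * usedMb / clusterMb;
    }

    public static void main(String[] args) {
        long clusterMb = 10 * 1024; // the 10GB cluster mocked above
        long usedMb = 1536;

        System.out.println(queueUsagePercent(usedMb, clusterMb, 1.0f));  // 15.0  ("test")
        System.out.println(queueUsagePercent(usedMb, clusterMb, 0.5f));  // 30.0  ("test2")
        System.out.println(queueUsagePercent(usedMb, clusterMb, 0.25f)); // 60.0  ("test2.1")
        System.out.println(clusterUsagePercent(usedMb, clusterMb));      // 15.0 in every case
    }
}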

Example 43 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestLeafQueue, method testAppAttemptMetrics.

@Test
public void testAppAttemptMetrics() throws Exception {
    // Manipulate queue 'b' (bound to the local variable 'a')
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
    // Users
    final String user_0 = "user_0";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 1);
    AppAddedSchedulerEvent addAppEvent = new AppAddedSchedulerEvent(appAttemptId_0.getApplicationId(), a.getQueueName(), user_0);
    cs.handle(addAppEvent);
    AppAttemptAddedSchedulerEvent addAttemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId_0, false);
    cs.handle(addAttemptEvent);
    AppAttemptRemovedSchedulerEvent event = new AppAttemptRemovedSchedulerEvent(appAttemptId_0, RMAppAttemptState.FAILED, false);
    cs.handle(event);
    assertEquals(0, a.getMetrics().getAppsPending());
    assertEquals(0, a.getMetrics().getAppsFailed());
    // Attempt the same application again
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(0, 2);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, null, spyRMContext);
    app_1.setAMResource(Resource.newInstance(100, 1));
    // same user
    a.submitApplicationAttempt(app_1, user_0);
    assertEquals(1, a.getMetrics().getAppsSubmitted());
    assertEquals(1, a.getMetrics().getAppsPending());
    assertEquals(1, a.getUser(user_0).getActiveApplications());
    assertEquals(app_1.getAMResource().getMemorySize(), a.getMetrics().getUsedAMResourceMB());
    assertEquals(app_1.getAMResource().getVirtualCores(), a.getMetrics().getUsedAMResourceVCores());
    event = new AppAttemptRemovedSchedulerEvent(appAttemptId_0, RMAppAttemptState.FINISHED, false);
    cs.handle(event);
    AppRemovedSchedulerEvent rEvent = new AppRemovedSchedulerEvent(appAttemptId_0.getApplicationId(), RMAppState.FINISHED);
    cs.handle(rEvent);
    assertEquals(1, a.getMetrics().getAppsSubmitted());
    assertEquals(0, a.getMetrics().getAppsPending());
    assertEquals(0, a.getMetrics().getAppsFailed());
    assertEquals(1, a.getMetrics().getAppsCompleted());
    QueueMetrics userMetrics = a.getMetrics().getUserMetrics(user_0);
    assertEquals(1, userMetrics.getAppsSubmitted());
}
Also used : AppRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent) QueueMetrics(org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics) AppAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) AppAttemptRemovedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) AppAttemptAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent) Test(org.junit.Test)
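
For readers without the test's TestUtils helper, equivalent attempt ids can be built with the public factory methods on the YARN records classes, which are part of the real API; AttemptIdSketch and the timestamp value below are illustrative.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AttemptIdSketch {

    public static void main(String[] args) {
        // Arbitrary cluster timestamp and application id for illustration.
        ApplicationId appId = ApplicationId.newInstance(0L, 0);

        // Two attempts of the same application, mirroring attempt ids 1 and 2
        // in testAppAttemptMetrics above.
        ApplicationAttemptId attempt1 = ApplicationAttemptId.newInstance(appId, 1);
        ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);

        System.out.println(attempt1); // e.g. appattempt_0_0000_000001
        System.out.println(attempt2); // e.g. appattempt_0_0000_000002
    }
}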

Example 44 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestLeafQueue, method testReservationExchange.

@Test
public void testReservationExchange() throws Exception {
    // Manipulate queue 'a'
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Unset maxCapacity
    a.setMaxCapacity(1.0f);
    a.setUserLimitFactor(10);
    // Users
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_1, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_1, user_1);
    // Setup some nodes
    String host_0 = "127.0.0.1";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4 * GB);
    String host_1 = "127.0.0.2";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4 * GB);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getClusterResource()).thenReturn(Resource.newInstance(8, 1));
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    final int numNodes = 3;
    Resource clusterResource = Resources.createResource(numNodes * (4 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(4 * GB, 16));
    when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4 * GB, 16));
    // Minimum allocation factor: 1GB / 4GB = 0.25
    when(a.getMinimumAllocationFactor()).thenReturn(0.25f);
    // Setup resource-requests
    Priority priority = TestUtils.createMockPriority(1);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 4 * GB, 1, true, priority, recordFactory)));
    // Start testing...
    // Only 1 container
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(1 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Also assign the 2nd container -> minCapacity = 1024 since
    // (.1 * 8G) < minAlloc; also, a user can get one container more than the
    // user limit
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    // Now, reservation should kick in for app_1
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(6 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    // Now free 1 container from app_0 i.e. 1G, and re-reserve it
    RMContainer rmContainer = app_0.getLiveContainers().iterator().next();
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(1 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(1, app_1.getReReservations(toSchedulerKey(priority)));
    // Re-reserve
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(1 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(2, app_1.getReReservations(toSchedulerKey(priority)));
    // Try to schedule on node_1 now, should *move* the reservation
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(9 * GB, a.getUsedResources().getMemorySize());
    assertEquals(1 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(4 * GB, node_1.getAllocatedResource().getMemorySize());
    // The re-reservation count doesn't change yet... it changes only when the
    // reservation is cancelled or a different container is reserved
    assertEquals(2, app_1.getReReservations(toSchedulerKey(priority)));
    // Now finish another container from app_0 and see the reservation cancelled
    rmContainer = app_0.getLiveContainers().iterator().next();
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, ContainerStatus.newInstance(rmContainer.getContainerId(), ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true);
    applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
    assertEquals(4 * GB, a.getUsedResources().getMemorySize());
    assertEquals(0 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentReservation().getMemorySize());
    assertEquals(0 * GB, node_0.getAllocatedResource().getMemorySize());
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) Test(org.junit.Test)
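
The 6GB "used" figure asserted after the reservation kicks in follows from simple bookkeeping: a queue's used resources count reserved memory as well as allocated memory. The sketch below merely restates that arithmetic; ReservationSketch is a hypothetical name, not LeafQueue's implementation.

// A minimal sketch of why the 4GB request reserves instead of allocating:
// node_0 has 4GB total with 2GB already allocated, so the request cannot
// fit and the scheduler records a reservation instead.
public class ReservationSketch {

    static final long GB = 1024;

    public static void main(String[] args) {
        long nodeTotalMb = 4 * GB;
        long nodeAllocatedMb = 2 * GB; // app_0's two 1GB containers
        long requestMb = 4 * GB;       // app_1's pending request

        if (requestMb > nodeTotalMb - nodeAllocatedMb) {
            // The queue's "used" figure includes the reservation, hence the
            // 6GB assertion above.
            long queueUsedMb = nodeAllocatedMb + requestMb;
            System.out.println("reserve " + requestMb / GB + "GB; queue used = "
                    + queueUsedMb / GB + "GB"); // reserve 4GB; queue used = 6GB
        }
    }
}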

Example 45 with ApplicationAttemptId

Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.

From the class TestLeafQueue, method testConcurrentAccess.

@Test
public void testConcurrentAccess() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    MockRM rm = new MockRM();
    rm.init(conf);
    rm.start();
    final String queue = "default";
    final String user = "user";
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    final LeafQueue defaultQueue = (LeafQueue) cs.getQueue(queue);
    final List<FiCaSchedulerApp> listOfApps = createListOfApps(10000, user, defaultQueue);
    final CyclicBarrier cb = new CyclicBarrier(2);
    final List<ConcurrentModificationException> conException = new ArrayList<ConcurrentModificationException>();
    Thread submitAndRemove = new Thread(new Runnable() {

        @Override
        public void run() {
            for (FiCaSchedulerApp fiCaSchedulerApp : listOfApps) {
                defaultQueue.submitApplicationAttempt(fiCaSchedulerApp, user);
            }
            try {
                cb.await();
            } catch (Exception e) {
            // Ignore
            }
            for (FiCaSchedulerApp fiCaSchedulerApp : listOfApps) {
                defaultQueue.finishApplicationAttempt(fiCaSchedulerApp, queue);
            }
        }
    }, "SubmitAndRemoveApplicationAttempt Thread");
    Thread getAppsInQueue = new Thread(new Runnable() {

        List<ApplicationAttemptId> apps = new ArrayList<ApplicationAttemptId>();

        @Override
        public void run() {
            try {
                try {
                    cb.await();
                } catch (Exception e) {
                // Ignore
                }
                defaultQueue.collectSchedulerApplications(apps);
            } catch (ConcurrentModificationException e) {
                conException.add(e);
            }
        }
    }, "GetAppsInQueue Thread");
    submitAndRemove.start();
    getAppsInQueue.start();
    submitAndRemove.join();
    getAppsInQueue.join();
    assertTrue("ConcurrentModificationException is thrown", conException.isEmpty());
    rm.stop();
}
Also used : ConcurrentModificationException(java.util.ConcurrentModificationException) ArrayList(java.util.ArrayList) MockRM(org.apache.hadoop.yarn.server.resourcemanager.MockRM) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ConcurrentModificationException(java.util.ConcurrentModificationException) IOException(java.io.IOException) CyclicBarrier(java.util.concurrent.CyclicBarrier) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) Test(org.junit.Test)
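
The race this test guards against is easy to reproduce in isolation. The sketch below is independent of the scheduler classes (CmeSketch and hammer are hypothetical names): it iterates a plain ArrayList while a second thread mutates it, which typically fails, then repeats the exercise with a CopyOnWriteArrayList, whose snapshot iterators never throw ConcurrentModificationException.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class CmeSketch {

    // Iterate the list repeatedly while a writer thread mutates it.
    static void hammer(List<Integer> list) throws InterruptedException {
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 100_000; i++) {
                list.add(i);
                list.remove(list.size() - 1);
            }
        });
        writer.start();
        for (int i = 0; i < 10_000; i++) {
            long sum = 0;
            for (int v : list) {
                sum += v; // reading while the writer mutates
            }
        }
        writer.join();
    }

    public static void main(String[] args) throws InterruptedException {
        try {
            hammer(new ArrayList<>(Collections.nCopies(100, 0)));
            System.out.println("ArrayList survived this run (timing-dependent)");
        } catch (RuntimeException e) {
            // Typically ConcurrentModificationException; unsynchronized
            // ArrayList access can fail in other ways as well.
            System.out.println("ArrayList failed: " + e);
        }
        hammer(new CopyOnWriteArrayList<>(Collections.nCopies(100, 0)));
        System.out.println("CopyOnWriteArrayList: no exception");
    }
}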
