Example 6 with ResourceUsage

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage in project hadoop by apache.

the class TestApplicationLimitsByPartition method testHeadroom.

@Test
public void testHeadroom() throws Exception {
    /*
     * Test case: verify that the headroom calculated is the sum of the
     * headrooms for each requested partition. Submit an app with requests
     * for the default partition and the 'y' partition; the user's total
     * headroom should then be the sum of the headrooms for both labels.
     */
    simpleNodeLabelMappingToManager();
    CapacitySchedulerConfiguration csConf = (CapacitySchedulerConfiguration) TestUtils.getComplexConfigurationWithQueueLabels(conf);
    final String A1 = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
    final String B2 = CapacitySchedulerConfiguration.ROOT + ".b" + ".b2";
    csConf.setUserLimit(A1, 25);
    csConf.setUserLimit(B2, 25);
    YarnConfiguration conf = new YarnConfiguration();
    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
    when(csContext.getConfiguration()).thenReturn(csConf);
    when(csContext.getConf()).thenReturn(conf);
    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB));
    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
    RMContext rmContext = TestUtils.getMockRMContext();
    RMContext spyRMContext = spy(rmContext);
    when(spyRMContext.getNodeLabelManager()).thenReturn(mgr);
    when(csContext.getRMContext()).thenReturn(spyRMContext);
    mgr.activateNode(NodeId.newInstance("h0", 0), // default Label
    Resource.newInstance(160 * GB, 16));
    mgr.activateNode(NodeId.newInstance("h1", 0), // label x
    Resource.newInstance(160 * GB, 16));
    mgr.activateNode(NodeId.newInstance("h2", 0), // label y
    Resource.newInstance(160 * GB, 16));
    // The default partition has a single node, so its cluster resource is 160 GB
    Resource clusterResource = Resources.createResource(160 * GB);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, "root", queues, queues, TestUtils.spyHook);
    ResourceUsage queueResUsage = rootQueue.getQueueResourceUsage();
    when(csContext.getClusterResourceUsage()).thenReturn(queueResUsage);
    // Manipulate queue 'b2'
    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get("b2"));
    queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("h0", rack_0, 0, 160 * GB);
    FiCaSchedulerNode node_1 = TestUtils.getMockNode("h1", rack_0, 0, 160 * GB);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    ResourceRequest amResourceRequest = mock(ResourceRequest.class);
    Resource amResource = Resources.createResource(0, 0);
    when(amResourceRequest.getCapability()).thenReturn(amResource);
    when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
    when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(rmAppAttempt);
    when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
    Mockito.doReturn(true).when(spyApps).containsKey((ApplicationId) Matchers.any());
    Priority priority_1 = TestUtils.createMockPriority(1);
    // Submit first application with some resource-requests from user_0,
    // and check headroom
    final ApplicationAttemptId appAttemptId_0_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_0, user_0);
    List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
    app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_0.updateResourceRequests(app_0_0_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // headroom = queue capacity * user limit = 50% * 90% * 160 GB * 0.25 (UL)
    Resource expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // Submit second application from user_0, check headroom
    final ApplicationAttemptId appAttemptId_0_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_1, user_0);
    List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_1.updateResourceRequests(app_0_1_requests);
    app_0_1_requests.clear();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_0_1.updateResourceRequests(app_0_1_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    queue.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // no change
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // headroom for the default partition + headroom for the y partition, where
    // headroom for y = 100% * 50% (queue capacities in y) * 160 GB * 0.25 (UL)
    Resource expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    // Submit first application from user_1, check for new headroom
    final ApplicationAttemptId appAttemptId_1_0 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_1_0, user_1);
    List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_1_0.updateResourceRequests(app_1_0_requests);
    app_1_0_requests.clear();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_1_0.updateResourceRequests(app_1_0_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // headroom per user stays at 50% * 90% * 160 GB * 0.25 (UL), even with two active users
    expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    // headroom for the default partition + headroom for the y partition, as above
    expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_1_0.getHeadroom());
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RecordFactory(org.apache.hadoop.yarn.factories.RecordFactory) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
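
The point of the final assertions is that per-user headroom is additive across the partitions an app has requested. A minimal sketch of that arithmetic, reusing the test's GB constant and the public Resources helpers (the variable names here are illustrative):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

// Headroom in the default partition: 50% * 90% * 160 GB * 0.25 (user limit).
Resource defaultHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
// Headroom in partition 'y': 100% * 50% * 160 GB * 0.25 (user limit).
Resource yHeadroom = Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1);
// An app requesting both partitions sees the sum of the two.
Resource totalHeadroom = Resources.add(defaultHeadroom, yHeadroom);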

Example 7 with ResourceUsage

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage in project hadoop by apache.

the class TestReservations method setup.

private void setup(CapacitySchedulerConfiguration csConf, boolean addUserLimits) throws Exception {
    csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
    final String newRoot = "root" + System.currentTimeMillis();
    setupQueueConfiguration(csConf, newRoot, addUserLimits);
    YarnConfiguration conf = new YarnConfiguration();
    cs.setConf(conf);
    csContext = mock(CapacitySchedulerContext.class);
    when(csContext.getConfiguration()).thenReturn(csConf);
    when(csContext.getConf()).thenReturn(conf);
    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB, 1));
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB, 12));
    when(csContext.getClusterResource()).thenReturn(Resources.createResource(100 * 16 * GB, 100 * 12));
    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
    when(csContext.getPreemptionManager()).thenReturn(new PreemptionManager());
    when(csContext.getRMContext()).thenReturn(rmContext);
    RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf);
    containerTokenSecretManager.rollMasterKey();
    when(csContext.getContainerTokenSecretManager()).thenReturn(containerTokenSecretManager);
    root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
    ResourceUsage queueResUsage = root.getQueueResourceUsage();
    when(csContext.getClusterResourceUsage()).thenReturn(queueResUsage);
    spyRMContext = spy(rmContext);
    when(spyRMContext.getScheduler()).thenReturn(cs);
    when(spyRMContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
    cs.setRMContext(spyRMContext);
    cs.init(csConf);
    cs.start();
    when(cs.getNumClusterNodes()).thenReturn(3);
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage) PreemptionManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager) RMContainerTokenSecretManager(org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager)
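
The reservation tests call this helper before doing any scheduling. A minimal sketch of a caller, under the assumption that it lives in the same test class (the test name is hypothetical, not from the source):

@Test
public void testReservationWithUserLimits() throws Exception { // hypothetical name
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf, true); // true: also apply per-user limits to the queue config
    // csContext, root, and spyRMContext fields are now wired up for scheduling
}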

Example 8 with ResourceUsage

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage in project hadoop by apache.

the class UsersManager method updateActiveUsersResourceUsage.

private void updateActiveUsersResourceUsage(String userName) {
    try {
        this.writeLock.lock();
        // For unit-test cases the user may not exist yet, so add it if absent.
        User user = getUserAndAddIfAbsent(userName);
        ResourceUsage resourceUsage = user.getResourceUsage();
        // If the user is currently non-active, move it (and its resource usage)
        // to the active list.
        if (nonActiveUsersSet.contains(userName)) {
            nonActiveUsersSet.remove(userName);
            activeUsersSet.add(userName);
            // Update the active/non-active usage totals now that the user
            // is moved from non-active to active.
            for (String partition : resourceUsage.getNodePartitionsSet()) {
                totalResUsageForNonActiveUsers.decUsed(partition, resourceUsage.getUsed(partition));
                totalResUsageForActiveUsers.incUsed(partition, resourceUsage.getUsed(partition));
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("User '" + userName + "' has become active. Hence move user to active list." + "Active users size = " + activeUsersSet.size() + "Non-active users size = " + nonActiveUsersSet.size() + "Total Resource usage for active users=" + totalResUsageForActiveUsers.getAllUsed() + "." + "Total Resource usage for non-active users=" + totalResUsageForNonActiveUsers.getAllUsed());
            }
        }
    } finally {
        this.writeLock.unlock();
    }
}
Also used : ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage)
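
The move loop above relies on ResourceUsage keeping an independent counter per node partition. A small sketch of that part of the API, using only the calls visible in the method (partition names and values are illustrative):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;

ResourceUsage usage = new ResourceUsage();
usage.incUsed("x", Resource.newInstance(4 * 1024, 4)); // 4 GB, 4 vcores in partition x
usage.incUsed("", Resource.newInstance(2 * 1024, 2));  // "" is the default partition
// getNodePartitionsSet() yields every partition with recorded usage; the
// method above iterates it to move each partition's share between trackers.
for (String partition : usage.getNodePartitionsSet()) {
    Resource used = usage.getUsed(partition);
    // decUsed(partition, used) on one tracker plus incUsed(partition, used)
    // on another is exactly the "move" performed in the method above.
}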

Example 9 with ResourceUsage

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage in project hadoop by apache.

the class UsersManager method updateResourceUsagePerUser.

private void updateResourceUsagePerUser(User user, Resource resource, String nodePartition, boolean isAllocate) {
    ResourceUsage totalResourceUsageForUsers = getTotalResourceUsagePerUser(user.userName);
    if (isAllocate) {
        user.getResourceUsage().incUsed(nodePartition, resource);
        totalResourceUsageForUsers.incUsed(nodePartition, resource);
    } else {
        user.getResourceUsage().decUsed(nodePartition, resource);
        totalResourceUsageForUsers.decUsed(nodePartition, resource);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("User resource is updated." + "Total Resource usage for active users=" + totalResUsageForActiveUsers.getAllUsed() + "." + "Total Resource usage for non-active users=" + totalResUsageForNonActiveUsers.getAllUsed());
    }
}
Also used : ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage)
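
Since allocation and release are symmetric incUsed/decUsed calls on the same trackers, a matching allocate/release pair must cancel out. A minimal sketch under that assumption (names are illustrative):

ResourceUsage perUser = new ResourceUsage();
Resource container = Resource.newInstance(1024, 1); // 1 GB, 1 vcore
perUser.incUsed("x", container); // the isAllocate == true branch
perUser.decUsed("x", container); // the isAllocate == false branch
// usage in partition "x" is now back to (0 GB, 0 vcores)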

Example 10 with ResourceUsage

use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage in project hadoop by apache.

the class TestApplicationLimits method testHeadroom.

@Test
public void testHeadroom() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setUserLimit(CapacitySchedulerConfiguration.ROOT + "." + A, 25);
    setupQueueConfiguration(csConf);
    YarnConfiguration conf = new YarnConfiguration();
    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
    when(csContext.getConfiguration()).thenReturn(csConf);
    when(csContext.getConf()).thenReturn(conf);
    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB));
    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
    when(csContext.getRMContext()).thenReturn(rmContext);
    // Say cluster has 100 nodes of 16G each
    Resource clusterResource = Resources.createResource(100 * 16 * GB);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, "root", queues, queues, TestUtils.spyHook);
    ResourceUsage queueResUsage = rootQueue.getQueueResourceUsage();
    when(csContext.getClusterResourceUsage()).thenReturn(queueResUsage);
    // Manipulate queue 'a'
    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get(A));
    queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    String host_0 = "host_0";
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 16 * GB);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    RMContext rmContext = TestUtils.getMockRMContext();
    RMContext spyRMContext = spy(rmContext);
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    ResourceRequest amResourceRequest = mock(ResourceRequest.class);
    Resource amResource = Resources.createResource(0, 0);
    when(amResourceRequest.getCapability()).thenReturn(amResource);
    when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest);
    Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
    when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(rmAppAttempt);
    when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
    Mockito.doReturn(true).when(spyApps).containsKey((ApplicationId) Matchers.any());
    Priority priority_1 = TestUtils.createMockPriority(1);
    // Submit first application with some resource-requests from user_0, 
    // and check headroom
    final ApplicationAttemptId appAttemptId_0_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_0, user_0);
    List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
    app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_0.updateResourceRequests(app_0_0_requests);
    // Schedule to compute 
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    Resource expectedHeadroom = Resources.createResource(5 * 16 * GB, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // Submit second application from user_0, check headroom
    final ApplicationAttemptId appAttemptId_0_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_1, user_0);
    List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_1.updateResourceRequests(app_0_1_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // no change
    assertEquals(expectedHeadroom, app_0_1.getHeadroom());
    // Submit first application from user_1, check for new headroom
    final ApplicationAttemptId appAttemptId_1_0 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_1_0, user_1);
    List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_1_0.updateResourceRequests(app_1_0_requests);
    // Schedule to compute
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // With a second active user, the limit comes from an even split: 10 * 16 GB / 2
    expectedHeadroom = Resources.createResource(10 * 16 * GB / 2, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroom, app_0_1.getHeadroom());
    assertEquals(expectedHeadroom, app_1_0.getHeadroom());
    // Now reduce cluster size and check for the smaller headroom
    clusterResource = Resources.createResource(90 * 16 * GB);
    // Any change in cluster resource needs to trigger a user-limit recomputation.
    // LeafQueue#updateClusterResource normally handles this, but that method is
    // not used here, so invalidate the cached limits directly.
    queue.getUsersManager().userLimitNeedsRecompute();
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // Headroom shrinks with the smaller cluster: 9 * 16 GB / 2 = 72 GB per user
    expectedHeadroom = Resources.createResource(9 * 16 * GB / 2, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroom, app_0_1.getHeadroom());
    assertEquals(expectedHeadroom, app_1_0.getHeadroom());
}
Also used : RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMAppAttempt(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) ResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RecordFactory(org.apache.hadoop.yarn.factories.RecordFactory) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)
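
The non-obvious step in this test is the manual invalidation: computed user limits are cached, and because the test changes clusterResource without going through LeafQueue#updateClusterResource, it must force a recompute itself. The pattern, isolated from the test above (same variables as the test):

// Shrink the cluster without calling LeafQueue#updateClusterResource ...
clusterResource = Resources.createResource(90 * 16 * GB);
// ... so invalidate the cached per-user limits by hand ...
queue.getUsersManager().userLimitNeedsRecompute();
// ... and the next scheduling pass recomputes headroom from the new total.
queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource),
    SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);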

Aggregations

ResourceUsage (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage): 15
Resource (org.apache.hadoop.yarn.api.records.Resource): 6
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 4
Matchers.anyString (org.mockito.Matchers.anyString): 4
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 3
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 3
Priority (org.apache.hadoop.yarn.api.records.Priority): 3
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 3
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 3
LeafQueue (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue): 3
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 3
TreeSet (java.util.TreeSet): 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
RecordFactory (org.apache.hadoop.yarn.factories.RecordFactory): 2
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 2
RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt): 2
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 2
ParentQueue (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue): 2