
Example 6 with RMContext

Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.

From class TestRMNodeLabelsManager, method testReplaceLabelsFromNode.

@Test
public void testReplaceLabelsFromNode() throws Exception {
    RMContext rmContext = mock(RMContext.class);
    Dispatcher syncDispatcher = new InlineDispatcher();
    SchedulerEventHandler schedEventsHandler = new SchedulerEventHandler();
    syncDispatcher.register(SchedulerEventType.class, schedEventsHandler);
    when(rmContext.getDispatcher()).thenReturn(syncDispatcher);
    mgr.setRMContext(rmContext);
    mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("p1", "p2", "p3"));
    mgr.activateNode(NodeId.newInstance("n1", 1), SMALL_RESOURCE);
    mgr.activateNode(NodeId.newInstance("n2", 1), SMALL_RESOURCE);
    mgr.activateNode(NodeId.newInstance("n3", 1), SMALL_RESOURCE);
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p1"), toNodeId("n2:1"), toSet("p2"), toNodeId("n3"), toSet("p3")));
    assertTrue("Event should be sent when there is change in labels", schedEventsHandler.receivedEvent);
    assertEquals("3 node label mapping modified", 3, schedEventsHandler.updatedNodeToLabels.size());
    ImmutableMap<NodeId, Set<String>> modifiedMap = ImmutableMap.of(toNodeId("n1:1"), toSet("p1"), toNodeId("n2:1"), toSet("p2"), toNodeId("n3:1"), toSet("p3"));
    assertEquals("Node label mapping is not matching", modifiedMap, schedEventsHandler.updatedNodeToLabels);
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1:1"), toSet("p1")));
    assertFalse("No event should be sent when there is no change in labels", schedEventsHandler.receivedEvent);
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2:1"), toSet("p1"), toNodeId("n3"), toSet("p3")));
    assertTrue("Event should be sent when there is change in labels", schedEventsHandler.receivedEvent);
    assertEquals("Single node label mapping modified", 1, schedEventsHandler.updatedNodeToLabels.size());
    assertCollectionEquals(toSet("p1"), schedEventsHandler.updatedNodeToLabels.get(toNodeId("n2:1")));
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n3"), toSet("p2")));
    assertTrue("Event should be sent when there is change in labels @ HOST", schedEventsHandler.receivedEvent);
    assertEquals("Single node label mapping modified", 1, schedEventsHandler.updatedNodeToLabels.size());
    assertCollectionEquals(toSet("p2"), schedEventsHandler.updatedNodeToLabels.get(toNodeId("n3:1")));
    schedEventsHandler.receivedEvent = false;
    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2")));
    assertTrue("Event should be sent when labels are modified at host though labels were set @ NM level", schedEventsHandler.receivedEvent);
    assertEquals("Single node label mapping modified", 1, schedEventsHandler.updatedNodeToLabels.size());
    assertCollectionEquals(toSet("p2"), schedEventsHandler.updatedNodeToLabels.get(toNodeId("n1:1")));
    schedEventsHandler.receivedEvent = false;
}
Also used: RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Dispatcher (org.apache.hadoop.yarn.event.Dispatcher), InlineDispatcher (org.apache.hadoop.yarn.event.InlineDispatcher), Test (org.junit.Test)
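
The test above depends on a small SchedulerEventHandler helper whose body is not quoted. The following is a minimal sketch of what such a handler might look like, reconstructed for illustration on the assumption that NodeLabelsUpdateSchedulerEvent exposes the updated node-to-labels mapping:

private static class SchedulerEventHandler implements EventHandler<SchedulerEvent> {
    // Set when a NODE_LABELS_UPDATE event arrives; reset by the test between steps.
    boolean receivedEvent = false;
    // The mapping carried by the most recent NODE_LABELS_UPDATE event.
    Map<NodeId, Set<String>> updatedNodeToLabels = new HashMap<>();

    @Override
    public void handle(SchedulerEvent event) {
        if (event.getType() == SchedulerEventType.NODE_LABELS_UPDATE) {
            receivedEvent = true;
            updatedNodeToLabels =
                ((NodeLabelsUpdateSchedulerEvent) event).getUpdatedNodeToLabels();
        }
    }
}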

Example 7 with RMContext

Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.

From class ReservationSystemTestUtil, method createMockRMContext.

public static RMContext createMockRMContext() {
    RMContext context = mock(RMContext.class);
    when(context.getStateStore()).thenReturn(new MemoryRMStateStore());
    return context;
}
Also used: RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), MemoryRMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore)
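
A plausible (hypothetical) way to consume this helper is to wire the mocked context into a scheduler under test; because getStateStore() returns a MemoryRMStateStore, all state stays in memory and no external persistence is needed:

// Hypothetical usage, not taken from the quoted source.
RMContext context = ReservationSystemTestUtil.createMockRMContext();
CapacityScheduler scheduler = new CapacityScheduler();
// The scheduler picks up the state store and other services from the context.
scheduler.setRMContext(context);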

Example 8 with RMContext

Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.

From class TestChildQueueOrder, method testSortedQueues.

@Test
@SuppressWarnings("unchecked")
public void testSortedQueues() throws Exception {
    // Setup queue configs
    setupSortedQueues(csConf);
    Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
    CSQueue root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
    // Setup some nodes
    final int memoryPerNode = 10;
    final int coresPerNode = 16;
    final int numNodes = 1;
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode * GB);
    doNothing().when(node_0).releaseContainer(any(ContainerId.class), anyBoolean());
    final Resource clusterResource = Resources.createResource(numNodes * (memoryPerNode * GB), numNodes * coresPerNode);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Start testing
    CSQueue a = queues.get(A);
    CSQueue b = queues.get(B);
    CSQueue c = queues.get(C);
    CSQueue d = queues.get(D);
    // Make sure a/b/c/d each have >0 pending resources so that allocation will continue.
    queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    c.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    d.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    final String user_0 = "user_0";
    // Stub an App and its containerCompleted
    FiCaSchedulerApp app_0 = getMockApplication(0, user_0);
    doReturn(true).when(app_0).containerCompleted(any(RMContainer.class), any(ContainerStatus.class), any(RMContainerEventType.class), any(String.class));
    Priority priority = TestUtils.createMockPriority(1);
    ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
    DrainDispatcher drainDispatcher = new DrainDispatcher();
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
    when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
    when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(app_0.getApplicationId(), 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
    Container container = TestUtils.getMockContainer(containerId, node_0.getNodeID(), Resources.createResource(1 * GB), priority);
    RMContainer rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, node_0.getNodeID(), "user", rmContext);
    // Assign 1, 2, 3, and 4 x 1GB containers to queues a, b, c, and d respectively
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    for (int i = 0; i < 2; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    for (int i = 0; i < 3; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    for (int i = 0; i < 4; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    verifyQueueMetrics(a, 1 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 4 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Release 3 x 1GB containers from D
    for (int i = 0; i < 3; i++) {
        d.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    }
    verifyQueueMetrics(a, 1 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    // Manually reset resources on the node
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 1 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Assign 2 x 1GB containers to A
    for (int i = 0; i < 2; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Release a 1GB container from A
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    // Manually reset resources on the node
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 2 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Assign 1GB container to B 
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 3 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Release a 1GB container from B
    b.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    // Manually reset resources on the node
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 2 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Assign 1GB container to A
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Now do the real test, where B and D request a 1GB container
    // D should get the next container if the order is correct
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    InOrder allocationOrder = inOrder(d, b);
    allocationOrder.verify(d).assignContainers(eq(clusterResource), any(PlacementSet.class), any(ResourceLimits.class), any(SchedulingMode.class));
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(PlacementSet.class), any(ResourceLimits.class), any(SchedulingMode.class));
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    // D got the container
    verifyQueueMetrics(d, 2 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), HashMap (java.util.HashMap), ContainerAllocationExpirer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer), RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), Container (org.apache.hadoop.yarn.api.records.Container), RMContainerImpl (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), SystemMetricsPublisher (org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), PlacementSet (org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), InOrder (org.mockito.InOrder), FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode), Priority (org.apache.hadoop.yarn.api.records.Priority), Resource (org.apache.hadoop.yarn.api.records.Resource), RMApplicationHistoryWriter (org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RMContainerEventType (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType), ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits), Test (org.junit.Test)
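
This test leans on two local helpers, stubQueueAllocation and verifyQueueMetrics, whose bodies are not quoted. As a rough sketch, verifyQueueMetrics might only compare the queue's allocated memory against the expected value (a hypothetical reconstruction; the real helper may assert more):

// Hypothetical reconstruction; expectedUsedMemory is in MB (GB is assumed to be 1024).
private void verifyQueueMetrics(CSQueue queue, int expectedUsedMemory,
        Resource clusterResource) {
    // The allocated memory reported by the queue's metrics should match.
    assertEquals(expectedUsedMemory, queue.getMetrics().getAllocatedMB());
}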

Example 9 with RMContext

Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.

From class TestLeafQueue, method testApplicationQueuePercent.

@Test
public void testApplicationQueuePercent() throws Exception {
    Resource res = Resource.newInstance(10 * 1024, 10);
    CapacityScheduler scheduler = mock(CapacityScheduler.class);
    when(scheduler.getClusterResource()).thenReturn(res);
    when(scheduler.getResourceCalculator()).thenReturn(new DefaultResourceCalculator());
    ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getEpoch()).thenReturn(3L);
    when(rmContext.getScheduler()).thenReturn(scheduler);
    when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class);
    when(nlm.getResourceByLabel(any(), any())).thenReturn(res);
    when(rmContext.getNodeLabelManager()).thenReturn(nlm);
    // Queue "test" consumes 100% of the cluster, so its capacity and absolute
    // capacity are both 1.0f.
    Queue queue = createQueue("test", null, 1.0f, 1.0f);
    final String user = "user1";
    FiCaSchedulerApp app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    // Resource request
    Resource requestedResource = Resource.newInstance(1536, 2);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test" queue, 1536 used is 15% of both the queue and the cluster
    assertEquals(15.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2" is a child of root and its capacity is 50% of root. As a
    // child of root, its absolute capaicty is also 50%.
    queue = createQueue("test2", null, 0.5f, 0.5f);
    app = new FiCaSchedulerApp(appAttId, user, queue, queue.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test2" queue, 1536 used is 30% of "test2" and 15% of the cluster.
    assertEquals(30.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
    // Queue "test2.1" is 50% of queue "test2", which is 50% of the cluster.
    // Therefore, "test2.1" capacity is 50% and absolute capacity is 25%.
    AbstractCSQueue qChild = createQueue("test2.1", queue, 0.5f, 0.25f);
    app = new FiCaSchedulerApp(appAttId, user, qChild, qChild.getAbstractUsersManager(), rmContext);
    app.getAppAttemptResourceUsage().incUsed(requestedResource);
    // In "test2.1" queue, 1536 used is 60% of "test2.1" and 15% of the cluster.
    assertEquals(60.0f, app.getResourceUsageReport().getQueueUsagePercentage(), 0.01f);
    assertEquals(15.0f, app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
}
Also used: RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), Resource (org.apache.hadoop.yarn.api.records.Resource), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), DefaultResourceCalculator (org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator), FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Queue (org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue), RMNodeLabelsManager (org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager), Test (org.junit.Test)
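
The createAppAttemptId helper referenced above is also not quoted; a plausible reconstruction using the public YARN record factories would be:

// Hypothetical reconstruction of the helper used above.
private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
    // A cluster timestamp of 0 is sufficient for a unit test.
    ApplicationId applicationId = ApplicationId.newInstance(0L, appId);
    return ApplicationAttemptId.newInstance(applicationId, attemptId);
}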

Example 10 with RMContext

Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.

From class TestQueueMappings, method setup.

@Before
public void setup() {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setupQueueConfiguration(csConf);
    conf = new YarnConfiguration(csConf);
    cs = new CapacityScheduler();
    RMContext rmContext = TestUtils.getMockRMContext();
    cs.setConf(conf);
    cs.setRMContext(rmContext);
    cs.init(conf);
    cs.start();
}
Also used: RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Before (org.junit.Before)
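
Because setup() starts the CapacityScheduler service, a matching teardown would normally stop it to release its service threads. A hypothetical counterpart, not part of the quoted source:

@After
public void tearDown() throws Exception {
    if (cs != null) {
        // Stops the scheduler service started in setup().
        cs.stop();
    }
}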

Aggregations

RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 78 uses
Test (org.junit.Test): 45 uses
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 33 uses
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 24 uses
Resource (org.apache.hadoop.yarn.api.records.Resource): 24 uses
Configuration (org.apache.hadoop.conf.Configuration): 20 uses
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 20 uses
Priority (org.apache.hadoop.yarn.api.records.Priority): 14 uses
Before (org.junit.Before): 14 uses
IOException (java.io.IOException): 12 uses
ResourceManager (org.apache.hadoop.yarn.server.resourcemanager.ResourceManager): 11 uses
Dispatcher (org.apache.hadoop.yarn.event.Dispatcher): 10 uses
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 10 uses
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 10 uses
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 9 uses
RMApplicationHistoryWriter (org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter): 9 uses
SystemMetricsPublisher (org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher): 9 uses
CapacitySchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration): 8 uses
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 8 uses
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 8 uses