Example 16 with QueueMetrics

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics in project hadoop by apache.

From the class TestFSAppAttempt, method testHeadroom.

@Test
public void testHeadroom() {
    final FairScheduler mockScheduler = Mockito.mock(FairScheduler.class);
    Mockito.when(mockScheduler.getClock()).thenReturn(scheduler.getClock());
    final FSLeafQueue mockQueue = Mockito.mock(FSLeafQueue.class);
    final Resource queueMaxResources = Resource.newInstance(5 * 1024, 3);
    final Resource queueFairShare = Resources.createResource(4096, 2);
    final Resource queueUsage = Resource.newInstance(2048, 2);
    final Resource queueStarvation = Resources.subtract(queueFairShare, queueUsage);
    final Resource queueMaxResourcesAvailable = Resources.subtract(queueMaxResources, queueUsage);
    final Resource clusterResource = Resources.createResource(8192, 8);
    final Resource clusterUsage = Resources.createResource(2048, 2);
    final Resource clusterAvailable = Resources.subtract(clusterResource, clusterUsage);
    final QueueMetrics fakeRootQueueMetrics = Mockito.mock(QueueMetrics.class);
    Mockito.when(mockQueue.getMaxShare()).thenReturn(queueMaxResources);
    Mockito.when(mockQueue.getFairShare()).thenReturn(queueFairShare);
    Mockito.when(mockQueue.getResourceUsage()).thenReturn(queueUsage);
    Mockito.when(mockScheduler.getClusterResource()).thenReturn(clusterResource);
    Mockito.when(fakeRootQueueMetrics.getAllocatedResources()).thenReturn(clusterUsage);
    Mockito.when(mockScheduler.getRootQueueMetrics()).thenReturn(fakeRootQueueMetrics);
    ApplicationAttemptId applicationAttemptId = createAppAttemptId(1, 1);
    RMContext rmContext = resourceManager.getRMContext();
    FSAppAttempt schedulerApp = new FSAppAttempt(mockScheduler, applicationAttemptId, "user1", mockQueue, null, rmContext);
    // DominantResourceFairnessPolicy uses the min of memory and of CPU
    // across queue starvation, cluster availability, and the queue max share
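    // with the values above: memory = min(2048, 6144, 3072) = 2048 MB and
    // vcores = min(0, 6, 1) = 0, since the queue's vcore starvation is 2 - 2 = 0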
    Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy.getInstance(DominantResourceFairnessPolicy.class));
    verifyHeadroom(schedulerApp,
        min(queueStarvation.getMemorySize(),
            clusterAvailable.getMemorySize(),
            queueMaxResourcesAvailable.getMemorySize()),
        min(queueStarvation.getVirtualCores(),
            clusterAvailable.getVirtualCores(),
            queueMaxResourcesAvailable.getVirtualCores()));
    // FairSharePolicy and FifoPolicy ignore the queue's CPU, so the CPU
    // headroom is capped only by cluster-available and queue-max-available CPU
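    // here the memory headroom is still 2048 MB, while vcores = min(6, 1) = 1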
    Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy.getInstance(FairSharePolicy.class));
    verifyHeadroom(schedulerApp,
        min(queueStarvation.getMemorySize(),
            clusterAvailable.getMemorySize(),
            queueMaxResourcesAvailable.getMemorySize()),
        Math.min(clusterAvailable.getVirtualCores(),
            queueMaxResourcesAvailable.getVirtualCores()));
    Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy.getInstance(FifoPolicy.class));
    verifyHeadroom(schedulerApp,
        min(queueStarvation.getMemorySize(),
            clusterAvailable.getMemorySize(),
            queueMaxResourcesAvailable.getMemorySize()),
        Math.min(clusterAvailable.getVirtualCores(),
            queueMaxResourcesAvailable.getVirtualCores()));
}
Also used : QueueMetrics (org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), FairSharePolicy (org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy), Resource (org.apache.hadoop.yarn.api.records.Resource), FifoPolicy (org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), DominantResourceFairnessPolicy (org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy), Test (org.junit.Test)
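The snippet above calls two helpers from TestFSAppAttempt that are not reproduced on this page: a three-argument min and verifyHeadroom. A minimal sketch of what they plausibly look like (the exact bodies in the Hadoop source may differ):

private static long min(long a, long b, long c) {
    // smallest of the three per-resource caps
    return Math.min(Math.min(a, b), c);
}

protected void verifyHeadroom(FSAppAttempt schedulerApp, long expectedMemory, long expectedCPU) {
    // the computed headroom should match the expected caps exactly
    Resource headroom = schedulerApp.getHeadroom();
    Assert.assertEquals(expectedMemory, headroom.getMemorySize());
    Assert.assertEquals(expectedCPU, headroom.getVirtualCores());
}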

Example 17 with QueueMetrics

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics in project hadoop by apache.

From the class TestFifoScheduler, method testAppAttemptMetrics.

@Test(timeout = 5000)
public void testAppAttemptMetrics() throws Exception {
    AsyncDispatcher dispatcher = new InlineDispatcher();
    FifoScheduler scheduler = new FifoScheduler();
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, null, null, null, scheduler);
    ((RMContextImpl) rmContext).setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
    Configuration conf = new Configuration();
    ((RMContextImpl) rmContext).setScheduler(scheduler);
    scheduler.setRMContext(rmContext);
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, rmContext);
    QueueMetrics metrics = scheduler.getRootQueueMetrics();
    int beforeAppsSubmitted = metrics.getAppsSubmitted();
    ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "queue", "user");
    scheduler.handle(appEvent);
    SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    scheduler.handle(attemptEvent);
    appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2);
    SchedulerEvent attemptEvent2 = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
    scheduler.handle(attemptEvent2);
    int afterAppsSubmitted = metrics.getAppsSubmitted();
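    // two attempt-added events were handled, but appsSubmitted counts
    // applications rather than attempts, so the delta below is exactly one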
    Assert.assertEquals(1, afterAppsSubmitted - beforeAppsSubmitted);
    scheduler.stop();
}
Also used : RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), RMContextImpl (org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl), CapacitySchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), InlineDispatcher (org.apache.hadoop.yarn.event.InlineDispatcher), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), RMApplicationHistoryWriter (org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter), SystemMetricsPublisher (org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), SchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent), AppAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent), AppAttemptAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent), NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent), NodeRemovedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent), NodeResourceUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourceUpdateSchedulerEvent), NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent), QueueMetrics (org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics), Test (org.junit.Test)
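QueueMetrics exposes several other counters that can be read the same way. A minimal sketch (not part of the original test) of sampling a few of them after the events above have been handled:

int pending = metrics.getAppsPending();        // applications waiting to be scheduled
int running = metrics.getAppsRunning();        // applications currently running
long allocatedMB = metrics.getAllocatedMB();   // memory currently allocated under this queue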

Example 18 with QueueMetrics

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics in project hadoop by apache.

From the class TestSimpleCapacityReplanner, method testReplanningPlanCapacityLoss.

@Test
public void testReplanningPlanCapacityLoss() throws PlanningException {
    Resource clusterCapacity = Resource.newInstance(100 * 1024, 100);
    Resource minAlloc = Resource.newInstance(1024, 1);
    Resource maxAlloc = Resource.newInstance(1024 * 8, 8);
    ResourceCalculator res = new DefaultResourceCalculator();
    long step = 1L;
    Clock clock = mock(Clock.class);
    ReservationAgent agent = mock(ReservationAgent.class);
    SharingPolicy policy = new NoOverCommitPolicy();
    policy.init("root.dedicated", null);
    QueueMetrics queueMetrics = mock(QueueMetrics.class);
    when(clock.getTime()).thenReturn(0L);
    SimpleCapacityReplanner enf = new SimpleCapacityReplanner(clock);
    RMContext context = ReservationSystemTestUtil.createMockRMContext();
    ReservationSchedulerConfiguration conf = mock(ReservationSchedulerConfiguration.class);
    when(conf.getEnforcementWindow(any(String.class))).thenReturn(6L);
    enf.init("blah", conf);
    // initialize the plan with the full cluster capacity; part of it is
    // removed later to force replanning
    InMemoryPlan plan = new InMemoryPlan(queueMetrics, policy, agent,
        clusterCapacity, step, res, minAlloc, maxAlloc, "dedicated", enf,
        true, context, clock);
    // add reservations filling the plan (separated by 1 ms, so we are sure
    // each reservation follows the previous one on acceptance)
    long ts = System.currentTimeMillis();
    ReservationId r1 = ReservationId.newInstance(ts, 1);
    int[] f5 = { 20, 20, 20, 20, 20 };
    ReservationDefinition rDef = ReservationSystemTestUtil
        .createSimpleReservationDefinition(0, 0 + f5.length, f5.length);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(r1, rDef, "u3",
            "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
            minAlloc), false));
    when(clock.getTime()).thenReturn(1L);
    ReservationId r2 = ReservationId.newInstance(ts, 2);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(r2, rDef, "u4",
            "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
            minAlloc), false));
    when(clock.getTime()).thenReturn(2L);
    ReservationId r3 = ReservationId.newInstance(ts, 3);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(r3, rDef, "u5",
            "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
            minAlloc), false));
    when(clock.getTime()).thenReturn(3L);
    ReservationId r4 = ReservationId.newInstance(ts, 4);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(r4, rDef, "u6",
            "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
            minAlloc), false));
    when(clock.getTime()).thenReturn(4L);
    ReservationId r5 = ReservationId.newInstance(ts, 5);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(r5, rDef, "u7",
            "dedicated", 0, 0 + f5.length, generateAllocation(0, f5), res,
            minAlloc), false));
    int[] f6 = { 50, 50, 50, 50, 50 };
    ReservationId r6 = ReservationId.newInstance(ts, 6);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(r6, rDef, "u3",
            "dedicated", 10, 10 + f6.length, generateAllocation(10, f6), res,
            minAlloc), false));
    when(clock.getTime()).thenReturn(6L);
    ReservationId r7 = ReservationId.newInstance(ts, 7);
    assertTrue(plan.toString(),
        plan.addReservation(new InMemoryReservationAllocation(r7, rDef, "u4",
            "dedicated", 10, 10 + f6.length, generateAllocation(10, f6), res,
            minAlloc), false));
    // remove some of the resources (requires replanning)
    plan.setTotalCapacity(Resource.newInstance(70 * 1024, 70));
    when(clock.getTime()).thenReturn(0L);
    // run the replanner
    enf.plan(plan, null);
    // check which reservations are still present
    assertNotNull(plan.getReservationById(r1));
    assertNotNull(plan.getReservationById(r2));
    assertNotNull(plan.getReservationById(r3));
    assertNotNull(plan.getReservationById(r6));
    assertNotNull(plan.getReservationById(r7));
    // and which ones are removed
    assertNull(plan.getReservationById(r4));
    assertNull(plan.getReservationById(r5));
    // check that the total reserved resources at each moment in time do not
    // exceed the reduced capacity
    for (int i = 0; i < 20; i++) {
        long tot = 0;
        for (ReservationAllocation r : plan.getReservationsAtTime(i)) {
            // accumulate across all reservations active at time i
            tot += r.getResourcesAtTime(i).getMemorySize();
        }
        assertTrue(tot <= 70 * 1024);
    }
}
Also used : InMemoryPlan (org.apache.hadoop.yarn.server.resourcemanager.reservation.InMemoryPlan), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), SharingPolicy (org.apache.hadoop.yarn.server.resourcemanager.reservation.SharingPolicy), ReservationDefinition (org.apache.hadoop.yarn.api.records.ReservationDefinition), Resource (org.apache.hadoop.yarn.api.records.Resource), Clock (org.apache.hadoop.yarn.util.Clock), ReservationSchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration), ResourceCalculator (org.apache.hadoop.yarn.util.resource.ResourceCalculator), DefaultResourceCalculator (org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator), QueueMetrics (org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics), NoOverCommitPolicy (org.apache.hadoop.yarn.server.resourcemanager.reservation.NoOverCommitPolicy), ReservationId (org.apache.hadoop.yarn.api.records.ReservationId), InMemoryReservationAllocation (org.apache.hadoop.yarn.server.resourcemanager.reservation.InMemoryReservationAllocation), ReservationAllocation (org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationAllocation), Test (org.junit.Test)
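The generateAllocation helper used throughout the test is not reproduced here. A plausible sketch, assuming each entry of the alloc array is the number of minAlloc-sized containers (1024 MB, 1 vcore) reserved for one step-long interval, and assuming java.util.TreeMap and org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInterval (the helper in the Hadoop source may differ in detail):

private Map<ReservationInterval, Resource> generateAllocation(int startTime, int[] alloc) {
    Map<ReservationInterval, Resource> req = new TreeMap<>();
    for (int i = 0; i < alloc.length; i++) {
        // a one-step interval carrying alloc[i] containers of 1024 MB / 1 vcore
        req.put(new ReservationInterval(startTime + i, startTime + i + 1),
            Resource.newInstance(1024 * alloc[i], alloc[i]));
    }
    return req;
}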

Example 19 with QueueMetrics

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics in project hadoop by apache.

From the class TestCapacityOverTimePolicy, method setup.

@Before
public void setup() throws Exception {
    // 24h window
    timeWindow = 86400000L;
    // 1 sec step
    step = 1000L;
    // 25% avg cap on capacity
    avgConstraint = 25;
    // 70% instantaneous cap on capacity
    instConstraint = 70;
    initTime = System.currentTimeMillis();
    minAlloc = Resource.newInstance(1024, 1);
    res = new DefaultResourceCalculator();
    maxAlloc = Resource.newInstance(1024 * 8, 8);
    mAgent = mock(ReservationAgent.class);
    QueueMetrics rootQueueMetrics = mock(QueueMetrics.class);
    String reservationQ = ReservationSystemTestUtil.getFullReservationQueueName();
    Resource clusterResource = ReservationSystemTestUtil.calculateClusterResource(totCont);
    ReservationSchedulerConfiguration conf = ReservationSystemTestUtil.createConf(reservationQ, timeWindow, instConstraint, avgConstraint);
    CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
    policy.init(reservationQ, conf);
    RMContext context = ReservationSystemTestUtil.createMockRMContext();
    plan = new InMemoryPlan(rootQueueMetrics, policy, mAgent, clusterResource, step, res, minAlloc, maxAlloc, "dedicated", null, true, context);
}
Also used : QueueMetrics (org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), DefaultResourceCalculator (org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator), ReservationAgent (org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent), Resource (org.apache.hadoop.yarn.api.records.Resource), Before (org.junit.Before)
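With this fixture in place, a typical test adds reservations and lets the policy accept or reject them. A minimal sketch (hypothetical, not from the Hadoop source), where createAllocation is an assumed helper that builds an InMemoryReservationAllocation requesting the given fraction of clusterResource at a single instant:

@Test(expected = PlanningException.class)
public void testExceedInstantaneousCap() throws Exception {
    // 80% of the cluster at one instant violates the 70% instantaneous cap,
    // so CapacityOverTimePolicy should reject the addition
    plan.addReservation(createAllocation(initTime, 0.8), false);
}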

Example 20 with QueueMetrics

Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics in project hadoop by apache.

From the class TestNoOverCommitPolicy, method setup.

@Before
public void setup() throws Exception {
    // 1 sec step
    step = 1000L;
    initTime = System.currentTimeMillis();
    minAlloc = Resource.newInstance(1024, 1);
    res = new DefaultResourceCalculator();
    maxAlloc = Resource.newInstance(1024 * 8, 8);
    mAgent = mock(ReservationAgent.class);
    String reservationQ = ReservationSystemTestUtil.getFullReservationQueueName();
    QueueMetrics rootQueueMetrics = mock(QueueMetrics.class);
    Resource clusterResource = ReservationSystemTestUtil.calculateClusterResource(totCont);
    ReservationSchedulerConfiguration conf = mock(ReservationSchedulerConfiguration.class);
    NoOverCommitPolicy policy = new NoOverCommitPolicy();
    policy.init(reservationQ, conf);
    RMContext context = ReservationSystemTestUtil.createMockRMContext();
    plan = new InMemoryPlan(rootQueueMetrics, policy, mAgent, clusterResource, step, res, minAlloc, maxAlloc, "dedicated", null, true, context);
}
Also used : QueueMetrics (org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), DefaultResourceCalculator (org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator), ReservationAgent (org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent), Resource (org.apache.hadoop.yarn.api.records.Resource), Before (org.junit.Before)
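Unlike CapacityOverTimePolicy above, NoOverCommitPolicy enforces a single rule: the plan may never commit more than its total capacity at any instant. A minimal sketch of the complementary assertion (again with the hypothetical createAllocation helper from the previous example):

@Test
public void testWithinCapacityAccepted() throws Exception {
    // a reservation using the whole cluster, but no more, is accepted
    assertTrue(plan.addReservation(createAllocation(initTime, 1.0), false));
}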

Aggregations

QueueMetrics (org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics): 25
Test (org.junit.Test): 14
Resource (org.apache.hadoop.yarn.api.records.Resource): 10
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 9
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 7
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 7
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 7
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent): 7
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 6
Container (org.apache.hadoop.yarn.api.records.Container): 5
Before (org.junit.Before): 5
ArrayList (java.util.ArrayList): 4
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 4
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 4
HashMap (java.util.HashMap): 3
AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse): 3
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 3
DistributedSchedulingAllocateResponse (org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateResponse): 3
InMemoryPlan (org.apache.hadoop.yarn.server.resourcemanager.reservation.InMemoryPlan): 3
ReservationSchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration): 3