
Example 31 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class TestAppSchedulingInfo, method testSchedulerKeyAccounting.

@Test
public void testSchedulerKeyAccounting() {
    ApplicationId appIdImpl = ApplicationId.newInstance(0, 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appIdImpl, 1);
    Queue queue = mock(Queue.class);
    doReturn(mock(QueueMetrics.class)).when(queue).getMetrics();
    AppSchedulingInfo info = new AppSchedulingInfo(appAttemptId, "test", queue, mock(ActiveUsersManager.class), 0, new ResourceUsage());
    Assert.assertEquals(0, info.getSchedulerKeys().size());
    Priority pri1 = Priority.newInstance(1);
    ResourceRequest req1 = ResourceRequest.newInstance(pri1, ResourceRequest.ANY, Resource.newInstance(1024, 1), 1);
    Priority pri2 = Priority.newInstance(2);
    ResourceRequest req2 = ResourceRequest.newInstance(pri2, ResourceRequest.ANY, Resource.newInstance(1024, 1), 2);
    List<ResourceRequest> reqs = new ArrayList<>();
    reqs.add(req1);
    reqs.add(req2);
    info.updateResourceRequests(reqs, false);
    ArrayList<SchedulerRequestKey> keys = new ArrayList<>(info.getSchedulerKeys());
    Assert.assertEquals(2, keys.size());
    Assert.assertEquals(SchedulerRequestKey.create(req1), keys.get(0));
    Assert.assertEquals(SchedulerRequestKey.create(req2), keys.get(1));
    // iterate to verify no ConcurrentModificationException
    for (SchedulerRequestKey schedulerKey : info.getSchedulerKeys()) {
        info.allocate(NodeType.OFF_SWITCH, null, schedulerKey, null);
    }
    Assert.assertEquals(1, info.getSchedulerKeys().size());
    Assert.assertEquals(SchedulerRequestKey.create(req2), info.getSchedulerKeys().iterator().next());
    req2 = ResourceRequest.newInstance(pri2, ResourceRequest.ANY, Resource.newInstance(1024, 1), 1);
    reqs.clear();
    reqs.add(req2);
    info.updateResourceRequests(reqs, false);
    info.allocate(NodeType.OFF_SWITCH, null, SchedulerRequestKey.create(req2), null);
    Assert.assertEquals(0, info.getSchedulerKeys().size());
    req1 = ResourceRequest.newInstance(pri1, ResourceRequest.ANY, Resource.newInstance(1024, 1), 5);
    reqs.clear();
    reqs.add(req1);
    info.updateResourceRequests(reqs, false);
    Assert.assertEquals(1, info.getSchedulerKeys().size());
    Assert.assertEquals(SchedulerRequestKey.create(req1), info.getSchedulerKeys().iterator().next());
    req1 = ResourceRequest.newInstance(pri1, ResourceRequest.ANY, Resource.newInstance(1024, 1), 0);
    reqs.clear();
    reqs.add(req1);
    info.updateResourceRequests(reqs, false);
    Assert.assertEquals(0, info.getSchedulerKeys().size());
}
Also used : Priority(org.apache.hadoop.yarn.api.records.Priority) ArrayList(java.util.ArrayList) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) FSLeafQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue) Test(org.junit.Test)
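The test above shows AppSchedulingInfo keeping one SchedulerRequestKey per outstanding priority, ordered with priority 1 ahead of priority 2, and retiring a key once its pending ask drops to zero. The snippet below is a minimal standalone sketch (not part of the Hadoop test suite; the class name is invented for illustration, and it assumes the hadoop-yarn-api and hadoop-yarn-server-common jars are on the classpath) showing that keys built with SchedulerRequestKey.create(...) from requests at the same priority are equal, which is what lets the scheduler collapse repeated asks onto a single sorted key.

import java.util.TreeSet;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

public class SchedulerRequestKeySketch {
    public static void main(String[] args) {
        Resource oneGb = Resource.newInstance(1024, 1);
        // Two asks at the same priority but with different container counts.
        ResourceRequest reqA = ResourceRequest.newInstance(
                Priority.newInstance(1), ResourceRequest.ANY, oneGb, 1);
        ResourceRequest reqB = ResourceRequest.newInstance(
                Priority.newInstance(1), ResourceRequest.ANY, oneGb, 5);

        // The key is derived from the request's priority (and allocation request id),
        // not from the number of containers, so both asks map to the same key.
        SchedulerRequestKey keyA = SchedulerRequestKey.create(reqA);
        SchedulerRequestKey keyB = SchedulerRequestKey.create(reqB);
        System.out.println(keyA.equals(keyB)); // expected: true

        // SchedulerRequestKey is Comparable, so a sorted set keeps one entry per key,
        // mirroring the single entry per priority seen in getSchedulerKeys().
        TreeSet<SchedulerRequestKey> keys = new TreeSet<>();
        keys.add(keyA);
        keys.add(keyB);
        System.out.println(keys.size()); // expected: 1
    }
}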

Example 32 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class TestSchedulerApplicationAttempt, method testSchedulingOpportunityOverflow.

@Test
public void testSchedulingOpportunityOverflow() throws Exception {
    ApplicationAttemptId attemptId = createAppAttemptId(0, 0);
    Queue queue = createQueue("test", null);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getEpoch()).thenReturn(3L);
    SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(attemptId, "user", queue, queue.getAbstractUsersManager(), rmContext);
    Priority priority = Priority.newInstance(1);
    SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
    assertEquals(0, app.getSchedulingOpportunities(schedulerKey));
    app.addSchedulingOpportunity(schedulerKey);
    assertEquals(1, app.getSchedulingOpportunities(schedulerKey));
    // verify the count is capped at MAX_VALUE and does not overflow
    app.setSchedulingOpportunities(schedulerKey, Integer.MAX_VALUE - 1);
    assertEquals(Integer.MAX_VALUE - 1, app.getSchedulingOpportunities(schedulerKey));
    app.addSchedulingOpportunity(schedulerKey);
    assertEquals(Integer.MAX_VALUE, app.getSchedulingOpportunities(schedulerKey));
    app.addSchedulingOpportunity(schedulerKey);
    assertEquals(Integer.MAX_VALUE, app.getSchedulingOpportunities(schedulerKey));
}
Also used : RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) Priority(org.apache.hadoop.yarn.api.records.Priority) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) Test(org.junit.Test)
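The assertions above verify that the per-key scheduling-opportunity counter saturates at Integer.MAX_VALUE instead of wrapping to a negative value. Below is a minimal, self-contained sketch of that saturating-increment pattern; the class and method names are illustrative and are not part of the Hadoop API.

import java.util.HashMap;
import java.util.Map;

public class SaturatingCounterSketch {
    private final Map<String, Integer> opportunities = new HashMap<>();

    // Increment the counter for a key, but never past Integer.MAX_VALUE.
    void addOpportunity(String key) {
        opportunities.merge(key, 1, (current, one) ->
                current == Integer.MAX_VALUE ? Integer.MAX_VALUE : current + 1);
    }

    int getOpportunities(String key) {
        return opportunities.getOrDefault(key, 0);
    }

    public static void main(String[] args) {
        SaturatingCounterSketch c = new SaturatingCounterSketch();
        c.opportunities.put("schedulerKey", Integer.MAX_VALUE - 1);
        c.addOpportunity("schedulerKey"); // reaches Integer.MAX_VALUE
        c.addOpportunity("schedulerKey"); // stays there instead of overflowing
        System.out.println(c.getOpportunities("schedulerKey") == Integer.MAX_VALUE); // true
    }
}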

Example 33 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class TestLeafQueue, method testLocalityDelaySkipsApplication.

@Test
public void testLocalityDelaySkipsApplication() throws Exception {
    // Manipulate queue 'a'
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // User
    String user_0 = "user_0";
    // Submit applications
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    a.submitApplicationAttempt(app_1, user_0);
    // Setup some nodes and racks
    String host_0 = "127.0.0.1";
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8 * GB);
    String host_1 = "127.0.0.2";
    String rack_1 = "rack_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8 * GB);
    String host_2 = "127.0.0.3";
    String rack_2 = "rack_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    final int numNodes = 3;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    // Setup resource-requests and submit
    // App_0 has node-local requests for host_0/host_1, and app_1 has a node-local
    // request for host_2.
    Priority priority = TestUtils.createMockPriority(1);
    SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
    List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
    app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority, recordFactory));
    app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
    // one extra
    app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
        1 * GB, 3, true, priority, recordFactory));
    app_0.updateResourceRequests(app_0_requests_0);
    List<ResourceRequest> app_1_requests_0 = new ArrayList<ResourceRequest>();
    app_1_requests_0.add(TestUtils.createResourceRequest(host_2, 1 * GB, 1, true, priority, recordFactory));
    app_1_requests_0.add(TestUtils.createResourceRequest(rack_2, 1 * GB, 1, true, priority, recordFactory));
    // one extra
    app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
        1 * GB, 1, true, priority, recordFactory));
    app_1.updateResourceRequests(app_1_requests_0);
    // Start testing...
    // When doing allocation, even though app_0 was submitted earlier than app_1,
    // app_1 can still get allocated because app_0 is waiting out the node-locality delay.
    CSAssignment assignment = null;
    // Check that app_0's scheduling opportunities increased and app_1 got allocated
    assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyCSAssignment(clusterResource, assignment, a, nodes, apps);
    verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
    assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
    assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
    assertEquals(0, app_0.getLiveContainers().size());
    assertEquals(1, app_1.getLiveContainers().size());
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) ArrayList(java.util.ArrayList) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) ResourceLimits(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) Test(org.junit.Test)
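The point of the test above is the node-locality delay: app_0 only has node-local asks for host_0/host_1, so when node_2 is offered it is skipped (its scheduling-opportunity count rises to 1) and app_1, which asked for host_2, receives the container. The sketch below illustrates that skip decision in isolation; the names and the single threshold are hypothetical simplifications, not the CapacityScheduler implementation.

public class LocalityDelaySketch {
    /**
     * Decide whether to skip an application on an offered node.
     *
     * @param hasLocalAskForNode      the app has a node-local ask for this node
     * @param schedulingOpportunities how many offers the app has already passed up
     * @param localityDelay           offers to wait before relaxing to rack/off-switch
     */
    static boolean shouldSkip(boolean hasLocalAskForNode,
                              int schedulingOpportunities,
                              int localityDelay) {
        if (hasLocalAskForNode) {
            // A node-local ask for this very node is schedulable right away.
            return false;
        }
        // Otherwise wait out the delay before falling back to a non-local allocation.
        return schedulingOpportunities < localityDelay;
    }

    public static void main(String[] args) {
        // app_0: no local ask for node_2, only one missed offer so far -> skipped.
        System.out.println(shouldSkip(false, 1, 40)); // true
        // app_1: has a node-local ask for node_2 -> allocated immediately.
        System.out.println(shouldSkip(true, 0, 40));  // false
    }
}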

Example 34 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class TestReservations, method testGetAppToUnreserve.

@Test
public void testGetAppToUnreserve() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf);
    final String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    Resource clusterResource = Resources.createResource(2 * 8 * GB);
    // Setup resource-requests
    Priority p = TestUtils.createMockPriority(5);
    SchedulerRequestKey priorityMap = toSchedulerKey(p);
    Resource capability = Resources.createResource(2 * GB, 0);
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
    RMContext rmContext = mock(RMContext.class);
    ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
    DrainDispatcher drainDispatcher = new DrainDispatcher();
    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
    when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
    when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(app_0.getApplicationId(), 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
    Container container = TestUtils.getMockContainer(containerId, node_1.getNodeID(), Resources.createResource(2 * GB), priorityMap.getPriority());
    RMContainer rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, node_1.getNodeID(), "user", rmContext);
    Container container_1 = TestUtils.getMockContainer(containerId, node_0.getNodeID(), Resources.createResource(1 * GB), priorityMap.getPriority());
    RMContainer rmContainer_1 = new RMContainerImpl(container_1, SchedulerRequestKey.extractFrom(container_1), appAttemptId, node_0.getNodeID(), "user", rmContext);
    // no reserved containers
    NodeId unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability, cs.getResourceCalculator(), clusterResource);
    assertEquals(null, unreserveId);
    // no reserved containers - reserve then unreserve
    app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
    app_0.unreserve(priorityMap, node_0, rmContainer_1);
    unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability, cs.getResourceCalculator(), clusterResource);
    assertEquals(null, unreserveId);
    // no container large enough is reserved
    app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
    unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability, cs.getResourceCalculator(), clusterResource);
    assertEquals(null, unreserveId);
    // reserve one that is now large enough
    app_0.reserve(node_1, priorityMap, rmContainer, container);
    unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability, cs.getResourceCalculator(), clusterResource);
    assertEquals(node_1.getNodeID(), unreserveId);
}
Also used : DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) ContainerAllocationExpirer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer) Priority(org.apache.hadoop.yarn.api.records.Priority) Resource(org.apache.hadoop.yarn.api.records.Resource) RMApplicationHistoryWriter(org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) RMContainerImpl(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl) SystemMetricsPublisher(org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ActiveUsersManager(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager) Test(org.junit.Test)
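What testGetAppToUnreserve checks is that getNodeIdToUnreserve only returns a node whose reserved container is at least as large as the capability currently needed: the 1 GB reservation on node_0 is ignored, while the 2 GB reservation on node_1 is returned. The sketch below restates that selection rule with a plain map; the method and data structure are hypothetical, not FiCaSchedulerApp's internals.

import java.util.LinkedHashMap;
import java.util.Map;

public class UnreserveSketch {
    // Return the first node whose reserved memory covers the required memory, or null.
    static String nodeIdToUnreserve(Map<String, Integer> reservedMbByNode, int requiredMb) {
        for (Map.Entry<String, Integer> e : reservedMbByNode.entrySet()) {
            if (e.getValue() >= requiredMb) {
                return e.getKey();
            }
        }
        return null; // no reservation is large enough to satisfy the request
    }

    public static void main(String[] args) {
        Map<String, Integer> reservations = new LinkedHashMap<>();
        reservations.put("node_0", 1024);                           // too small for a 2 GB ask
        System.out.println(nodeIdToUnreserve(reservations, 2048));  // null
        reservations.put("node_1", 2048);                           // large enough
        System.out.println(nodeIdToUnreserve(reservations, 2048));  // node_1
    }
}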

Example 35 with SchedulerRequestKey

Use of org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey in project hadoop by apache.

From the class TestContinuousScheduling, method testFairSchedulerContinuousSchedulingInitTime.

@Test
public void testFairSchedulerContinuousSchedulingInitTime() throws Exception {
    scheduler.start();
    int priorityValue;
    Priority priority;
    FSAppAttempt fsAppAttempt;
    ResourceRequest request1;
    ResourceRequest request2;
    ApplicationAttemptId id11;
    priorityValue = 1;
    id11 = createAppAttemptId(1, 1);
    createMockRMApp(id11);
    priority = Priority.newInstance(priorityValue);
    scheduler.addApplication(id11.getApplicationId(), "root.queue1", "user1", false);
    scheduler.addApplicationAttempt(id11, false, false);
    fsAppAttempt = scheduler.getApplicationAttempt(id11);
    String hostName = "127.0.0.1";
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(16 * 1024, 16), 1, hostName);
    List<ResourceRequest> ask1 = new ArrayList<>();
    request1 = createResourceRequest(1024, 8, node1.getRackName(), priorityValue, 1, true);
    request2 = createResourceRequest(1024, 8, ResourceRequest.ANY, priorityValue, 1, true);
    ask1.add(request1);
    ask1.add(request2);
    scheduler.allocate(id11, ask1, new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);
    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
    scheduler.handle(nodeEvent1);
    FSSchedulerNode node = (FSSchedulerNode) scheduler.getSchedulerNode(node1.getNodeID());
    // Tick the clock so the fsApp start time differs from the initScheduler time
    mockClock.tickSec(delayThresholdTimeMs / 1000);
    scheduler.attemptScheduling(node);
    Map<SchedulerRequestKey, Long> lastScheduledContainer = fsAppAttempt.getLastScheduledContainer();
    long initSchedulerTime = lastScheduledContainer.get(TestUtils.toSchedulerKey(priority));
    assertEquals(delayThresholdTimeMs, initSchedulerTime);
}
Also used : NodeAddedSchedulerEvent(org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent) Priority(org.apache.hadoop.yarn.api.records.Priority) ArrayList(java.util.ArrayList) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) SchedulerRequestKey(org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) Test(org.junit.Test)
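The assertion above relies on FSAppAttempt tracking, per SchedulerRequestKey, when a container was last scheduled. The snippet below is a minimal sketch of that bookkeeping (assuming the hadoop-yarn jars are on the classpath; the surrounding class is invented for illustration): because equivalent requests produce equal keys, a timestamp stored under one key can be found again with a key rebuilt later, which mirrors how the test reads the value back by priority.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;

public class LastScheduledSketch {
    public static void main(String[] args) {
        Map<SchedulerRequestKey, Long> lastScheduledContainer = new HashMap<>();

        ResourceRequest ask = ResourceRequest.newInstance(
                Priority.newInstance(1), ResourceRequest.ANY,
                Resource.newInstance(1024, 8), 1);
        SchedulerRequestKey key = SchedulerRequestKey.create(ask);

        // Record when this key was last considered for scheduling.
        lastScheduledContainer.put(key, System.currentTimeMillis());

        // A key rebuilt from an equivalent request hashes to the same entry.
        SchedulerRequestKey sameKey = SchedulerRequestKey.create(
                ResourceRequest.newInstance(Priority.newInstance(1),
                        ResourceRequest.ANY, Resource.newInstance(1024, 8), 1));
        System.out.println(lastScheduledContainer.containsKey(sameKey)); // expected: true
    }
}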

Aggregations

SchedulerRequestKey (org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey): 35
Test (org.junit.Test): 16
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 15
Priority (org.apache.hadoop.yarn.api.records.Priority): 15
Resource (org.apache.hadoop.yarn.api.records.Resource): 13
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 12
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 10
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 10
Container (org.apache.hadoop.yarn.api.records.Container): 9
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 9
ArrayList (java.util.ArrayList): 8
HashMap (java.util.HashMap): 8
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 8
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 8
ActiveUsersManager (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager): 7
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 6
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 6
Map (java.util.Map): 5
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
RMContainerImpl (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl): 4