Example 6 with ContainerRequestEvent

Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.

From the class TestRMContainerAllocator, method testPreemptReducers.

@Test(timeout = 30000)
public void testPreemptReducers() throws Exception {
    LOG.info("Running testPreemptReducers");
    Configuration conf = new Configuration();
    MyResourceManager rm = new MyResourceManager(conf);
    rm.start();
    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
    // Submit the application
    RMApp app = rm.submitApp(1024);
    dispatcher.await();
    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
    amNodeManager.nodeHeartbeat(true);
    dispatcher.await();
    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
    rm.sendAMLaunched(appAttemptId);
    dispatcher.await();
    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob, SystemClock.getInstance());
    allocator.setMapResourceRequest(BuilderUtils.newResource(1024, 1));
    allocator.setReduceResourceRequest(BuilderUtils.newResource(1024, 1));
    RMContainerAllocator.AssignedRequests assignedRequests = allocator.getAssignedRequests();
    RMContainerAllocator.ScheduledRequests scheduledRequests = allocator.getScheduledRequests();
    ContainerRequestEvent event1 = createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
    scheduledRequests.maps.put(mock(TaskAttemptId.class), new RMContainerRequestor.ContainerRequest(event1, null, null));
    assignedRequests.reduces.put(mock(TaskAttemptId.class), mock(Container.class));
    allocator.preemptReducesIfNeeded();
    Assert.assertEquals("The reducer is not preempted", 1, assignedRequests.preemptionWaitingReduces.size());
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Container (org.apache.hadoop.yarn.api.records.Container), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
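
The test relies on a createReq helper that this excerpt does not include. A rough sketch, assuming only the public ContainerRequestEvent constructor and the MRBuilderUtils factories already referenced by the test, is shown below; the real helper in TestRMContainerAllocator may differ (the two boolean flags seen in the call above, presumably selecting an earlier-failed attempt and a reduce request, are omitted), and the rack hint is an assumption:

// Hypothetical sketch of a createReq-style helper; not the actual helper
// from TestRMContainerAllocator, which is not shown in this excerpt.
private static ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
        int memory, String[] hosts) {
    // Build a MAP task attempt id under the given job.
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
    // Ask for a container of the requested size on the preferred hosts;
    // "/default-rack" is assumed here as the rack hint.
    Resource capability = Resource.newInstance(memory, 1);
    return new ContainerRequestEvent(attemptId, capability, hosts,
            new String[] { "/default-rack" });
}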

Example 7 with ContainerRequestEvent

Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.

From the class TestRMContainerAllocator, method checkAssignments.

private void checkAssignments(ContainerRequestEvent[] requests, List<TaskAttemptContainerAssignedEvent> assignments, boolean checkHostMatch) {
    Assert.assertNotNull("Container not assigned", assignments);
    Assert.assertEquals("Assigned count not correct", requests.length, assignments.size());
    // check for uniqueness of containerIDs
    Set<ContainerId> containerIds = new HashSet<ContainerId>();
    for (TaskAttemptContainerAssignedEvent assigned : assignments) {
        containerIds.add(assigned.getContainer().getId());
    }
    Assert.assertEquals("Assigned containers must be different", assignments.size(), containerIds.size());
    // check that every request received an assignment
    for (ContainerRequestEvent req : requests) {
        TaskAttemptContainerAssignedEvent assigned = null;
        for (TaskAttemptContainerAssignedEvent ass : assignments) {
            if (ass.getTaskAttemptID().equals(req.getAttemptID())) {
                assigned = ass;
                break;
            }
        }
        checkAssignment(req, assigned, checkHostMatch);
    }
}
Also used: ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), HashSet (java.util.HashSet), TaskAttemptContainerAssignedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent)
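
The per-request verification is delegated to checkAssignment (singular), which is not part of this excerpt. A minimal sketch of what it plausibly asserts, using only the getters already seen above, is below; the method body is an assumption, not the actual Hadoop test code:

// Hypothetical per-request check; the real checkAssignment in
// TestRMContainerAllocator may assert more or differently.
// (Uses java.util.Arrays in addition to the imports listed above.)
private static void checkAssignment(ContainerRequestEvent request,
        TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) {
    Assert.assertNotNull("No container assigned to " + request.getAttemptID(), assigned);
    Assert.assertEquals("Assigned to the wrong attempt",
            request.getAttemptID(), assigned.getTaskAttemptID());
    if (checkHostMatch && request.getHosts().length > 0) {
        // The container should sit on one of the hosts named in the request.
        String host = assigned.getContainer().getNodeId().getHost();
        Assert.assertTrue("Not assigned to a requested host",
                Arrays.asList(request.getHosts()).contains(host));
    }
}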

Example 8 with ContainerRequestEvent

Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.

From the class TestRMContainerAllocator, method testBlackListedNodes.

@Test
public void testBlackListedNodes() throws Exception {
    LOG.info("Running testBlackListedNodes");
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
    conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
    conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
    MyResourceManager rm = new MyResourceManager(conf);
    rm.start();
    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
    // Submit the application
    RMApp app = rm.submitApp(1024);
    dispatcher.await();
    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
    amNodeManager.nodeHeartbeat(true);
    dispatcher.await();
    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
    rm.sendAMLaunched(appAttemptId);
    dispatcher.await();
    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
    // add resources to scheduler
    MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
    MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
    MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
    dispatcher.await();
    // create the container request
    ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[] { "h1" });
    allocator.sendRequest(event1);
    // send 1 more request with different resource req
    ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[] { "h2" });
    allocator.sendRequest(event2);
    // send another request with different resource and priority
    ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[] { "h3" });
    allocator.sendRequest(event3);
    // this tells the scheduler about the requests
    // as nodes are not added, no allocations
    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
    dispatcher.await();
    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
    // Send events to blacklist nodes h1 and h2
    ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
    allocator.sendFailure(f1);
    ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);
    allocator.sendFailure(f2);
    // update resources in scheduler
    // Node heartbeat
    nodeManager1.nodeHeartbeat(true);
    // Node heartbeat
    nodeManager2.nodeHeartbeat(true);
    dispatcher.await();
    assigned = allocator.schedule();
    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
    dispatcher.await();
    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
    assertBlacklistAdditionsAndRemovals(2, 0, rm);
    // mark h1/h2 as bad nodes
    nodeManager1.nodeHeartbeat(false);
    nodeManager2.nodeHeartbeat(false);
    dispatcher.await();
    assigned = allocator.schedule();
    dispatcher.await();
    assertBlacklistAdditionsAndRemovals(0, 0, rm);
    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
    // Node heartbeat
    nodeManager3.nodeHeartbeat(true);
    dispatcher.await();
    assigned = allocator.schedule();
    dispatcher.await();
    assertBlacklistAdditionsAndRemovals(0, 0, rm);
    Assert.assertTrue("No of assignments must be 3", assigned.size() == 3);
    // validate that all containers are assigned to h3
    for (TaskAttemptContainerAssignedEvent assig : assigned) {
        Assert.assertTrue("Assigned container host not correct", "h3".equals(assig.getContainer().getNodeId().getHost()));
    }
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), TaskAttemptContainerAssignedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
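
The blacklist is driven by ContainerFailedEvents built with the createFailEvent helper, which is also not shown in this excerpt. A sketch under the assumption that ContainerFailedEvent carries the task attempt id and the address of the failed host (the helper name, the bare-hostname address, and the meaning of the boolean flag are all assumptions):

// Hypothetical sketch of a createFailEvent-style helper; the real helper in
// TestRMContainerAllocator is not shown here and may differ.
private static ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
        String host, boolean reduce) {
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0,
            reduce ? TaskType.REDUCE : TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
    // Each failure reported against "host" counts toward
    // MAX_TASK_FAILURES_PER_TRACKER (set to 1 above), after which the
    // allocator blacklists the node.
    return new ContainerFailedEvent(attemptId, host);
}

Setting MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT to -1 appears to switch off the percentage threshold at which blacklisting would otherwise be ignored, so the blacklist stays in force in this test even though two of the three worker nodes end up blacklisted.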

Example 9 with ContainerRequestEvent

Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.

From the class TestRMContainerAllocator, method testAvoidAskMoreReducersWhenReducerPreemptionIsRequired.

@Test
public void testAvoidAskMoreReducersWhenReducerPreemptionIsRequired() throws Exception {
    LOG.info("Running testAvoidAskMoreReducersWhenReducerPreemptionIsRequired");
    Configuration conf = new Configuration();
    MyResourceManager rm = new MyResourceManager(conf);
    rm.start();
    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
    // Submit the application
    RMApp app = rm.submitApp(1024);
    dispatcher.await();
    MockNM amNodeManager = rm.registerNode("amNM:1234", 1260);
    amNodeManager.nodeHeartbeat(true);
    dispatcher.await();
    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
    rm.sendAMLaunched(appAttemptId);
    dispatcher.await();
    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
    // Use a controlled clock to advance time for test.
    ControlledClock clock = (ControlledClock) allocator.getContext().getClock();
    clock.setTime(System.currentTimeMillis());
    // Register nodes to RM.
    MockNM nodeManager = rm.registerNode("h1:1234", 1024);
    dispatcher.await();
    // Request 2 maps and 1 reducer (some on nodes which are not registered).
    ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[] { "h1" });
    allocator.sendRequest(event1);
    ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[] { "h2" });
    allocator.sendRequest(event2);
    ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[] { "h2" }, false, true);
    allocator.sendRequest(event3);
    // This will tell the scheduler about the requests but there will be no
    // allocations as nodes are not added.
    allocator.schedule();
    dispatcher.await();
    // Advance clock so that maps can be considered as hanging.
    clock.setTime(System.currentTimeMillis() + 500000L);
    // Request for another reducer on h3 which has not registered.
    ContainerRequestEvent event4 = createReq(jobId, 4, 1024, new String[] { "h3" }, false, true);
    allocator.sendRequest(event4);
    allocator.schedule();
    dispatcher.await();
    // Update resources in scheduler through node heartbeat from h1.
    nodeManager.nodeHeartbeat(true);
    dispatcher.await();
    rm.getMyFifoScheduler().forceResourceLimit(Resource.newInstance(1024, 1));
    allocator.schedule();
    dispatcher.await();
    // One map is assigned.
    Assert.assertEquals(1, allocator.getAssignedRequests().maps.size());
    // Send deallocate request for map so that no maps are assigned after this.
    ContainerAllocatorEvent deallocate = createDeallocateEvent(jobId, 1, false);
    allocator.sendDeallocate(deallocate);
    // Now one reducer should be scheduled and one should be pending.
    Assert.assertEquals(1, allocator.getScheduledRequests().reduces.size());
    Assert.assertEquals(1, allocator.getNumOfPendingReduces());
    // No map should be assigned and one should be scheduled.
    Assert.assertEquals(1, allocator.getScheduledRequests().maps.size());
    Assert.assertEquals(0, allocator.getAssignedRequests().maps.size());
    Assert.assertEquals(6, allocator.getAsk().size());
    for (ResourceRequest req : allocator.getAsk()) {
        boolean isReduce = req.getPriority().equals(RMContainerAllocator.PRIORITY_REDUCE);
        if (isReduce) {
            // 1 reducer each asked on h2, * and default-rack
            Assert.assertTrue((req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack") || req.getResourceName().equals("h2")) && req.getNumContainers() == 1);
        } else {
            //map
            // 0 mappers asked on h1 and 1 each on * and default-rack
            Assert.assertTrue(((req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack")) && req.getNumContainers() == 1) || (req.getResourceName().equals("h1") && req.getNumContainers() == 0));
        }
    }
    clock.setTime(System.currentTimeMillis() + 500000L + 10 * 60 * 1000);
    // On next allocate request to scheduler, headroom reported will be 2048.
    rm.getMyFifoScheduler().forceResourceLimit(Resource.newInstance(2048, 0));
    allocator.schedule();
    dispatcher.await();
    // After allocate response from scheduler, all scheduled reduces are ramped
    // down and move to pending. 3 asks are also updated with 0 containers to
    // indicate ramping down of reduces to scheduler.
    Assert.assertEquals(0, allocator.getScheduledRequests().reduces.size());
    Assert.assertEquals(2, allocator.getNumOfPendingReduces());
    Assert.assertEquals(3, allocator.getAsk().size());
    for (ResourceRequest req : allocator.getAsk()) {
        Assert.assertEquals(RMContainerAllocator.PRIORITY_REDUCE, req.getPriority());
        Assert.assertTrue(req.getResourceName().equals("*") || req.getResourceName().equals("/default-rack") || req.getResourceName().equals("h2"));
        Assert.assertEquals(Resource.newInstance(1024, 1), req.getCapability());
        Assert.assertEquals(0, req.getNumContainers());
    }
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ControlledClock (org.apache.hadoop.yarn.util.ControlledClock), ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
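
The ramp-down that the final loop checks is communicated to the scheduler as ResourceRequests carrying zero containers at the reduce priority. Purely as an illustration of the expected shape (not code taken from the allocator; the variable names are made up), one such ask for resource name "h2" could be built with the public factory:

// Illustrative only: the shape of one "ramp down" ask the final loop expects.
Resource reduceCapability = Resource.newInstance(1024, 1);
ResourceRequest rampDownAsk = ResourceRequest.newInstance(
        RMContainerAllocator.PRIORITY_REDUCE,   // reduce priority
        "h2",                                   // also sent for "*" and "/default-rack"
        reduceCapability,
        0);                                     // zero containers signals ramp-down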

Example 10 with ContainerRequestEvent

Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.

From the class RMContainerAllocator, method handleMapContainerRequest.

@SuppressWarnings({ "unchecked" })
private void handleMapContainerRequest(ContainerRequestEvent reqEvent) {
    assert (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP));
    Resource supportedMaxContainerCapability = getMaxContainerCapability();
    JobId jobId = getJob().getID();
    if (mapResourceRequest.equals(Resources.none())) {
        mapResourceRequest = reqEvent.getCapability();
        eventHandler.handle(new JobHistoryEvent(jobId, new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP, mapResourceRequest.getMemorySize())));
        LOG.info("mapResourceRequest:" + mapResourceRequest);
    }
    boolean mapContainerRequestAccepted = true;
    if (mapResourceRequest.getMemorySize() > supportedMaxContainerCapability.getMemorySize() || mapResourceRequest.getVirtualCores() > supportedMaxContainerCapability.getVirtualCores()) {
        mapContainerRequestAccepted = false;
    }
    if (mapContainerRequestAccepted) {
        // set the resources
        reqEvent.getCapability().setMemorySize(mapResourceRequest.getMemorySize());
        reqEvent.getCapability().setVirtualCores(mapResourceRequest.getVirtualCores());
        //maps are immediately scheduled
        scheduledRequests.addMap(reqEvent);
    } else {
        String diagMsg = "The required MAP capability is more than the " + "supported max container capability in the cluster. Killing" + " the Job. mapResourceRequest: " + mapResourceRequest + " maxContainerCapability:" + supportedMaxContainerCapability;
        LOG.info(diagMsg);
        eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
        eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    }
}
Also used: NormalizedResourceEvent (org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent), JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent), Resource (org.apache.hadoop.yarn.api.records.Resource), JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent), JobDiagnosticsUpdateEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
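
handleMapContainerRequest runs when a ContainerRequestEvent for a MAP attempt reaches the allocator's event handling path. As a rough sketch of the producing side (the real producer is the task attempt state machine, which is not shown here; mapTaskAttemptId and eventHandler are placeholder names), creating and dispatching such an event looks roughly like:

// Hypothetical dispatch of a map container request toward RMContainerAllocator;
// mapTaskAttemptId and eventHandler are assumed to exist in scope.
Resource askedCapability = Resource.newInstance(1024, 1);
ContainerRequestEvent mapRequest = new ContainerRequestEvent(
        mapTaskAttemptId,                      // TaskAttemptId of a MAP attempt
        askedCapability,                       // later normalized to mapResourceRequest
        new String[] { "h1" },                 // preferred hosts
        new String[] { "/default-rack" });     // preferred racks
eventHandler.handle(mapRequest);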

Aggregations

JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 20 usages
Test (org.junit.Test): 20 usages
Configuration (org.apache.hadoop.conf.Configuration): 18 usages
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 18 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 18 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 18 usages
DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher): 14 usages
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM): 14 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 14 usages
TaskAttemptContainerAssignedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent): 11 usages
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 5 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 5 usages
JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent): 4 usages
JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent): 4 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 4 usages
ControlledClock (org.apache.hadoop.yarn.util.ControlledClock): 4 usages
Container (org.apache.hadoop.yarn.api.records.Container): 3 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 3 usages
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl): 2 usages
NormalizedResourceEvent (org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent): 2 usages