
Example 11 with ContainerAllocator

use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by apache.

From the class MRAppBenchmark, method benchmark1.

@Test
public void benchmark1() throws Exception {
    // Adjust for benchmarking. Start with thousands.
    int maps = 100;
    int reduces = 0;
    System.out.println("Running benchmark with maps:" + maps + " reduces:" + reduces);
    run(new MRApp(maps, reduces, true, this.getClass().getName(), true) {

        @Override
        protected ContainerAllocator createContainerAllocator(ClientService clientService, AppContext context) {
            AMPreemptionPolicy policy = new NoopAMPreemptionPolicy();
            return new RMContainerAllocator(clientService, context, policy) {

                @Override
                protected ApplicationMasterProtocol createSchedulerProxy() {
                    return new ApplicationMasterProtocol() {

                        @Override
                        public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws IOException {
                            RegisterApplicationMasterResponse response = Records.newRecord(RegisterApplicationMasterResponse.class);
                            response.setMaximumResourceCapability(Resource.newInstance(10240, 1));
                            return response;
                        }

                        @Override
                        public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws IOException {
                            FinishApplicationMasterResponse response = Records.newRecord(FinishApplicationMasterResponse.class);
                            return response;
                        }

                        @Override
                        public AllocateResponse allocate(AllocateRequest request) throws IOException {
                            AllocateResponse response = Records.newRecord(AllocateResponse.class);
                            List<ResourceRequest> askList = request.getAskList();
                            List<Container> containers = new ArrayList<Container>();
                            for (ResourceRequest req : askList) {
                                if (!ResourceRequest.isAnyLocation(req.getResourceName())) {
                                    continue;
                                }
                                int numContainers = req.getNumContainers();
                                for (int i = 0; i < numContainers; i++) {
                                    ContainerId containerId = ContainerId.newContainerId(getContext().getApplicationAttemptId(), request.getResponseId() + i);
                                    containers.add(Container.newInstance(containerId, NodeId.newInstance("host" + containerId.getContainerId(), 2345), "host" + containerId.getContainerId() + ":5678", req.getCapability(), req.getPriority(), null));
                                }
                            }
                            response.setAllocatedContainers(containers);
                            response.setResponseId(request.getResponseId() + 1);
                            response.setNumClusterNodes(350);
                            return response;
                        }
                    };
                }
            };
        }
    });
}
Also used : ClientService(org.apache.hadoop.mapreduce.v2.app.client.ClientService) AllocateRequest(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) NoopAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy) ApplicationMasterProtocol(org.apache.hadoop.yarn.api.ApplicationMasterProtocol) FinishApplicationMasterResponse(org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse) IOException(java.io.IOException) RMContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator) ContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator) FinishApplicationMasterRequest(org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest) AllocateResponse(org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse) Container(org.apache.hadoop.yarn.api.records.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) RegisterApplicationMasterResponse(org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse) ArrayList(java.util.ArrayList) List(java.util.List) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) NoopAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy) AMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy) RegisterApplicationMasterRequest(org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest) RMContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator) Test(org.junit.Test)
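
The stubbed allocate() above fabricates containers only for asks whose resource name is the wildcard location (ResourceRequest.ANY, i.e. "*"); node- and rack-local asks are skipped by the isAnyLocation check. Below is a minimal, self-contained sketch of that check, using illustrative request values rather than anything taken from the benchmark itself.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class AnyLocationCheck {
    public static void main(String[] args) {
        // Two asks: one for any location (the "*" wildcard), one pinned to a node.
        List<ResourceRequest> askList = Arrays.asList(
            ResourceRequest.newInstance(Priority.newInstance(0),
                ResourceRequest.ANY, Resource.newInstance(1024, 1), 3),
            ResourceRequest.newInstance(Priority.newInstance(0),
                "host1", Resource.newInstance(1024, 1), 3));
        for (ResourceRequest req : askList) {
            // The same test the stubbed allocate() applies before faking containers.
            System.out.println(req.getResourceName() + " -> "
                + ResourceRequest.isAnyLocation(req.getResourceName()));
        }
        // Prints: "* -> true" and "host1 -> false"
    }
}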

Example 12 with ContainerAllocator

use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by apache.

From the class TestStagingCleanup, method testDeletionofStagingOnKillLastTry.

// FIXME:
// Disabled this test because currently, when the shutdown hook is triggered at
// the last retry (from the RM's point of view), cleanup is not performed. This
// should be supported once YARN-2261 is completed.
//   @Test (timeout = 30000)
public void testDeletionofStagingOnKillLastTry() throws IOException {
    conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
    fs = mock(FileSystem.class);
    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
    //Staging Dir exists
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path stagingDir = MRApps.getStagingAreaDir(conf, user);
    when(fs.exists(stagingDir)).thenReturn(true);
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    JobId jobid = recordFactory.newRecordInstance(JobId.class);
    jobid.setAppId(appId);
    ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
    //no retry
    MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
    appMaster.init(conf);
    assertTrue("appMaster.isLastAMRetry() is false", appMaster.isLastAMRetry());
    //simulate the process being killed
    MRAppMaster.MRAppMasterShutdownHook hook = new MRAppMaster.MRAppMasterShutdownHook(appMaster);
    hook.run();
    assertTrue("MRAppMaster isn't stopped", appMaster.isInState(Service.STATE.STOPPED));
    verify(fs).delete(stagingJobPath, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) ContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator)
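
The staging-area lookup that the test stubs around can also be run standalone with the same calls; here is a small sketch (the class name is illustrative, while the MRApps and UserGroupInformation calls are the ones used in the test body).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;

public class StagingDirLookup {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolve the per-user staging area exactly as the test does before
        // stubbing fs.exists() on it.
        String user = UserGroupInformation.getCurrentUser().getShortUserName();
        Path stagingDir = MRApps.getStagingAreaDir(conf, user);
        System.out.println("Staging area for " + user + ": " + stagingDir);
    }
}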

Example 13 with ContainerAllocator

use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by apache.

From the class TestStagingCleanup, method testPreservePatternMatchedAndFailedStaging.

@Test
public void testPreservePatternMatchedAndFailedStaging() throws IOException {
    conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
    // When PRESERVE_FILES_PATTERN and PRESERVE_FAILED_TASK_FILES are set,
    // files in the staging dir are always kept.
    conf.set(MRJobConfig.PRESERVE_FILES_PATTERN, "JobDir");
    conf.setBoolean(MRJobConfig.PRESERVE_FAILED_TASK_FILES, true);
    fs = mock(FileSystem.class);
    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
    //Staging Dir exists
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path stagingDir = MRApps.getStagingAreaDir(conf, user);
    when(fs.exists(stagingDir)).thenReturn(true);
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    JobId jobid = recordFactory.newRecordInstance(JobId.class);
    jobid.setAppId(appId);
    ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
    Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
    MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
    appMaster.init(conf);
    appMaster.start();
    appMaster.shutDownJob();
    // Test whether notifyIsLastAMRetry was called
    Assert.assertEquals(true, ((TestMRApp) appMaster).getTestIsLastAMRetry());
    verify(fs, times(0)).delete(stagingJobPath, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) ContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator) Test(org.junit.Test)
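
Both knobs are ordinary job-configuration properties; the test sets them together so that files in the staging directory are always kept. A brief sketch of setting them on a plain Configuration follows (the pattern value here is illustrative, not taken from the test).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class PreserveStagingConfig {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Keep staging files whose names match this pattern (illustrative value).
        conf.set(MRJobConfig.PRESERVE_FILES_PATTERN, "part-m-00000");
        // Also keep the files of failed task attempts for post-mortem debugging.
        conf.setBoolean(MRJobConfig.PRESERVE_FAILED_TASK_FILES, true);
        System.out.println(conf.get(MRJobConfig.PRESERVE_FILES_PATTERN));
    }
}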

Example 14 with ContainerAllocator

use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by apache.

From the class TestRMContainerAllocator, method testReportedAppProgressWithOnlyMaps.

@Test
public void testReportedAppProgressWithOnlyMaps() throws Exception {
    LOG.info("Running testReportedAppProgressWithOnlyMaps");
    Configuration conf = new Configuration();
    final MyResourceManager rm = new MyResourceManager(conf);
    rm.start();
    DrainDispatcher rmDispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
    // Submit the application
    RMApp rmApp = rm.submitApp(1024);
    rmDispatcher.await();
    MockNM amNodeManager = rm.registerNode("amNM:1234", 11264);
    amNodeManager.nodeHeartbeat(true);
    rmDispatcher.await();
    final ApplicationAttemptId appAttemptId = rmApp.getCurrentAppAttempt().getAppAttemptId();
    rm.sendAMLaunched(appAttemptId);
    rmDispatcher.await();
    MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId(appAttemptId, 0), 10, 0, false, this.getClass().getName(), true, 1) {

        @Override
        protected Dispatcher createDispatcher() {
            return new DrainDispatcher();
        }

        @Override
        protected ContainerAllocator createContainerAllocator(ClientService clientService, AppContext context) {
            return new MyContainerAllocator(rm, appAttemptId, context);
        }
    };
    Assert.assertEquals(0.0, rmApp.getProgress(), 0.0);
    mrApp.submit(conf);
    Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
    DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
    MyContainerAllocator allocator = (MyContainerAllocator) mrApp.getContainerAllocator();
    mrApp.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
    amDispatcher.await();
    // Wait till all map-attempts request for containers
    for (Task t : job.getTasks().values()) {
        mrApp.waitForInternalState((TaskAttemptImpl) t.getAttempts().values().iterator().next(), TaskAttemptStateInternal.UNASSIGNED);
    }
    amDispatcher.await();
    allocator.schedule();
    rmDispatcher.await();
    amNodeManager.nodeHeartbeat(true);
    rmDispatcher.await();
    allocator.schedule();
    rmDispatcher.await();
    // Wait for all map-tasks to be running
    for (Task t : job.getTasks().values()) {
        mrApp.waitForState(t, TaskState.RUNNING);
    }
    // Send heartbeat
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.05f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.05f, rmApp.getProgress(), 0.001f);
    Iterator<Task> it = job.getTasks().values().iterator();
    // Finish off 1 map so that map-progress is 10%
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.14f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.14f, rmApp.getProgress(), 0.001f);
    // Finish off 5 more maps so that map-progress is 60%
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 5);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
    // Finish off the remaining 4 maps so that map-progress is 100%
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 4);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
}
Also used : DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ClientService(org.apache.hadoop.mapreduce.v2.app.client.ClientService) MockNM(org.apache.hadoop.yarn.server.resourcemanager.MockNM) RunningAppContext(org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Test(org.junit.Test)
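
The asserted values are consistent with a weighting in which job setup contributes roughly 0.05 to overall progress and, with zero reduces, the map phase contributes roughly 0.90, leaving about 0.05 for the not-yet-run job cleanup. The weights below are an assumption used to sanity-check the numbers in the test, not constants taken from it.

public class ProgressArithmetic {
    public static void main(String[] args) {
        // Assumed weights: setup 0.05, maps 0.90 (no reduces), cleanup 0.05 still pending.
        float setupWeight = 0.05f;
        float mapWeight = 0.90f;
        int totalMaps = 10;
        System.out.printf("%.2f%n", setupWeight + mapWeight * 0 / totalMaps);  // 0.05 before any map finishes
        System.out.printf("%.2f%n", setupWeight + mapWeight * 1 / totalMaps);  // 0.14 after 1 map
        System.out.printf("%.2f%n", setupWeight + mapWeight * 6 / totalMaps);  // 0.59 after 6 maps
        System.out.printf("%.2f%n", setupWeight + mapWeight * 10 / totalMaps); // 0.95 after all 10 maps
    }
}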

Example 15 with ContainerAllocator

use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator in project hadoop by apache.

From the class TestRMContainerAllocator, method testHandlingFinishedContainers.

/**
   * MAPREDUCE-6771. Test if RMContainerAllocator generates the events in the
   * right order while processing finished containers.
   */
@Test
public void testHandlingFinishedContainers() {
    EventHandler eventHandler = mock(EventHandler.class);
    AppContext context = mock(RunningAppContext.class);
    when(context.getClock()).thenReturn(new ControlledClock());
    when(context.getClusterInfo()).thenReturn(new ClusterInfo(Resource.newInstance(10240, 1)));
    when(context.getEventHandler()).thenReturn(eventHandler);
    RMContainerAllocator containerAllocator = new RMContainerAllocatorForFinishedContainer(null, context, mock(AMPreemptionPolicy.class));
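    // A finished (COMPLETE) container status with empty diagnostics and exit status 0.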
    ContainerStatus finishedContainer = ContainerStatus.newInstance(mock(ContainerId.class), ContainerState.COMPLETE, "", 0);
    containerAllocator.processFinishedContainer(finishedContainer);
    InOrder inOrder = inOrder(eventHandler);
    inOrder.verify(eventHandler).handle(isA(TaskAttemptDiagnosticsUpdateEvent.class));
    inOrder.verify(eventHandler).handle(isA(TaskAttemptEvent.class));
    inOrder.verifyNoMoreInteractions();
}
Also used : ClusterInfo(org.apache.hadoop.mapreduce.v2.app.ClusterInfo) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) InOrder(org.mockito.InOrder) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) TaskAttemptDiagnosticsUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent) RunningAppContext(org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) EventHandler(org.apache.hadoop.yarn.event.EventHandler) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) NoopAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy) AMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy) ControlledClock(org.apache.hadoop.yarn.util.ControlledClock) Test(org.junit.Test)

Aggregations

Test (org.junit.Test): 13
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 12
ContainerAllocator (org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator): 11
FileSystem (org.apache.hadoop.fs.FileSystem): 10
Path (org.apache.hadoop.fs.Path): 10
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 7
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 5
ClientService (org.apache.hadoop.mapreduce.v2.app.client.ClientService): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
RunningAppContext (org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext): 4
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 4
MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp): 3
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 3
DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher): 3
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM): 3
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 3
IOException (java.io.IOException): 2
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 2
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 2