
Example 36 with MRApp

Use of org.apache.hadoop.mapreduce.v2.app.MRApp in project hadoop by apache.

Class MRAppBenchmark, method run().

/**
   * Runs a memory and time benchmark with a mock MRApp.
   * @param app Application to submit
   * @throws Exception On application failure
   */
public void run(MRApp app) throws Exception {
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);
    long startTime = System.currentTimeMillis();
    Job job = app.submit(new Configuration());
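    // Poll every two seconds until the job reports success; the benchmark
    // assumes the submitted mock job cannot fail.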
    while (!job.getReport().getJobState().equals(JobState.SUCCEEDED)) {
        printStat(job, startTime);
        Thread.sleep(2000);
    }
    printStat(job, startTime);
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), Logger(org.apache.log4j.Logger), Job(org.apache.hadoop.mapreduce.v2.app.job.Job)
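
The printStat helper is not shown in this excerpt. A plausible reconstruction (hypothetical; the actual helper in MRAppBenchmark may differ in detail) reports job state, task progress, heap use, and elapsed time on each poll:

public void printStat(Job job, long startTime) throws Exception {
    long currentTime = System.currentTimeMillis();
    // Encourage a collection so the used-heap figure is meaningful.
    Runtime.getRuntime().gc();
    long mem = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
    System.out.println("JobState:" + job.getState()
        + " CompletedMaps:" + job.getCompletedMaps()
        + " CompletedReduces:" + job.getCompletedReduces()
        + " Memory(total-free)(KB):" + mem / 1024
        + " ElapsedTime(ms):" + (currentTime - startTime));
}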

Example 37 with MRApp

Use of org.apache.hadoop.mapreduce.v2.app.MRApp in project hadoop by apache.

Class MRAppBenchmark, method benchmark1().

@Test
public void benchmark1() throws Exception {
    // Adjust for benchmarking. Start with thousands.
    int maps = 100;
    int reduces = 0;
    System.out.println("Running benchmark with maps:" + maps + " reduces:" + reduces);
    run(new MRApp(maps, reduces, true, this.getClass().getName(), true) {

        @Override
        protected ContainerAllocator createContainerAllocator(ClientService clientService, AppContext context) {
            AMPreemptionPolicy policy = new NoopAMPreemptionPolicy();
            return new RMContainerAllocator(clientService, context, policy) {

                @Override
                protected ApplicationMasterProtocol createSchedulerProxy() {
                    return new ApplicationMasterProtocol() {

                        @Override
                        public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws IOException {
                            RegisterApplicationMasterResponse response = Records.newRecord(RegisterApplicationMasterResponse.class);
                            response.setMaximumResourceCapability(Resource.newInstance(10240, 1));
                            return response;
                        }

                        @Override
                        public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws IOException {
                            FinishApplicationMasterResponse response = Records.newRecord(FinishApplicationMasterResponse.class);
                            return response;
                        }

                        @Override
                        public AllocateResponse allocate(AllocateRequest request) throws IOException {
                            AllocateResponse response = Records.newRecord(AllocateResponse.class);
                            List<ResourceRequest> askList = request.getAskList();
                            List<Container> containers = new ArrayList<Container>();
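                            // Immediately grant every container requested at the
                            // ANY resource level, fabricating node ids and host
                            // names so no real cluster is needed.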
                            for (ResourceRequest req : askList) {
                                if (!ResourceRequest.isAnyLocation(req.getResourceName())) {
                                    continue;
                                }
                                int numContainers = req.getNumContainers();
                                for (int i = 0; i < numContainers; i++) {
                                    ContainerId containerId = ContainerId.newContainerId(getContext().getApplicationAttemptId(), request.getResponseId() + i);
                                    containers.add(Container.newInstance(containerId, NodeId.newInstance("host" + containerId.getContainerId(), 2345), "host" + containerId.getContainerId() + ":5678", req.getCapability(), req.getPriority(), null));
                                }
                            }
                            response.setAllocatedContainers(containers);
                            response.setResponseId(request.getResponseId() + 1);
                            response.setNumClusterNodes(350);
                            return response;
                        }
                    };
                }
            };
        }
    });
}
Also used: ClientService(org.apache.hadoop.mapreduce.v2.app.client.ClientService), AllocateRequest(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest), NoopAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy), ApplicationMasterProtocol(org.apache.hadoop.yarn.api.ApplicationMasterProtocol), FinishApplicationMasterResponse(org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse), IOException(java.io.IOException), RMContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator), ContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator), FinishApplicationMasterRequest(org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest), AllocateResponse(org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse), Container(org.apache.hadoop.yarn.api.records.Container), ContainerId(org.apache.hadoop.yarn.api.records.ContainerId), RegisterApplicationMasterResponse(org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse), ArrayList(java.util.ArrayList), List(java.util.List), ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest), AMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy), RegisterApplicationMasterRequest(org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest), Test(org.junit.Test)
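
The stubbed scheduler proxy above grants every allocation instantly, so the benchmark measures the AM's own event handling rather than ResourceManager latency. For ad-hoc profiling outside JUnit, a minimal driver along these lines (hypothetical, assuming the Hadoop test classes are on the classpath) can invoke the benchmark directly:

public class MRAppBenchmarkDriver {
    public static void main(String[] args) throws Exception {
        // Runs the JUnit benchmark method as a plain entry point so a
        // profiler can be attached to the JVM.
        new MRAppBenchmark().benchmark1();
    }
}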

Example 38 with MRApp

Use of org.apache.hadoop.mapreduce.v2.app.MRApp in project hadoop by apache.

Class TestAMInfos, method testAMInfosWithoutRecoveryEnabled().

@Test
public void testAMInfosWithoutRecoveryEnabled() throws Exception {
    int runCount = 0;
    MRApp app = new MRAppWithHistory(1, 0, false, this.getClass().getName(), true, ++runCount);
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    long am1StartTime = app.getAllAMInfos().get(0).getStartTime();
    Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    app.waitForState(mapTask, TaskState.RUNNING);
    TaskAttempt taskAttempt = mapTask.getAttempts().values().iterator().next();
    app.waitForState(taskAttempt, TaskAttemptState.RUNNING);
    // stop the app
    app.stop();
    // rerun
    app = new MRAppWithHistory(1, 0, false, this.getClass().getName(), false, ++runCount);
    conf = new Configuration();
    // On rerun, the AMInfo from the previous run is recovered even though
    // recovery is not enabled.
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false);
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
    it = job.getTasks().values().iterator();
    mapTask = it.next();
    // There should be two AMInfos
    List<AMInfo> amInfos = app.getAllAMInfos();
    Assert.assertEquals(2, amInfos.size());
    AMInfo amInfoOne = amInfos.get(0);
    Assert.assertEquals(am1StartTime, amInfoOne.getStartTime());
    app.stop();
}
Also used: AMInfo(org.apache.hadoop.mapreduce.v2.api.records.AMInfo), MRAppWithHistory(org.apache.hadoop.mapreduce.v2.app.TestRecovery.MRAppWithHistory), Task(org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration(org.apache.hadoop.conf.Configuration), TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job(org.apache.hadoop.mapreduce.v2.app.job.Job), Test(org.junit.Test)
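
Each application attempt contributes one AMInfo record, which is why the rerun yields two. A small hypothetical helper (not part of the test) makes the fields the assertions rely on visible:

// Hypothetical debugging helper: prints the per-attempt AMInfo records.
private static void dumpAMInfos(List<AMInfo> amInfos) {
    for (AMInfo info : amInfos) {
        System.out.println("attempt=" + info.getAppAttemptId()
            + " container=" + info.getContainerId()
            + " startTime=" + info.getStartTime()
            + " nmHost=" + info.getNodeManagerHost());
    }
}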

Example 39 with MRApp

Use of org.apache.hadoop.mapreduce.v2.app.MRApp in project hadoop by apache.

Class TestFail, method testTimedOutTask().

@Test
// All task attempts are timed out, leading to job failure.
public void testTimedOutTask() throws Exception {
    MRApp app = new TimeOutTaskMRApp(1, 0);
    Configuration conf = new Configuration();
    int maxAttempts = 2;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.FAILED);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals("Task state not correct", TaskState.FAILED, task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    for (TaskAttempt attempt : attempts.values()) {
        Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED, attempt.getReport().getTaskAttemptState());
    }
}
Also used: Task(org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration(org.apache.hadoop.conf.Configuration), TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job(org.apache.hadoop.mapreduce.v2.app.job.Job), Test(org.junit.Test)
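
TimeOutTaskMRApp is defined elsewhere in TestFail and is not shown here; conceptually it is an MRApp whose task attempts never report progress, with the task timeout lowered so the heartbeat handler fails the attempts quickly. A simplified sketch of that idea (hypothetical; the real class overrides the task attempt listener instead of submit):

static class SketchTimeOutTaskMRApp extends MRApp {
    SketchTimeOutTaskMRApp(int maps, int reduces) {
        super(maps, reduces, false, "TimeOutTaskMRApp", true);
    }
    @Override
    public Job submit(Configuration conf) throws Exception {
        // Lower the task timeout so attempts that never ping the AM are
        // declared failed after roughly a second.
        conf.setInt(MRJobConfig.TASK_TIMEOUT, 1000);
        conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 1000);
        return super.submit(conf);
    }
}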

Example 40 with MRApp

Use of org.apache.hadoop.mapreduce.v2.app.MRApp in project hadoop by apache.

Class TestFail, method testReduceFailureMaxPercent().

@Test
public void testReduceFailureMaxPercent() throws Exception {
    MRApp app = new MockFirstFailingTaskMRApp(2, 4);
    Configuration conf = new Configuration();
    // reduce the number of attempts so the test runs faster
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
    conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
    // map failures must not fail the job: 1 failing map out of 2 is 50%
    conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);
    // 1 failing reduce out of 4 is 25%, above the 20% tolerated, so the job fails
    conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 20);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.FAILED);
    // raising the tolerated reduce failure percentage to 25% (1 of 4 is 25%)
    // makes the job succeed
    app = new MockFirstFailingTaskMRApp(2, 4);
    conf = new Configuration();
    // reduce the number of attempts so the test runs faster
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
    conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
    // map failures must not fail the job: 1 failing map out of 2 is 50%
    conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);
    // 1 failing reduce out of 4 is 25%, within the 25% tolerated
    conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 25);
    job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), Job(org.apache.hadoop.mapreduce.v2.app.job.Job), Test(org.junit.Test)
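
MockFirstFailingTaskMRApp is defined in TestFail and not shown here; judging from the configuration above, one of the two maps fails (50%, tolerated) and one of the four reduces fails (25%). A sketch of how such an app is typically written (hypothetical; the real class may differ in detail, and TaskAttemptEvent/TaskAttemptEventType imports are assumed):

static class SketchFirstFailingTaskMRApp extends MRApp {
    SketchFirstFailingTaskMRApp(int maps, int reduces) {
        super(maps, reduces, true, "Mock App", true);
    }
    @Override
    protected void attemptLaunched(TaskAttemptId attemptID) {
        if (attemptID.getTaskId().getId() == 0) {
            // Fail every attempt of the first task of each type...
            getContext().getEventHandler().handle(
                new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
        } else {
            // ...and complete all other attempts normally.
            getContext().getEventHandler().handle(
                new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
        }
    }
}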

Aggregations

Test (org.junit.Test): 61
Configuration (org.apache.hadoop.conf.Configuration): 60
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 57
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 44
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 37
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 26
MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp): 23
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 15
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 14
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 14
JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent): 8
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 8
IOException (java.io.IOException): 6
HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo): 6
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 5
CountDownLatch (java.util.concurrent.CountDownLatch): 4
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
FileContext (org.apache.hadoop.fs.FileContext): 4
Path (org.apache.hadoop.fs.Path): 4
TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent): 4