Example 11 with MRAppWithHistory

Use of org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory in project hadoop by apache.

From the class TestJobHistoryParsing, method testJobHistoryMethods.

/**
   * Simple test of some JobHistory methods.
   */
@Test(timeout = 20000)
public void testJobHistoryMethods() throws Exception {
    LOG.info("STARTING testJobHistoryMethods");
    try {
        Configuration configuration = new Configuration();
        configuration.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
        RackResolver.init(configuration);
        MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(), true);
        app.submit(configuration);
        Job job = app.getContext().getAllJobs().values().iterator().next();
        JobId jobId = job.getID();
        LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
        app.waitForState(job, JobState.SUCCEEDED);
        // make sure job history events are handled
        app.waitForState(Service.STATE.STOPPED);
        JobHistory jobHistory = new JobHistory();
        jobHistory.init(configuration);
        // Method getAllJobs
        Assert.assertEquals(1, jobHistory.getAllJobs().size());
        // and with ApplicationId
        Assert.assertEquals(1, jobHistory.getAllJobs(app.getAppID()).size());
        JobsInfo jobsinfo = jobHistory.getPartialJobs(0L, 10L, null, "default", 0L, System.currentTimeMillis() + 1, 0L, System.currentTimeMillis() + 1, JobState.SUCCEEDED);
        Assert.assertEquals(1, jobsinfo.getJobs().size());
        Assert.assertNotNull(jobHistory.getApplicationAttemptId());
        // test Application Id
        Assert.assertEquals("application_0_0000", jobHistory.getApplicationID().toString());
        Assert.assertEquals("Job History Server", jobHistory.getApplicationName());
        // the following AppContext getters are not implemented by
        // JobHistory and simply return null
        Assert.assertNull(jobHistory.getEventHandler());
        Assert.assertNull(jobHistory.getClock());
        Assert.assertNull(jobHistory.getClusterInfo());
    } finally {
        LOG.info("FINISHED testJobHistoryMethods");
    }
}
Also used : MRAppWithHistory(org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory) Configuration(org.apache.hadoop.conf.Configuration) JobsInfo(org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Test(org.junit.Test)
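
The nine positional arguments in the getPartialJobs call above are easy to misread, so here is a minimal sketch that labels each filter. The helper class and method names are hypothetical, and the parameter order is taken from the call in the test itself; verify it against the JobHistory version you build against.

import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;

public final class PartialJobsQuery {
    // Mirrors the getPartialJobs call in testJobHistoryMethods, labeling
    // each positional filter. A null filter matches everything.
    static JobsInfo succeededJobsInDefaultQueue(JobHistory jobHistory) {
        long now = System.currentTimeMillis();
        return jobHistory.getPartialJobs(
                0L,                 // offset: skip no leading entries
                10L,                // count: return at most ten jobs
                null,               // user: null matches any user
                "default",          // queue: only jobs from this queue
                0L, now + 1,        // submit-time window (begin, end)
                0L, now + 1,        // finish-time window (begin, end)
                JobState.SUCCEEDED  // only successfully completed jobs
        );
    }
}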

Example 12 with MRAppWithHistory

Use of org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory in project hadoop by apache.

From the class TestJobHistoryParsing, method testDeleteFileInfo.

/**
   * Test cleaning of old history files. By default, files should be
   * deleted after one week.
   */
@Test(timeout = 15000)
public void testDeleteFileInfo() throws Exception {
    LOG.info("STARTING testDeleteFileInfo");
    try {
        Configuration conf = new Configuration();
        conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
        RackResolver.init(conf);
        MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(), true);
        app.submit(conf);
        Job job = app.getContext().getAllJobs().values().iterator().next();
        JobId jobId = job.getID();
        app.waitForState(job, JobState.SUCCEEDED);
        // make sure all events are flushed
        app.waitForState(Service.STATE.STOPPED);
        HistoryFileManager hfm = new HistoryFileManager();
        hfm.init(conf);
        HistoryFileInfo fileInfo = hfm.getFileInfo(jobId);
        hfm.initExisting();
        // wait until the pending move to the done directory completes
        while (fileInfo.isMovePending()) {
            Thread.sleep(300);
        }
        Assert.assertNotNull(hfm.jobListCache.values());
        // try to remove fileInfo
        hfm.clean();
        // check that fileInfo has not been deleted
        Assert.assertFalse(fileInfo.isDeleted());
        // force an expired max history age so the next clean() deletes the file
        hfm.setMaxHistoryAge(-1);
        hfm.clean();
        hfm.stop();
        Assert.assertTrue("Thread pool shutdown", hfm.moveToDoneExecutor.isTerminated());
        // now the file should be deleted
        Assert.assertTrue("file should be deleted ", fileInfo.isDeleted());
    } finally {
        LOG.info("FINISHED testDeleteFileInfo");
    }
}
Also used : MRAppWithHistory(org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory) HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) Configuration(org.apache.hadoop.conf.Configuration) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Test(org.junit.Test)
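
For context, the production counterpart of the hfm.setMaxHistoryAge(-1) shortcut is the retention key read by HistoryFileManager. A minimal sketch, assuming the standard JHAdminConfig.MR_HISTORY_MAX_AGE_MS key (mapreduce.jobhistory.max-age-ms), whose default corresponds to the one-week figure in the javadoc above:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;

public final class HistoryRetentionConfig {
    // Shorten history retention from the one-week default to three days.
    // The test bypasses this knob by calling setMaxHistoryAge(-1) directly,
    // so every file is already "too old" when clean() runs.
    static Configuration threeDayRetention() {
        Configuration conf = new Configuration();
        conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
                TimeUnit.DAYS.toMillis(3));
        return conf;
    }
}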

Example 13 with MRAppWithHistory

Use of org.apache.hadoop.mapreduce.v2.app.TestRecovery.MRAppWithHistory in project hadoop by apache.

From the class TestAMInfos, method testAMInfosWithoutRecoveryEnabled.

@Test
public void testAMInfosWithoutRecoveryEnabled() throws Exception {
    int runCount = 0;
    MRApp app = new MRAppWithHistory(1, 0, false, this.getClass().getName(), true, ++runCount);
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    long am1StartTime = app.getAllAMInfos().get(0).getStartTime();
    Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    app.waitForState(mapTask, TaskState.RUNNING);
    TaskAttempt taskAttempt = mapTask.getAttempts().values().iterator().next();
    app.waitForState(taskAttempt, TaskAttemptState.RUNNING);
    // stop the app
    app.stop();
    // rerun
    app = new MRAppWithHistory(1, 0, false, this.getClass().getName(), false, ++runCount);
    conf = new Configuration();
    // in the rerun, the AMInfo is recovered from the previous run even
    // though recovery is not enabled
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false);
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
    it = job.getTasks().values().iterator();
    mapTask = it.next();
    // There should be two AMInfos
    List<AMInfo> amInfos = app.getAllAMInfos();
    Assert.assertEquals(2, amInfos.size());
    AMInfo amInfoOne = amInfos.get(0);
    Assert.assertEquals(am1StartTime, amInfoOne.getStartTime());
    app.stop();
}
Also used : AMInfo(org.apache.hadoop.mapreduce.v2.api.records.AMInfo) MRAppWithHistory(org.apache.hadoop.mapreduce.v2.app.TestRecovery.MRAppWithHistory) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Test(org.junit.Test)
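
Since the assertions above only compare sizes and start times, a short sketch of what each recovered AMInfo record carries may help. The printer class is hypothetical, but the getters are the standard AMInfo accessors: getAllAMInfos() returns one record per application attempt, oldest first, which is why amInfos.get(0) can be compared against am1StartTime from the first run.

import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;

public final class AmHistoryPrinter {
    // Dump one line per AM attempt, oldest first, mirroring the order
    // returned by app.getAllAMInfos() in the test.
    static void print(List<AMInfo> amInfos) {
        for (AMInfo info : amInfos) {
            System.out.println(info.getAppAttemptId()
                    + " started at " + info.getStartTime()
                    + " on " + info.getNodeManagerHost()
                    + ":" + info.getNodeManagerPort());
        }
    }
}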

Example 14 with MRAppWithHistory

Use of org.apache.hadoop.mapreduce.v2.app.TestRecovery.MRAppWithHistory in project hadoop by apache.

From the class TestRecovery, method testRecoveryWithOldCommiter.

@Test
public void testRecoveryWithOldCommiter() throws Exception {
    int runCount = 0;
    MRApp app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), true, ++runCount);
    Configuration conf = new Configuration();
    conf.setBoolean("mapred.mapper.new-api", false);
    conf.setBoolean("mapred.reducer.new-api", false);
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask1 = it.next();
    Task reduceTask1 = it.next();
    // all maps must be running
    app.waitForState(mapTask1, TaskState.RUNNING);
    TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has reached
    //the RUNNING state
    app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
    //send the done signal to the map
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
    //wait for map task to complete
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
    // Verify the shuffle-port
    Assert.assertEquals(5467, task1Attempt1.getShufflePort());
    app.waitForState(reduceTask1, TaskState.RUNNING);
    TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
    // write output corresponding to reduce1
    writeOutput(reduce1Attempt1, conf);
    //send the done signal to the 1st reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
    //wait for first reduce task to complete
    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
    //stop the app before the job completes.
    app.stop();
    //rerun
    //in rerun the map will be recovered from previous run
    app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), false, ++runCount);
    conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
    conf.setBoolean("mapred.mapper.new-api", false);
    conf.setBoolean("mapred.reducer.new-api", false);
    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
    it = job.getTasks().values().iterator();
    mapTask1 = it.next();
    reduceTask1 = it.next();
    Task reduceTask2 = it.next();
    // map will be recovered, no need to send done
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
    // Verify the shuffle-port after recovery
    task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
    Assert.assertEquals(5467, task1Attempt1.getShufflePort());
    // first reduce will be recovered, no need to send done
    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
    app.waitForState(reduceTask2, TaskState.RUNNING);
    TaskAttempt reduce2Attempt = reduceTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has reached
    //the RUNNING state
    app.waitForState(reduce2Attempt, TaskAttemptState.RUNNING);
    //send the done signal to the 2nd reduce task
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce2Attempt.getID(), TaskAttemptEventType.TA_DONE));
    //wait to get it completed
    app.waitForState(reduceTask2, TaskState.SUCCEEDED);
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    validateOutput();
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Test(org.junit.Test)
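
The wait-for-RUNNING / send-TA_DONE / wait-for-SUCCEEDED sequence appears four times in this one test. As a reading aid, here is a hypothetical helper that factors it out; the class and method names are mine, but every call mirrors the test body:

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;

public final class TaskDriver {
    // Drive a task's first attempt to completion: wait until the attempt
    // is RUNNING, send TA_DONE, then wait for the task to SUCCEED.
    static void finishFirstAttempt(MRApp app, Task task) throws Exception {
        app.waitForState(task, TaskState.RUNNING);
        TaskAttempt attempt = task.getAttempts().values().iterator().next();
        app.waitForState(attempt, TaskAttemptState.RUNNING);
        app.getContext().getEventHandler().handle(
                new TaskAttemptEvent(attempt.getID(),
                        TaskAttemptEventType.TA_DONE));
        app.waitForState(task, TaskState.SUCCEEDED);
    }
}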

Example 15 with MRAppWithHistory

Use of org.apache.hadoop.mapreduce.v2.app.TestRecovery.MRAppWithHistory in project hadoop by apache.

From the class TestRecovery, method testRecoveryWithSpillEncryption.

@Test
public void testRecoveryWithSpillEncryption() throws Exception {
    int runCount = 0;
    MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), true, ++runCount);
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
    conf.setBoolean("mapred.mapper.new-api", true);
    conf.setBoolean("mapred.reducer.new-api", true);
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
    // run the MR job at the first attempt
    Job jobAttempt1 = app.submit(conf);
    app.waitForState(jobAttempt1, JobState.RUNNING);
    Iterator<Task> tasks = jobAttempt1.getTasks().values().iterator();
    // finish the map task but not the reduce task
    Task mapper = tasks.next();
    app.waitForState(mapper, TaskState.RUNNING);
    TaskAttempt mapAttempt = mapper.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(mapper, TaskState.SUCCEEDED);
    // crash the first attempt of the MR job
    app.stop();
    // run the MR job again at the second attempt
    app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), false, ++runCount);
    Job jobAttempt2 = app.submit(conf);
    Assert.assertTrue("Recovery from previous job attempt is processed even " + "though intermediate data encryption is enabled.", !app.recovered());
    // The map task succeeded from previous job attempt will not be recovered
    // because the data spill encryption is enabled.
    // Let's finish the job at the second attempt and verify its completion.
    app.waitForState(jobAttempt2, JobState.RUNNING);
    tasks = jobAttempt2.getTasks().values().iterator();
    mapper = tasks.next();
    Task reducer = tasks.next();
    // finish the map task first
    app.waitForState(mapper, TaskState.RUNNING);
    mapAttempt = mapper.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(mapper, TaskState.SUCCEEDED);
    // then finish the reduce task
    TaskAttempt redAttempt = reducer.getAttempts().values().iterator().next();
    app.waitForState(redAttempt, TaskAttemptState.RUNNING);
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(redAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(reducer, TaskState.SUCCEEDED);
    // verify that the job succeeds at the 2nd attempt
    app.waitForState(jobAttempt2, JobState.SUCCEEDED);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Test(org.junit.Test)
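
The single switch that changes recovery behavior in this test is MR_ENCRYPTED_INTERMEDIATE_DATA. A minimal sketch of the relevant configuration; the comment restates the test's own reasoning rather than anything beyond it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public final class SpillEncryptionConfig {
    // Enable encryption of intermediate (spill/shuffle) data. As the test
    // comments note, a succeeded map from a previous AM attempt is not
    // recovered when this is on, so the second attempt reruns it.
    static Configuration withEncryptedSpills() {
        Configuration conf = new Configuration();
        conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
        return conf;
    }
}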

Aggregations

Configuration (org.apache.hadoop.conf.Configuration): 20
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 20
Test (org.junit.Test): 19
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 16
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 15
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 12
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 7
MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp): 7
MRAppWithHistory (org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory): 5
AMInfo (org.apache.hadoop.mapreduce.v2.api.records.AMInfo): 3
HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo): 3
IOException (java.io.IOException): 2
TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent): 2
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 2
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 2
PrintStream (java.io.PrintStream): 1
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
StringTokenizer (java.util.StringTokenizer): 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1