Example 71 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

In the class TestKill, the method testKillTaskWaitKillJobBeforeTA_DONE:

@Test
public void testKillTaskWaitKillJobBeforeTA_DONE() throws Exception {
    CountDownLatch latch = new CountDownLatch(1);
    final Dispatcher dispatcher = new MyAsyncDispatch(latch, JobEventType.JOB_KILL);
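    // MyAsyncDispatch blocks the dispatch thread on the latch when it sees a
    // JOB_KILL event, so the events generated afterwards queue up behind it in
    // the order described below.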
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {

        @Override
        public Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    Job job = app.submit(new Configuration());
    JobId jobId = app.getJobId();
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    app.waitForState(mapTask, TaskState.RUNNING);
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    // The order in the dispatch event queue, from first to last
    // JobEventType.JOB_KILL
    // TA_DONE
    // TaskEventType.T_KILL ( from JobEventType.JOB_KILL handling )
    // TaskAttemptEventType.TA_CONTAINER_COMPLETED ( from TA_DONE handling )
    // TaskAttemptEventType.TA_KILL ( from TaskEventType.T_KILL handling )
    // TaskEventType.T_ATTEMPT_SUCCEEDED ( from TA_CONTAINER_COMPLETED handling )
    // TaskEventType.T_ATTEMPT_KILLED ( from TA_KILL handling )
    // Now kill the job
    app.getContext().getEventHandler().handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    // Finish map
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
    // Unblock the dispatcher so the queued events above are delivered
    latch.countDown();
    app.waitForInternalState((JobImpl) job, JobStateInternal.KILLED);
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), CountDownLatch (java.util.concurrent.CountDownLatch), Dispatcher (org.apache.hadoop.yarn.event.Dispatcher), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
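
The test relies on a MyAsyncDispatch helper that is not shown in this snippet. The sketch below illustrates what such a latch-blocking dispatcher might look like; the constructor and blocking logic are assumptions based on how the test uses it, and the actual implementation in TestKill may differ.

import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Event;

// Hypothetical sketch of a latch-blocking dispatcher; not the exact TestKill code.
class MyAsyncDispatch extends AsyncDispatcher {

    private final CountDownLatch latch;
    private final Enum<?> blockedEventType;

    MyAsyncDispatch(CountDownLatch latch, Enum<?> blockedEventType) {
        this.latch = latch;
        this.blockedEventType = blockedEventType;
    }

    @Override
    protected void dispatch(Event event) {
        if (event.getType() == blockedEventType) {
            try {
                // Hold this event on the dispatch thread until the test calls
                // latch.countDown(), so later events queue behind it in a known order.
                latch.await();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        super.dispatch(event);
    }
}

A blocked JOB_KILL event keeps the dispatch thread busy, which is what lets the test line up TA_DONE and the follow-on events behind it before releasing the latch.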

Example 72 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

In the class MRAppMasterTestLaunchTime, the method testMRAppMasterSuccessLock:

@Test
public void testMRAppMasterSuccessLock() throws IOException, InterruptedException {
    String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
    String containerIdStr = "container_1317529182569_0004_000002_1";
    String userName = "TestAppMasterUser";
    JobConf conf = new JobConf();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.fromString(applicationAttemptIdStr);
    JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
    Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
    Path end = MRApps.getEndJobCommitSuccessFile(conf, userName, jobId);
    FileSystem fs = FileSystem.get(conf);
    fs.create(start).close();
    fs.create(end).close();
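    // Both the start and end-commit-success markers already exist, simulating a
    // prior attempt that committed the job; the new AM is therefore expected to
    // force the SUCCEEDED state and shut down rather than run the job again.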
    ContainerId containerId = ContainerId.fromString(containerIdStr);
    MRAppMaster appMaster = new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, false);
    boolean caught = false;
    try {
        MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    } catch (IOException e) {
        // The IOException is expected here
        LOG.info("Caught expected Exception", e);
        caught = true;
    }
    assertTrue(caught);
    assertTrue(appMaster.errorHappenedShutDown);
    assertEquals(JobStateInternal.SUCCEEDED, appMaster.forcedState);
    appMaster.stop();
    // verify the final status is SUCCEEDED
    verifyFailedStatus((MRAppMasterTest) appMaster, "SUCCEEDED");
}
Also used: Path (org.apache.hadoop.fs.Path), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FileSystem (org.apache.hadoop.fs.FileSystem), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), IOException (java.io.IOException), JobConf (org.apache.hadoop.mapred.JobConf), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)

Example 73 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

In the class MRAppMasterTestLaunchTime, the method testMRAppMasterFailLock:

@Test
public void testMRAppMasterFailLock() throws IOException, InterruptedException {
    String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
    String containerIdStr = "container_1317529182569_0004_000002_1";
    String userName = "TestAppMasterUser";
    JobConf conf = new JobConf();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.fromString(applicationAttemptIdStr);
    JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
    Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
    Path end = MRApps.getEndJobCommitFailureFile(conf, userName, jobId);
    FileSystem fs = FileSystem.get(conf);
    fs.create(start).close();
    fs.create(end).close();
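    // Here the end-commit-failure marker exists instead, so the new AM is
    // expected to force the FAILED state and shut down.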
    ContainerId containerId = ContainerId.fromString(containerIdStr);
    MRAppMaster appMaster = new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, false);
    boolean caught = false;
    try {
        MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    } catch (IOException e) {
        // The IOException is expected here
        LOG.info("Caught expected Exception", e);
        caught = true;
    }
    assertTrue(caught);
    assertTrue(appMaster.errorHappenedShutDown);
    assertEquals(JobStateInternal.FAILED, appMaster.forcedState);
    appMaster.stop();
    // verify the final status is FAILED
    verifyFailedStatus((MRAppMasterTest) appMaster, "FAILED");
}
Also used: Path (org.apache.hadoop.fs.Path), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FileSystem (org.apache.hadoop.fs.FileSystem), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), IOException (java.io.IOException), JobConf (org.apache.hadoop.mapred.JobConf), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)

Example 74 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

In the class MRAppMasterTestLaunchTime, the method testMRAppMasterJobLaunchTime:

@Test
public void testMRAppMasterJobLaunchTime() throws IOException, InterruptedException {
    String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
    String containerIdStr = "container_1317529182569_0004_000002_1";
    String userName = "TestAppMasterUser";
    JobConf conf = new JobConf();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 0);
    conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.fromString(applicationAttemptIdStr);
    JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
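    // Stage an empty JSON-format job history file and split metadata for the
    // previous attempt under the job's staging dir, so that initAndStartAppMaster
    // can run and the recorded job launch time can be checked afterwards.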
    File dir = new File(MRApps.getStagingAreaDir(conf, userName).toString(), jobId.toString());
    dir.mkdirs();
    File historyFile = new File(JobHistoryUtils.getStagingJobHistoryFile(new Path(dir.toURI().toString()), jobId, (applicationAttemptId.getAttemptId() - 1)).toUri().getRawPath());
    historyFile.createNewFile();
    FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(historyFile), null);
    EventWriter writer = new EventWriter(out, EventWriter.WriteMode.JSON);
    writer.close();
    FileSystem fs = FileSystem.get(conf);
    JobSplitWriter.createSplitFiles(new Path(dir.getAbsolutePath()), conf, fs, new org.apache.hadoop.mapred.InputSplit[0]);
    ContainerId containerId = ContainerId.fromString(containerIdStr);
    MRAppMasterTestLaunchTime appMaster = new MRAppMasterTestLaunchTime(applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis());
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
    appMaster.stop();
    assertTrue("Job launch time should not be negative.", appMaster.jobLaunchTime.get() >= 0);
}
Also used: Path (org.apache.hadoop.fs.Path), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), EventWriter (org.apache.hadoop.mapreduce.jobhistory.EventWriter), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FileOutputStream (java.io.FileOutputStream), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), JobConf (org.apache.hadoop.mapred.JobConf), File (java.io.File), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)

Example 75 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

In the class TestLocalContainerLauncher, the method testRenameMapOutputForReduce:

@Test
public void testRenameMapOutputForReduce() throws Exception {
    final JobConf conf = new JobConf();
    final MROutputFiles mrOutputFiles = new MROutputFiles();
    mrOutputFiles.setConf(conf);
    // Make sure the two output files end up in distinct local dirs
    conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
    final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
    conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
    final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
    Assert.assertNotEquals("Paths must be different!", mapOut.getParent(), mapOutIdx.getParent());
    // make both dirs part of LOCAL_DIR
    conf.setStrings(MRConfig.LOCAL_DIR, localDirs);
    final FileContext lfc = FileContext.getLocalFSFileContext(conf);
    lfc.create(mapOut, EnumSet.of(CREATE)).close();
    lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();
    final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
    final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);
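    // The rename is expected to succeed even though the map output file and its
    // index file were created under different local dirs.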
    LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
Also used: Path (org.apache.hadoop.fs.Path), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), FileContext (org.apache.hadoop.fs.FileContext), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)

Aggregations

JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 276 usages
Test (org.junit.Test): 238 usages
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 212 usages
ClientResponse (com.sun.jersey.api.client.ClientResponse): 103 usages
WebResource (com.sun.jersey.api.client.WebResource): 103 usages
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 88 usages
JSONObject (org.codehaus.jettison.json.JSONObject): 81 usages
Configuration (org.apache.hadoop.conf.Configuration): 77 usages
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 61 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 55 usages
Path (org.apache.hadoop.fs.Path): 52 usages
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 51 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 47 usages
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 41 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 36 usages
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 35 usages
IOException (java.io.IOException): 32 usages
JobConf (org.apache.hadoop.mapred.JobConf): 28 usages
HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo): 25 usages
JobID (org.apache.hadoop.mapreduce.JobID): 23 usages
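
For quick reference, the two JobId construction patterns that recur in the examples above are collected in the small standalone sketch below; the class name, the placeholder attempt string, and the numeric IDs are illustration-only assumptions, not values taken from the indexed sources.

import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;

public class JobIdConstructionSketch {

    public static void main(String[] args) {
        // Pattern 1 (Example 75): build a JobId directly from a cluster timestamp,
        // an application id, and a job sequence number.
        JobId direct = MRBuilderUtils.newJobId(12345L, 1, 2);

        // Pattern 2 (Examples 72-74): derive the JobId from a YARN application
        // attempt by round-tripping through the classic JobID type.
        ApplicationAttemptId attemptId =
            ApplicationAttemptId.fromString("appattempt_1317529182569_0004_000002");
        JobId fromAttempt =
            TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));

        System.out.println(direct + " / " + fromAttempt);
    }
}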