
Example 11 with CommitterEventHandler

Use of org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler in project hadoop by apache.

From the class TestCommitterEventHandler, method testFailure:

@Test
public void testFailure() throws Exception {
    AppContext mockContext = mock(AppContext.class);
    OutputCommitter mockCommitter = mock(OutputCommitter.class);
    Clock mockClock = mock(Clock.class);
    CommitterEventHandler handler = new CommitterEventHandler(mockContext, mockCommitter, new TestingRMHeartbeatHandler());
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    JobContext mockJobContext = mock(JobContext.class);
    ApplicationAttemptId attemptid = ApplicationAttemptId.fromString("appattempt_1234567890000_0001_0");
    JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
    WaitForItHandler waitForItHandler = new WaitForItHandler();
    when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
    when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
    when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
    when(mockContext.getClock()).thenReturn(mockClock);
    // Make commitJob throw so the handler exercises the failure path
    doThrow(new YarnRuntimeException("Intentional Failure")).when(mockCommitter).commitJob(any(JobContext.class));
    handler.init(conf);
    handler.start();
    try {
        handler.handle(new CommitterJobCommitEvent(jobId, mockJobContext));
        String user = UserGroupInformation.getCurrentUser().getShortUserName();
        Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
        Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
        Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
        // The handler should report the failure back to the job as a JobCommitFailedEvent
        Event e = waitForItHandler.getAndClearEvent();
        assertNotNull(e);
        assertTrue(e instanceof JobCommitFailedEvent);
        // The start and failure marker files should exist in staging; the success marker should not
        FileSystem fs = FileSystem.get(conf);
        assertTrue(fs.exists(startCommitFile));
        assertFalse(fs.exists(endCommitSuccessFile));
        assertTrue(fs.exists(endCommitFailureFile));
        verify(mockCommitter).commitJob(any(JobContext.class));
    } finally {
        handler.stop();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Clock(org.apache.hadoop.yarn.util.Clock) SystemClock(org.apache.hadoop.yarn.util.SystemClock) YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) Event(org.apache.hadoop.yarn.event.Event) JobCommitFailedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobCommitFailedEvent) JobCommitCompletedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobCommitCompletedEvent) JobContext(org.apache.hadoop.mapreduce.JobContext) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
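
The test above leans on two helpers defined elsewhere in TestCommitterEventHandler: TestingRMHeartbeatHandler, passed into the CommitterEventHandler constructor, and WaitForItHandler, registered as the context's event handler. The sketch below only illustrates what such helpers could look like, assuming RMHeartbeatHandler exposes getLastHeartbeatTime() and runOnNextHeartbeat(Runnable) and that WaitForItHandler is an org.apache.hadoop.yarn.event.EventHandler that parks the caller until an event arrives; the real implementations (and the wait timeout) may differ.

// Sketch only; the real helpers live in TestCommitterEventHandler.
static class TestingRMHeartbeatHandler implements RMHeartbeatHandler {

    @Override
    public long getLastHeartbeatTime() {
        // Pretend an RM heartbeat just happened so commits never look stale
        return System.currentTimeMillis();
    }

    @Override
    public void runOnNextHeartbeat(Runnable callback) {
        // Run heartbeat callbacks immediately in tests
        callback.run();
    }
}

static class WaitForItHandler implements EventHandler<Event> {

    private Event event;

    @Override
    public synchronized void handle(Event event) {
        this.event = event;
        notifyAll();
    }

    // Block until an event arrives (or an assumed 10s timeout expires) and hand it back
    public synchronized Event getAndClearEvent() throws InterruptedException {
        if (event == null) {
            wait(10000);
        }
        Event result = event;
        event = null;
        return result;
    }
}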

Example 12 with CommitterEventHandler

Use of org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler in project hadoop by apache.

From the class TestJobImpl, method testKilledDuringSetup:

@Test(timeout = 20000)
public void testKilledDuringSetup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    // A committer whose setupJob never returns, holding the job in the SETUP state so the test can kill it there
    OutputCommitter committer = new StubbedOutputCommitter() {

        @Override
        public synchronized void setupJob(JobContext jobContext) throws IOException {
            while (!Thread.interrupted()) {
                try {
                    wait();
                } catch (InterruptedException e) {
                }
            }
        }
    };
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
    JobId jobId = job.getID();
    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(jobId));
    assertJobState(job, JobStateInternal.SETUP);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
    assertJobState(job, JobStateInternal.KILLED);
    dispatcher.stop();
    commitHandler.stop();
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Configuration(org.apache.hadoop.conf.Configuration) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) JobStartEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) JobContext(org.apache.hadoop.mapreduce.JobContext) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
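
Every TestJobImpl example wires the committer to the test dispatcher through a createCommitterEventHandler helper. A minimal sketch of such a helper is shown below, assuming the three-argument CommitterEventHandler constructor from Example 11 and Mockito's mock/when; the real helper in TestJobImpl may set up additional context.

// Sketch of a createCommitterEventHandler-style helper; details are assumptions.
private static CommitterEventHandler createCommitterEventHandler(
        Dispatcher dispatcher, OutputCommitter committer) {
    final SystemClock clock = SystemClock.getInstance();
    AppContext appContext = mock(AppContext.class);
    when(appContext.getEventHandler()).thenReturn(dispatcher.getEventHandler());
    when(appContext.getClock()).thenReturn(clock);

    RMHeartbeatHandler heartbeatHandler = new RMHeartbeatHandler() {
        @Override
        public long getLastHeartbeatTime() {
            return clock.getTime();
        }

        @Override
        public void runOnNextHeartbeat(Runnable callback) {
            callback.run();
        }
    };

    CommitterEventHandler handler =
            new CommitterEventHandler(appContext, committer, heartbeatHandler);
    // Route committer events dispatched by JobImpl to this handler
    dispatcher.register(CommitterEventType.class, handler);
    return handler;
}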

Example 13 with CommitterEventHandler

Use of org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler in project hadoop by apache.

From the class TestJobImpl, method testFailAbortDoesntHang:

@Test(timeout = 10000)
public void testFailAbortDoesntHang() throws IOException {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = Mockito.mock(OutputCommitter.class);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    //Job has only 1 mapper task. No reducers
    conf.setInt(MRJobConfig.NUM_REDUCES, 0);
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 1, null);
    // Fail every attempt of every task; this should land the JobImpl in the FAIL_ABORT state
    for (Task t : job.tasks.values()) {
        TaskImpl task = (TaskImpl) t;
        task.handle(new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
        for (TaskAttempt ta : task.getAttempts().values()) {
            task.handle(new TaskTAttemptEvent(ta.getID(), TaskEventType.T_ATTEMPT_FAILED));
        }
    }
    dispatcher.await();
    //Verify abortJob is called once and the job failed
    Mockito.verify(committer, Mockito.timeout(2000).times(1)).abortJob((JobContext) Mockito.any(), (State) Mockito.any());
    assertJobState(job, JobStateInternal.FAILED);
    dispatcher.stop();
}
Also used : DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) TaskTAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Test(org.junit.Test)
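
assertJobState shows up in every TestJobImpl example because state transitions run on dispatcher threads, so a direct assertEquals on the internal state would race. A plausible polling helper is sketched below, assuming JobImpl exposes getInternalState(); the real helper's timeout and polling interval may differ.

// Sketch: poll until the job reaches the expected internal state or a timeout expires.
private static void assertJobState(JobImpl job, JobStateInternal state) {
    int timeToWaitMsec = 5 * 1000;
    while (timeToWaitMsec > 0 && job.getInternalState() != state) {
        try {
            Thread.sleep(10);
            timeToWaitMsec -= 10;
        } catch (InterruptedException ex) {
            break;
        }
    }
    Assert.assertEquals(state, job.getInternalState());
}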

Example 14 with CommitterEventHandler

Use of org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler in project hadoop by apache.

From the class TestJobImpl, method testRebootedDuringCommit:

@Test(timeout = 20000)
public void testRebootedDuringCommit() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 2);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    AppContext mockContext = mock(AppContext.class);
    when(mockContext.isLastAMRetry()).thenReturn(true);
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, mockContext);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);
    syncBarrier.await();
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
    assertJobState(job, JobStateInternal.REBOOT);
    // While unregistration has not completed, the external state is still reported as RUNNING
    Assert.assertEquals(JobState.RUNNING, job.getState());
    // Once the AM has successfully unregistered, report the external state as ERROR since this is the last retry
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
    Assert.assertEquals(JobState.ERROR, job.getState());
    dispatcher.stop();
    commitHandler.stop();
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Configuration(org.apache.hadoop.conf.Configuration) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) CyclicBarrier(java.util.concurrent.CyclicBarrier) Test(org.junit.Test)
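
WaitingOutputCommitter is what keeps the job parked in COMMITTING: its commitJob blocks on the shared CyclicBarrier so the test can observe the state and then deliver the JOB_AM_REBOOT event. A rough sketch is shown below, assuming it extends the no-op StubbedOutputCommitter base (sketched after Example 15); the constructor arguments and blocking behavior are inferred from how the test uses it and may not match the real class exactly.

// Sketch only; the real WaitingOutputCommitter is defined inside TestJobImpl.
private static class WaitingOutputCommitter extends StubbedOutputCommitter {

    private final CyclicBarrier syncBarrier;
    private final boolean waitAfterBarrier;

    public WaitingOutputCommitter(CyclicBarrier syncBarrier, boolean waitAfterBarrier) {
        this.syncBarrier = syncBarrier;
        this.waitAfterBarrier = waitAfterBarrier;
    }

    @Override
    public synchronized void commitJob(JobContext jobContext) throws IOException {
        try {
            // Rendezvous with the test thread, then keep the commit pending
            syncBarrier.await();
            if (waitAfterBarrier) {
                while (!Thread.interrupted()) {
                    wait();
                }
            }
        } catch (BrokenBarrierException | InterruptedException e) {
            // Interrupted (or barrier broken): let the commit return
        }
    }
}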

Example 15 with CommitterEventHandler

Use of org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler in project hadoop by apache.

From the class TestJobImpl, method testAbortJobCalledAfterKillingTasks:

@Test
public void testAbortJobCalledAfterKillingTasks() throws IOException {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
    InlineDispatcher dispatcher = new InlineDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = Mockito.mock(OutputCommitter.class);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    //Fail one task. This should land the JobImpl in the FAIL_WAIT state
    job.handle(new JobTaskEvent(MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP), TaskState.FAILED));
    //Verify abort job hasn't been called
    Mockito.verify(committer, Mockito.never()).abortJob((JobContext) Mockito.any(), (State) Mockito.any());
    assertJobState(job, JobStateInternal.FAIL_WAIT);
    //Verify abortJob is called once and the job failed
    Mockito.verify(committer, Mockito.timeout(2000).times(1)).abortJob((JobContext) Mockito.any(), (State) Mockito.any());
    assertJobState(job, JobStateInternal.FAILED);
    dispatcher.stop();
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Configuration(org.apache.hadoop.conf.Configuration) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) InlineDispatcher(org.apache.hadoop.yarn.event.InlineDispatcher) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) Test(org.junit.Test)
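
Examples 12 and 14 extend a StubbedOutputCommitter defined inside TestJobImpl. For reference, a no-op committer along the following lines satisfies the abstract org.apache.hadoop.mapreduce.OutputCommitter contract; the real test class may override additional methods.

// Sketch of a no-op OutputCommitter base like the one the tests extend.
private static class StubbedOutputCommitter extends OutputCommitter {

    @Override
    public void setupJob(JobContext jobContext) throws IOException {
    }

    @Override
    public void setupTask(TaskAttemptContext taskContext) throws IOException {
    }

    @Override
    public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
        return false;
    }

    @Override
    public void commitTask(TaskAttemptContext taskContext) throws IOException {
    }

    @Override
    public void abortTask(TaskAttemptContext taskContext) throws IOException {
    }
}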

Aggregations

OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter) 16
Test (org.junit.Test) 16
Configuration (org.apache.hadoop.conf.Configuration) 15
CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) 14
JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) 11
AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher) 11
JobContext (org.apache.hadoop.mapreduce.JobContext) 8
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId) 7
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext) 7
JobStartEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent) 6
CyclicBarrier (java.util.concurrent.CyclicBarrier) 5
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 4
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 4
SystemClock (org.apache.hadoop.yarn.util.SystemClock) 4
IOException (java.io.IOException) 3
FileSystem (org.apache.hadoop.fs.FileSystem) 3
Path (org.apache.hadoop.fs.Path) 3
JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) 3
State (org.apache.hadoop.mapreduce.JobStatus.State) 2
JobState (org.apache.hadoop.mapreduce.v2.api.records.JobState) 2