Search in sources :

Example 11 with TaskEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent in the Apache Hadoop project.

From the class TestJobImpl, method testFailAbortDoesntHang.

@Test(timeout = 10000)
public void testFailAbortDoesntHang() throws IOException {
    // Build a job config with a short committer-cancel timeout so the
    // abort path cannot block the test for long.
    Configuration jobConf = new Configuration();
    jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    jobConf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");

    // Dispatcher that lets us drain all queued events deterministically.
    DrainDispatcher drainDispatcher = new DrainDispatcher();
    drainDispatcher.init(jobConf);
    drainDispatcher.start();

    // Mocked committer wired into a real CommitterEventHandler so we can
    // verify how many times abortJob is invoked.
    OutputCommitter mockCommitter = Mockito.mock(OutputCommitter.class);
    CommitterEventHandler committerHandler = createCommitterEventHandler(drainDispatcher, mockCommitter);
    committerHandler.init(jobConf);
    committerHandler.start();

    // Single map task, no reducers, and only one attempt allowed so the
    // first attempt failure fails the whole job.
    jobConf.setInt(MRJobConfig.NUM_REDUCES, 0);
    jobConf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
    JobImpl stubbedJob = createRunningStubbedJob(jobConf, drainDispatcher, 1, null);

    // Drive every task into FAIL_ABORT: schedule it, then fail each attempt.
    for (Task rawTask : stubbedJob.tasks.values()) {
        TaskImpl taskImpl = (TaskImpl) rawTask;
        taskImpl.handle(new TaskEvent(taskImpl.getID(), TaskEventType.T_SCHEDULE));
        for (TaskAttempt attempt : taskImpl.getAttempts().values()) {
            taskImpl.handle(new TaskTAttemptEvent(attempt.getID(), TaskEventType.T_ATTEMPT_FAILED));
        }
    }
    drainDispatcher.await();

    // abortJob must fire exactly once and the job must land in FAILED —
    // i.e. the FAIL_ABORT transition completed instead of hanging.
    Mockito.verify(mockCommitter, Mockito.timeout(2000).times(1)).abortJob((JobContext) Mockito.any(), (State) Mockito.any());
    assertJobState(stubbedJob, JobStateInternal.FAILED);
    drainDispatcher.stop();
}
Also used : DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) TaskTAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Test(org.junit.Test)

Example 12 with TaskEvent

use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent in project hadoop by apache.

From the class TestKill, method testKillTask.

@Test
public void testKillTask() throws Exception {
    // Latch keeps both tasks blocked until we have issued the kill.
    final CountDownLatch releaseLatch = new CountDownLatch(1);
    MRApp mrApp = new BlockingMRApp(2, 0, releaseLatch);

    // Submit the job; it starts but cannot finish while tasks are blocked.
    Job runningJob = mrApp.submit(new Configuration());

    // Wait for the job to reach the internal RUNNING state.
    mrApp.waitForInternalState((JobImpl) runningJob, JobStateInternal.RUNNING);

    Map<TaskId, Task> taskMap = runningJob.getTasks();
    Assert.assertEquals("No of tasks is not correct", 2, taskMap.size());
    Iterator<Task> taskIter = taskMap.values().iterator();
    Task firstTask = taskIter.next();
    Task secondTask = taskIter.next();

    // Kill only the first task, then release both so the job can finish.
    mrApp.getContext().getEventHandler().handle(new TaskEvent(firstTask.getID(), TaskEventType.T_KILL));
    releaseLatch.countDown();

    // A job with one killed task and one succeeded task still SUCCEEDs.
    mrApp.waitForState(runningJob, JobState.SUCCEEDED);
    Assert.assertEquals("Task state not correct", TaskState.KILLED, firstTask.getReport().getTaskState());
    Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED, secondTask.getReport().getTaskState());

    // The killed task's single attempt must itself be KILLED.
    Map<TaskAttemptId, TaskAttempt> attemptMap = firstTask.getAttempts();
    Assert.assertEquals("No of attempts is not correct", 1, attemptMap.size());
    Iterator<TaskAttempt> attemptIter = attemptMap.values().iterator();
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED, attemptIter.next().getReport().getTaskAttemptState());

    // The surviving task's single attempt must be SUCCEEDED.
    attemptMap = secondTask.getAttempts();
    Assert.assertEquals("No of attempts is not correct", 1, attemptMap.size());
    attemptIter = attemptMap.values().iterator();
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED, attemptIter.next().getReport().getTaskAttemptState());
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) CountDownLatch(java.util.concurrent.CountDownLatch) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Test(org.junit.Test)

Example 13 with TaskEvent

use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent in project hadoop by apache.

From the class TestTaskImpl, method killTask.

private void killTask(TaskId id) {
    // Deliver a T_KILL event to the task under test, then confirm the
    // task has moved into the kill-wait state.
    TaskEvent killEvent = new TaskEvent(id, TaskEventType.T_KILL);
    mockTask.handle(killEvent);
    assertTaskKillWaitState();
}
Also used : TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent)

Aggregations

TaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent)12 Test (org.junit.Test)8 Configuration (org.apache.hadoop.conf.Configuration)5 Task (org.apache.hadoop.mapreduce.v2.app.job.Task)5 TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId)4 Job (org.apache.hadoop.mapreduce.v2.app.job.Job)4 TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)4 TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent)4 TaskTAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent)4 MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl)2 JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)2 TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId)2 JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent)2 TaskAttemptKillEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent)2 CountDownLatch (java.util.concurrent.CountDownLatch)1 Path (org.apache.hadoop.fs.Path)1 JobConf (org.apache.hadoop.mapred.JobConf)1 OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter)1 TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo)1 AMInfo (org.apache.hadoop.mapreduce.v2.api.records.AMInfo)1