Example 66 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

From class TestJobSummary, method before().

@Before
public void before() {
    JobId mockJobId = mock(JobId.class);
    when(mockJobId.toString()).thenReturn("testJobId");
    summary.setJobId(mockJobId);
    summary.setJobSubmitTime(2L);
    summary.setJobLaunchTime(3L);
    summary.setFirstMapTaskLaunchTime(4L);
    summary.setFirstReduceTaskLaunchTime(5L);
    summary.setJobFinishTime(6L);
    summary.setNumFinishedMaps(1);
    summary.setNumFailedMaps(0);
    summary.setNumFinishedReduces(1);
    summary.setNumFailedReduces(0);
    summary.setUser("testUser");
    summary.setQueue("testQueue");
    summary.setJobStatus("testJobStatus");
    summary.setMapSlotSeconds(7);
    summary.setReduceSlotSeconds(8);
    summary.setJobName("testName");
}
Also used : JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Before(org.junit.Before)
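
A note on this fixture: Mockito allows toString() to be stubbed (unlike equals() and hashCode()), which is what makes the mocked JobId render as a stable string wherever the summary is logged or formatted. A minimal, self-contained sketch of the same pattern; FakeJobId is a hypothetical stand-in for the real records class:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ToStringStubSketch {

    // Hypothetical stand-in for org.apache.hadoop.mapreduce.v2.api.records.JobId,
    // used only to keep this sketch self-contained.
    interface FakeJobId {
    }

    public static void main(String[] args) {
        FakeJobId id = mock(FakeJobId.class);
        // Mockito permits stubbing toString(), so any code that logs or
        // concatenates the mock sees this stable value.
        when(id.toString()).thenReturn("job_1234567890123_0001");
        System.out.println("summary uses: " + id);
    }
}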

Example 67 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

From class TestRecovery, method testRecoveryAllAttemptsKilled().

@Test
public void testRecoveryAllAttemptsKilled() {
    LOG.info("--- START:  testRecoveryAllAttemptsKilled ---");
    long clusterTimestamp = System.currentTimeMillis();
    EventHandler mockEventHandler = mock(EventHandler.class);
    MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp, mockEventHandler);
    TaskId taskId = recoverMapTask.getID();
    JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
    TaskID taskID = new TaskID(jobID, org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());
    // Mock up the TaskAttempts
    Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts = new HashMap<TaskAttemptID, TaskAttemptInfo>();
    TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
    TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1, TaskAttemptState.KILLED);
    mockTaskAttempts.put(taId1, mockTAinfo1);
    TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
    TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2, TaskAttemptState.KILLED);
    mockTaskAttempts.put(taId2, mockTAinfo2);
    OutputCommitter mockCommitter = mock(OutputCommitter.class);
    TaskInfo mockTaskInfo = mock(TaskInfo.class);
    when(mockTaskInfo.getTaskStatus()).thenReturn("KILLED");
    when(mockTaskInfo.getTaskId()).thenReturn(taskID);
    when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);
    recoverMapTask.handle(new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
    ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
    verify(mockEventHandler, atLeast(1)).handle((org.apache.hadoop.yarn.event.Event) arg.capture());
    Map<TaskAttemptID, TaskAttemptState> finalAttemptStates = new HashMap<TaskAttemptID, TaskAttemptState>();
    finalAttemptStates.put(taId1, TaskAttemptState.KILLED);
    finalAttemptStates.put(taId2, TaskAttemptState.KILLED);
    List<EventType> jobHistoryEvents = new ArrayList<EventType>();
    jobHistoryEvents.add(EventType.TASK_STARTED);
    jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
    jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
    jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
    jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
    jobHistoryEvents.add(EventType.TASK_FAILED);
    recoveryChecker(recoverMapTask, TaskState.KILLED, finalAttemptStates, arg, jobHistoryEvents, 2L, 0L);
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskID(org.apache.hadoop.mapreduce.TaskID) HashMap(java.util.HashMap) TaskAttemptEventType(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType) EventType(org.apache.hadoop.mapreduce.jobhistory.EventType) TaskEventType(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) ArrayList(java.util.ArrayList) EventHandler(org.apache.hadoop.yarn.event.EventHandler) JobHistoryEventHandler(org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler) MapTaskImpl(org.apache.hadoop.mapreduce.v2.app.job.impl.MapTaskImpl) TaskInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo) TaskAttemptState(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState) TaskAttemptInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) Event(org.apache.hadoop.mapreduce.jobhistory.Event) TaskRecoverEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskRecoverEvent) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) JobHistoryEvent(org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent) JobCounterUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent) ContainerLauncherEvent(org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) JobID(org.apache.hadoop.mapreduce.JobID) Test(org.junit.Test)
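
The assertions above lean on Mockito's ArgumentCaptor: verify(..., atLeast(1)) records every event handed to the mocked EventHandler, and getAllValues() exposes them as a list, which is presumably what recoveryChecker inspects (its body is not shown in this excerpt). A minimal sketch of that capture pattern, using a plain java.util.function.Consumer as a stand-in for YARN's EventHandler:

import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import java.util.List;
import java.util.function.Consumer;

import org.mockito.ArgumentCaptor;

public class CaptorSketch {

    public static void main(String[] args) {
        // Stand-in for org.apache.hadoop.yarn.event.EventHandler.
        @SuppressWarnings("unchecked")
        Consumer<String> handler = mock(Consumer.class);

        handler.accept("TASK_STARTED");
        handler.accept("MAP_ATTEMPT_KILLED");

        // Capture every argument the mock received, then inspect the list.
        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        verify(handler, atLeast(1)).accept(captor.capture());
        List<String> events = captor.getAllValues();
        System.out.println(events); // [TASK_STARTED, MAP_ATTEMPT_KILLED]
    }
}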

Example 68 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

From class TestJobEndNotifier, method testNotificationOnLastRetryUnregistrationFailure().

@Test
public void testNotificationOnLastRetryUnregistrationFailure() throws Exception {
    HttpServer2 server = startHttpServer();
    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false, this.getClass().getName(), true, 2, false));
    // Currently isLastRetry is always false at the beginning of MRAppMaster,
    // unless the staging area already exists or the commit has already
    // started by then.
    // Now manually set isLastRetry to true; it should be reset to false when
    // unregistration fails.
    app.isLastAMRetry = true;
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL, JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl) app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
    app.waitForInternalState(job, JobStateInternal.REBOOT);
    // Now shut down. The user should see the FAILED state.
    // Unregistration fails, so isLastAMRetry is recalculated. The reboot
    // stops the service internally, so we don't need to shut down twice.
    app.waitForServiceToStop(10000);
    Assert.assertFalse(app.isLastAMRetry());
    // Since this is not the last retry, JobEndServlet was never called.
    Assert.assertEquals(0, JobEndServlet.calledTimes);
    Assert.assertNull(JobEndServlet.requestUri);
    Assert.assertNull(JobEndServlet.foundJobState);
    server.stop();
}
Also used : JobImpl(org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) HttpServer2(org.apache.hadoop.http.HttpServer2) JobConf(org.apache.hadoop.mapred.JobConf) Test(org.junit.Test)
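
The notification URL set above relies on placeholder expansion: before issuing the HTTP callback, the MapReduce job-end notifier substitutes $jobId and $jobStatus in mapreduce.job.end-notification.url. A simplified, hand-rolled sketch of that substitution (the expand helper here is hypothetical; the real logic lives in JobEndNotifier, which also handles retries):

public class NotificationUrlSketch {

    // Mimics the $jobId/$jobStatus expansion performed by the job-end
    // notifier (simplified; hypothetical helper, not a Hadoop API).
    static String expand(String template, String jobId, String jobStatus) {
        return template.replace("$jobId", jobId)
                       .replace("$jobStatus", jobStatus);
    }

    public static void main(String[] args) {
        String url = expand(
            "http://localhost:8080/jobend?jobid=$jobId&status=$jobStatus",
            "job_1234567890123_0001", "FAILED");
        // http://localhost:8080/jobend?jobid=job_1234567890123_0001&status=FAILED
        System.out.println(url);
    }
}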

Example 69 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

From class TestKill, method testKillTaskWait().

@Test
public void testKillTaskWait() throws Exception {
    final Dispatcher dispatcher = new AsyncDispatcher() {

        private TaskAttemptEvent cachedKillEvent;

        @Override
        protected void dispatch(Event event) {
            if (event instanceof TaskAttemptEvent) {
                TaskAttemptEvent killEvent = (TaskAttemptEvent) event;
                if (killEvent.getType() == TaskAttemptEventType.TA_KILL) {
                    TaskAttemptId taID = killEvent.getTaskAttemptID();
                    if (taID.getTaskId().getTaskType() == TaskType.REDUCE && taID.getTaskId().getId() == 0 && taID.getId() == 0) {
                        // The Task is asking the reduce TA to kill itself. "Create" a
                        // race condition: make the attempt succeed, then inform the
                        // Task that the TA has succeeded. Once the Task receives the
                        // TA-succeeded event in KILL_WAIT, relay the actual kill
                        // signal to the TA.
                        super.dispatch(new TaskAttemptEvent(taID, TaskAttemptEventType.TA_DONE));
                        super.dispatch(new TaskAttemptEvent(taID, TaskAttemptEventType.TA_CONTAINER_COMPLETED));
                        super.dispatch(new TaskTAttemptEvent(taID, TaskEventType.T_ATTEMPT_SUCCEEDED));
                        this.cachedKillEvent = killEvent;
                        return;
                    }
                }
            } else if (event instanceof TaskEvent) {
                TaskEvent taskEvent = (TaskEvent) event;
                if (taskEvent.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED && this.cachedKillEvent != null) {
                    // When the TA reports that it is done, relay the cached
                    // kill event.
                    super.dispatch(this.cachedKillEvent);
                    return;
                }
            }
            super.dispatch(event);
        }
    };
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {

        @Override
        public Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    Job job = app.submit(new Configuration());
    JobId jobId = app.getJobId();
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    app.waitForState(mapTask, TaskState.RUNNING);
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    // Finish map
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    // Now kill the job
    app.getContext().getEventHandler().handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    app.waitForInternalState((JobImpl) job, JobStateInternal.KILLED);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskTAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) Event(org.apache.hadoop.yarn.event.Event) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)

Example 70 with JobId

Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.

From class TestKill, method testKillTaskWaitKillJobAfterTA_DONE().

@Test
public void testKillTaskWaitKillJobAfterTA_DONE() throws Exception {
    CountDownLatch latch = new CountDownLatch(1);
    final Dispatcher dispatcher = new MyAsyncDispatch(latch, TaskAttemptEventType.TA_DONE);
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {

        @Override
        public Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    Job job = app.submit(new Configuration());
    JobId jobId = app.getJobId();
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    app.waitForState(mapTask, TaskState.RUNNING);
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    // The order in the dispatch event queue, from first to last
    // TA_DONE
    // JobEventType.JOB_KILL
    // TaskAttemptEventType.TA_CONTAINER_COMPLETED ( from TA_DONE handling )
    // TaskEventType.T_KILL ( from JobEventType.JOB_KILL handling )
    // TaskEventType.T_ATTEMPT_SUCCEEDED ( from TA_CONTAINER_COMPLETED handling )
    // Finish map
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
    // Now kill the job
    app.getContext().getEventHandler().handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    // Unblock the dispatcher so the queued events are processed in order.
    latch.countDown();
    app.waitForInternalState((JobImpl) job, JobStateInternal.KILLED);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) CountDownLatch(java.util.concurrent.CountDownLatch) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
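
MyAsyncDispatch is a helper defined elsewhere in TestKill; from its usage here it evidently blocks the dispatcher thread on the latch when the watched event type (TA_DONE) arrives, so the test can enqueue JOB_KILL behind TA_DONE in a known order before processing resumes. A hedged sketch of what such a latch-gated dispatcher might look like (the actual helper may differ):

import java.util.concurrent.CountDownLatch;

import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Event;

// Sketch of a latch-gated dispatcher in the spirit of MyAsyncDispatch:
// dispatch() parks on the latch when the watched event type arrives, so
// later events pile up in the queue in a deterministic order.
class LatchedDispatcher extends AsyncDispatcher {

    private final CountDownLatch latch;
    private final Enum<?> gateType;

    LatchedDispatcher(CountDownLatch latch, Enum<?> gateType) {
        this.latch = latch;
        this.gateType = gateType;
    }

    @Override
    protected void dispatch(Event event) {
        if (event.getType() == gateType) {
            try {
                latch.await(); // hold this event until the test counts down
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        super.dispatch(event);
    }
}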

Aggregations

JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 276
Test (org.junit.Test): 238
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 212
ClientResponse (com.sun.jersey.api.client.ClientResponse): 103
WebResource (com.sun.jersey.api.client.WebResource): 103
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 88
JSONObject (org.codehaus.jettison.json.JSONObject): 81
Configuration (org.apache.hadoop.conf.Configuration): 77
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 61
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 55
Path (org.apache.hadoop.fs.Path): 52
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 51
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 47
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 41
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 36
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 35
IOException (java.io.IOException): 32
JobConf (org.apache.hadoop.mapred.JobConf): 28
HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo): 25
JobID (org.apache.hadoop.mapreduce.JobID): 23