
Example 1 with TaskCompletionEvent

Use of org.apache.hadoop.mapred.TaskCompletionEvent in project hadoop by apache.

Class TestJobHistoryEntities, method testGetTaskAttemptCompletionEvent.

/**
 * Simple test of some methods of CompletedJob.
 * @throws Exception
 */
@Test(timeout = 30000)
public void testGetTaskAttemptCompletionEvent() throws Exception {
    HistoryFileInfo info = mock(HistoryFileInfo.class);
    when(info.getConfFile()).thenReturn(fullConfPath);
    completedJob = new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user", info, jobAclsManager);
    TaskCompletionEvent[] events = completedJob.getMapAttemptCompletionEvents(0, 1000);
    assertEquals(10, completedJob.getMapAttemptCompletionEvents(0, 10).length);
    int currentEventId = 0;
    for (TaskCompletionEvent taskAttemptCompletionEvent : events) {
        int eventId = taskAttemptCompletionEvent.getEventId();
        assertTrue(eventId >= currentEventId);
        currentEventId = eventId;
    }
    assertNull(completedJob.loadConfFile());
    // job name
    assertEquals("Sleep job", completedJob.getName());
    // queue name
    assertEquals("default", completedJob.getQueueName());
    // progress
    assertEquals(1.0, completedJob.getProgress(), 0.001);
    // 12 completion events in total
    assertEquals(12, completedJob.getTaskAttemptCompletionEvents(0, 1000).length);
    // the first 10 events
    assertEquals(10, completedJob.getTaskAttemptCompletionEvents(0, 10).length);
    // up to 10 events starting at index 5; only 7 remain
    assertEquals(7, completedJob.getTaskAttemptCompletionEvents(5, 10).length);
    // no errors: diagnostics hold a single empty entry
    assertEquals(1, completedJob.getDiagnostics().size());
    assertEquals("", completedJob.getDiagnostics().get(0));
    assertEquals(0, completedJob.getJobACLs().size());
}
Also used: HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo), TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent), Test (org.junit.Test)
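The two-argument calls above take a (fromEventId, maxEvents) paging window: each call returns at most maxEvents events starting at fromEventId, which is why (0, 1000) yields all 12 events, (0, 10) yields 10, and (5, 10) yields the 7 that remain. The classic mapred client offers the same paging through RunningJob.getTaskCompletionEvents(int). Here is a minimal sketch of draining all events with it (fetchAllEvents is a hypothetical helper, not part of the test above):

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskCompletionEvent;

// Hypothetical helper: page through the completion events of a job.
// getTaskCompletionEvents(startFrom) returns a bounded batch starting at
// the given index; an empty batch means no further events are available.
static List<TaskCompletionEvent> fetchAllEvents(RunningJob job) throws IOException {
    List<TaskCompletionEvent> all = new ArrayList<>();
    int from = 0;
    TaskCompletionEvent[] batch;
    while ((batch = job.getTaskCompletionEvents(from)).length > 0) {
        all.addAll(Arrays.asList(batch));
        from += batch.length;
    }
    return all;
}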

Example 2 with TaskCompletionEvent

Use of org.apache.hadoop.mapred.TaskCompletionEvent in project hadoop by apache.

Class TestFetchFailure, method testFetchFailure.

@Test
public void testFetchFailure() throws Exception {
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    // map -> reduce -> fetch-failure -> map retry is incompatible with
    // sequential, single-task-attempt approach in uber-AM, so disable:
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    // all maps should be running
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    // wait for the map task state to move to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
    // send the done signal to the map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    final int checkIntervalMillis = 10;
    final int waitForMillis = 800;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
            return events.length >= 1;
        }
    }, checkIntervalMillis, waitForMillis);
    TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 1, events.length);
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
    // wait for reduce to start running
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    // send 3 fetch failures from the reduce to trigger map re-execution
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    // wait for the map task state to move back to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    // the first map attempt must have become FAILED
    Assert.assertEquals("Map TaskAttempt state not correct", TaskAttemptState.FAILED, mapAttempt1.getState());
    Assert.assertEquals("Num attempts in Map Task not correct", 2, mapTask.getAttempts().size());
    Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
    // skip the first (failed) attempt
    atIt.next();
    TaskAttempt mapAttempt2 = atIt.next();
    app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
    // send the done signal to the second map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    // send the done signal to the reduce attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    // the previous completion event now becomes obsolete
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 4, events.length);
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[0].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[1].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt2.getID(), events[2].getAttemptId());
    Assert.assertEquals("Event redude attempt id not correct", reduceAttempt.getID(), events[3].getAttemptId());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
    Assert.assertEquals("Event status not correct for map attempt2", TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
    Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
    TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
    TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
    Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
    Assert.assertArrayEquals("Unexpected map events", Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
    mapEvents = job.getMapAttemptCompletionEvents(2, 200);
    Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
    Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent), TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Test (org.junit.Test)
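The private helper sendFetchFailure is called three times above but not shown in the snippet. A plausible reconstruction, assuming a JobTaskAttemptFetchFailureEvent from the same org.apache.hadoop.mapreduce.v2.app.job.event package whose constructor takes the reducer id, the failed map attempts, and the host (an assumption inferred from the arguments the test passes, not confirmed by this excerpt):

private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
        TaskAttempt mapAttempt, String hostname) {
    // Report, on behalf of the reducer, that the given map attempt's
    // output could not be fetched from the given host.
    app.getContext().getEventHandler().handle(
            new JobTaskAttemptFetchFailureEvent(reduceAttempt.getID(),
                    Arrays.asList(mapAttempt.getID()), hostname));
}

In this test, three such reports are enough to fail mapAttempt1 and schedule the second map attempt.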

Example 3 with TaskCompletionEvent

Use of org.apache.hadoop.mapred.TaskCompletionEvent in project hadoop by apache.

Class TypeConverter, method fromYarn.

public static TaskCompletionEvent[] fromYarn(TaskAttemptCompletionEvent[] newEvents) {
    TaskCompletionEvent[] oldEvents = new TaskCompletionEvent[newEvents.length];
    int i = 0;
    for (TaskAttemptCompletionEvent newEvent : newEvents) {
        // delegate each record to the single-event fromYarn overload
        oldEvents[i++] = fromYarn(newEvent);
    }
    return oldEvents;
}
Also used: TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent), TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent)
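Example 2 above shows the typical call site: fetch the v2 records from the application master, then convert them for classic clients. In short:

// From Example 2: job is an org.apache.hadoop.mapreduce.v2.app.job.Job.
TaskAttemptCompletionEvent[] newEvents = job.getTaskAttemptCompletionEvents(0, 100);
TaskCompletionEvent[] oldEvents = TypeConverter.fromYarn(newEvents);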

Example 4 with TaskCompletionEvent

Use of org.apache.hadoop.mapred.TaskCompletionEvent in project compiler by boalang.

Class BoaOutputCommitter, method abortJob.

@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
    super.abortJob(context, runState);
    final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
    final RunningJob job = jobClient.getJob((org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
    String diag = "";
    for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0)) {
        switch (event.getTaskStatus()) {
            case SUCCEEDED:
                break;
            // every unsuccessful status falls through to collect diagnostics
            case FAILED:
            case KILLED:
            case OBSOLETE:
            case TIPFAILED:
                diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
                for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId())) {
                    diag += s + "\n";
                }
                diag += "\n";
                break;
        }
    }
    updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
Also used: TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent), RunningJob (org.apache.hadoop.mapred.RunningJob), JobClient (org.apache.hadoop.mapred.JobClient), JobConf (org.apache.hadoop.mapred.JobConf)

Example 5 with TaskCompletionEvent

Use of org.apache.hadoop.mapred.TaskCompletionEvent in project hive by apache.

Class HadoopJobExecHelper, method computeReducerTimeStatsPerJob.

private void computeReducerTimeStatsPerJob(RunningJob rj) throws IOException {
    TaskCompletionEvent[] taskCompletions = rj.getTaskCompletionEvents(0);
    List<Integer> reducersRunTimes = new ArrayList<Integer>();
    for (TaskCompletionEvent taskCompletion : taskCompletions) {
        if (!taskCompletion.isMapTask()) {
            reducersRunTimes.add(Integer.valueOf(taskCompletion.getTaskRunTime()));
        }
    }
    // Compute the reducer run-time statistics for the job
    ReducerTimeStatsPerJob reducerTimeStatsPerJob = new ReducerTimeStatsPerJob(reducersRunTimes);
    // Add them to the QueryPlan
    this.task.getQueryPlan().getReducerTimeStatsPerJobList().add(reducerTimeStatsPerJob);
}
Also used: ReducerTimeStatsPerJob (org.apache.hadoop.hive.ql.plan.ReducerTimeStatsPerJob), TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent), ArrayList (java.util.ArrayList)
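ReducerTimeStatsPerJob itself is not shown in this snippet. As a rough illustration of the kind of summary such a class could derive from the collected run times, here is a sketch (summarize is a hypothetical helper, not Hive's actual API):

import java.util.List;

// Hypothetical summary over the reducer run times collected above.
static String summarize(List<Integer> runTimesMs) {
    if (runTimesMs.isEmpty()) {
        return "no reducers";
    }
    long min = Integer.MAX_VALUE, max = Integer.MIN_VALUE, sum = 0;
    for (int t : runTimesMs) {
        min = Math.min(min, t);
        max = Math.max(max, t);
        sum += t;
    }
    return String.format("min=%dms, max=%dms, mean=%dms", min, max, sum / runTimesMs.size());
}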

Aggregations

TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent): 9 uses
TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent): 4 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 3 uses
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 3 uses
Test (org.junit.Test): 3 uses
ArrayList (java.util.ArrayList): 2 uses
MapTaskCompletionEventsUpdate (org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate): 2 uses
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 2 uses
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 2 uses
HashMap (java.util.HashMap): 1 use
LinkedList (java.util.LinkedList): 1 use
FileContext (org.apache.hadoop.fs.FileContext): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
ReducerTimeStatsPerJob (org.apache.hadoop.hive.ql.plan.ReducerTimeStatsPerJob): 1 use
JobACLsManager (org.apache.hadoop.mapred.JobACLsManager): 1 use
JobClient (org.apache.hadoop.mapred.JobClient): 1 use
JobConf (org.apache.hadoop.mapred.JobConf): 1 use
RunningJob (org.apache.hadoop.mapred.RunningJob): 1 use
TaskAttemptID (org.apache.hadoop.mapred.TaskAttemptID): 1 use