
Example 81 with TaskAttempt

use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

the class TestFail method testTimedOutTask.

@Test
// All task attempts time out, leading to job failure.
public void testTimedOutTask() throws Exception {
    MRApp app = new TimeOutTaskMRApp(1, 0);
    Configuration conf = new Configuration();
    int maxAttempts = 2;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.FAILED);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals("Task state not correct", TaskState.FAILED, task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    for (TaskAttempt attempt : attempts.values()) {
        Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED, attempt.getReport().getTaskAttemptState());
    }
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Test(org.junit.Test)
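The TimeOutTaskMRApp used above is an inner class of TestFail and is not reproduced on this page. As a hedged sketch of the mechanism it relies on (the exact values and overrides live in TestFail), the idea is to tighten the AM's task-timeout settings so that attempts which never report progress are declared lost and failed:

// A minimal sketch, not the actual TestFail inner class. Tightening these
// MRJobConfig knobs makes the TaskHeartbeatHandler declare silent attempts
// lost; the 1000ms values are illustrative.
Configuration conf = new Configuration();
// declare an attempt dead after ~1s of silence (the default is 10 minutes)
conf.setInt(MRJobConfig.TASK_TIMEOUT, 1000);
// scan for timed-out attempts every second
conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 1000);
// With MRJobConfig.MAP_MAX_ATTEMPTS set to 2, both attempts time out in
// turn and the job transitions to FAILED, which is what the test asserts.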

Example 82 with TaskAttempt

use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

the class TestFetchFailure method testFetchFailureMultipleReduces.

@Test
public void testFetchFailureMultipleReduces() throws Exception {
    MRApp app = new MRApp(1, 3, false, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    // map -> reduce -> fetch-failure -> map retry is incompatible with
    // sequential, single-task-attempt approach in uber-AM, so disable:
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    // 1 map task and 3 reduce tasks should have been created
    Assert.assertEquals("Num tasks not correct", 4, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    Task reduceTask2 = it.next();
    Task reduceTask3 = it.next();
    //wait for the map task to reach RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
    //send the done signal to the map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 1, events.length);
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
    // wait for reduce to start running
    app.waitForState(reduceTask, TaskState.RUNNING);
    app.waitForState(reduceTask2, TaskState.RUNNING);
    app.waitForState(reduceTask3, TaskState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    updateStatus(app, reduceAttempt, Phase.SHUFFLE);
    TaskAttempt reduceAttempt2 = reduceTask2.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt2, TaskAttemptState.RUNNING);
    updateStatus(app, reduceAttempt2, Phase.SHUFFLE);
    TaskAttempt reduceAttempt3 = reduceTask3.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt3, TaskAttemptState.RUNNING);
    updateStatus(app, reduceAttempt3, Phase.SHUFFLE);
    //send 2 fetch failures from the reduces to prepare for map re-execution
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host1");
    sendFetchFailure(app, reduceAttempt2, mapAttempt1, "host2");
    //We should not re-launch the map task yet
    assertEquals(TaskState.SUCCEEDED, mapTask.getState());
    updateStatus(app, reduceAttempt2, Phase.REDUCE);
    updateStatus(app, reduceAttempt3, Phase.REDUCE);
    //send the 3rd fetch failure from a reduce to trigger map re-execution
    sendFetchFailure(app, reduceAttempt3, mapAttempt1, "host3");
    //wait for the map task state to move back to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    //the first map attempt must have become FAILED
    Assert.assertEquals("Map TaskAttempt state not correct", TaskAttemptState.FAILED, mapAttempt1.getState());
    Assert.assertEquals("Too many fetch failures. Failing the attempt. " + "Last failure reported by " + reduceAttempt3.getID().toString() + " from host host3", mapAttempt1.getDiagnostics().get(0));
    Assert.assertEquals("Num attempts in Map Task not correct", 2, mapTask.getAttempts().size());
    Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
    atIt.next();
    TaskAttempt mapAttempt2 = atIt.next();
    app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
    //send the done signal to the second map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    //send done to reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
    //send done to reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt2.getID(), TaskAttemptEventType.TA_DONE));
    //send done to reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt3.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    //previous completion event now becomes obsolete
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 6, events.length);
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[0].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[1].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt2.getID(), events[2].getAttemptId());
    Assert.assertEquals("Event reduce attempt id not correct", reduceAttempt.getID(), events[3].getAttemptId());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
    Assert.assertEquals("Event status not correct for map attempt2", TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
    Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
    TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
    TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
    Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
    Assert.assertArrayEquals("Unexpected map events", Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
    mapEvents = job.getMapAttemptCompletionEvents(2, 200);
    Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
    Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) TaskCompletionEvent(org.apache.hadoop.mapred.TaskCompletionEvent) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent) Test(org.junit.Test)
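The updateStatus and sendFetchFailure helpers are private methods of TestFetchFailure and are not shown above. A hedged sketch of their likely shape, assuming TaskAttemptStatusUpdateEvent wraps a TaskAttemptStatus and JobTaskAttemptFetchFailureEvent carries the reporting reducer, the failed map attempts, and the host name:

// Hedged sketch of TestFetchFailure's helpers; constructor and field details
// are assumptions inferred from how the methods are called above.
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
    TaskAttemptStatusUpdateEvent.TaskAttemptStatus status =
            new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
    status.id = attempt.getID();
    status.phase = phase;                     // SHUFFLE or REDUCE in this test
    status.progress = 0.5f;
    status.taskState = attempt.getState();
    status.counters = new Counters();         // non-null placeholders
    status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
    app.getContext().getEventHandler().handle(
            new TaskAttemptStatusUpdateEvent(attempt.getID(), status));
}

private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
        TaskAttempt mapAttempt, String host) {
    // report that reduceAttempt failed to fetch mapAttempt's output from host
    app.getContext().getEventHandler().handle(
            new JobTaskAttemptFetchFailureEvent(reduceAttempt.getID(),
                    Arrays.asList(mapAttempt.getID()), host));
}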

Example 83 with TaskAttempt

use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

the class JobInfo method countTasksAndAttempts.

/**
   * Go through a job and update the member variables with the task and
   * attempt counts that are output on the page.
   *
   * @param job
   *          the job to get counts for.
   */
private void countTasksAndAttempts(Job job) {
    final Map<TaskId, Task> tasks = job.getTasks();
    if (tasks == null) {
        return;
    }
    for (Task task : tasks.values()) {
        switch(task.getType()) {
            case MAP:
                // Task counts
                switch(task.getState()) {
                    case RUNNING:
                        ++this.mapsRunning;
                        break;
                    case SCHEDULED:
                        ++this.mapsPending;
                        break;
                    default:
                        break;
                }
                break;
            case REDUCE:
                // Task counts
                switch(task.getState()) {
                    case RUNNING:
                        ++this.reducesRunning;
                        break;
                    case SCHEDULED:
                        ++this.reducesPending;
                        break;
                    default:
                        break;
                }
                break;
            default:
                throw new IllegalStateException("Task type is neither map nor reduce: " + task.getType());
        }
        // Attempts counts
        Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
        for (TaskAttempt attempt : attempts.values()) {
            // each counter ends up 0 or 1 per attempt, depending on its UI state bucket
            int newAttempts = 0, running = 0, successful = 0, failed = 0, killed = 0;
            if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
                ++newAttempts;
            } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
                ++running;
            } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt.getState())) {
                ++successful;
            } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
                ++failed;
            } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
                ++killed;
            }
            switch(task.getType()) {
                case MAP:
                    this.newMapAttempts += newAttempts;
                    this.runningMapAttempts += running;
                    this.successfulMapAttempts += successful;
                    this.failedMapAttempts += failed;
                    this.killedMapAttempts += killed;
                    break;
                case REDUCE:
                    this.newReduceAttempts += newAttempts;
                    this.runningReduceAttempts += running;
                    this.successfulReduceAttempts += successful;
                    this.failedReduceAttempts += failed;
                    this.killedReduceAttempts += killed;
                    break;
                default:
                    throw new IllegalStateException("Task type neither map nor reduce: " + task.getType());
            }
        }
    }
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
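correspondsTo comes from MRApps.TaskAttemptStateUI, which buckets the internal TaskAttemptState values into the five categories the web UI reports. A hedged sketch of that bucketing (the authoritative mapping lives in org.apache.hadoop.mapreduce.v2.util.MRApps):

// Hedged sketch of the UI state buckets assumed by countTasksAndAttempts.
public enum TaskAttemptStateUI {
    NEW(TaskAttemptState.NEW, TaskAttemptState.STARTING),
    RUNNING(TaskAttemptState.RUNNING, TaskAttemptState.COMMIT_PENDING),
    SUCCESSFUL(TaskAttemptState.SUCCEEDED),
    FAILED(TaskAttemptState.FAILED),
    KILLED(TaskAttemptState.KILLED);

    private final TaskAttemptState[] correspondingStates;

    TaskAttemptStateUI(TaskAttemptState... states) {
        this.correspondingStates = states;
    }

    public boolean correspondsTo(TaskAttemptState state) {
        for (TaskAttemptState s : correspondingStates) {
            if (s == state) {
                return true;
            }
        }
        return false;
    }
}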

Example 84 with TaskAttempt

use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

the class DefaultSpeculator method speculationValue.

/*   *************************************************************    */
// This is the code section that runs periodically and adds speculations for
//  those jobs that need them.
// speculationValue can return a few magic values for tasks that shouldn't
//  speculate:
//  returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not
//     consider speculating this task
//  returns ALREADY_SPECULATING if another attempt is already running; this
//     takes priority over the other checks
//  returns TOO_NEW if our companion task hasn't gotten any information yet
//  returns PROGRESS_IS_GOOD if the task is sailing through
//  returns TOO_LATE_TO_SPECULATE if a replacement attempt could not finish
//     before the current one
//  returns NOT_RUNNING if the task is not running
//
// All of these values are negative.  Any value that should be allowed to
//  speculate is 0 or positive.
private long speculationValue(TaskId taskID, long now) {
    Job job = context.getJob(taskID.getJobId());
    Task task = job.getTask(taskID);
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    long acceptableRuntime = Long.MIN_VALUE;
    long result = Long.MIN_VALUE;
    if (!mayHaveSpeculated.contains(taskID)) {
        acceptableRuntime = estimator.thresholdRuntime(taskID);
        if (acceptableRuntime == Long.MAX_VALUE) {
            return ON_SCHEDULE;
        }
    }
    TaskAttemptId runningTaskAttemptID = null;
    int numberRunningAttempts = 0;
    for (TaskAttempt taskAttempt : attempts.values()) {
        if (taskAttempt.getState() == TaskAttemptState.RUNNING || taskAttempt.getState() == TaskAttemptState.STARTING) {
            if (++numberRunningAttempts > 1) {
                return ALREADY_SPECULATING;
            }
            runningTaskAttemptID = taskAttempt.getID();
            long estimatedRunTime = estimator.estimatedRuntime(runningTaskAttemptID);
            long taskAttemptStartTime = estimator.attemptEnrolledTime(runningTaskAttemptID);
            if (taskAttemptStartTime > now) {
                // This background process ran before we could process the task
                //  attempt status change that chronicles the attempt start
                return TOO_NEW;
            }
            long estimatedEndTime = estimatedRunTime + taskAttemptStartTime;
            long estimatedReplacementEndTime = now + estimator.estimatedNewAttemptRuntime(taskID);
            float progress = taskAttempt.getProgress();
            TaskAttemptHistoryStatistics data = runningTaskAttemptStatistics.get(runningTaskAttemptID);
            if (data == null) {
                runningTaskAttemptStatistics.put(runningTaskAttemptID, new TaskAttemptHistoryStatistics(estimatedRunTime, progress, now));
            } else {
                if (estimatedRunTime == data.getEstimatedRunTime() && progress == data.getProgress()) {
                    // Previous stats are the same as the current stats
                    if (data.notHeartbeatedInAWhile(now)) {
                        // Stats have stagnated for a while, simulate heart-beat.
                        TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
                        taskAttemptStatus.id = runningTaskAttemptID;
                        taskAttemptStatus.progress = progress;
                        taskAttemptStatus.taskState = taskAttempt.getState();
                        // Now simulate the heart-beat
                        handleAttempt(taskAttemptStatus);
                    }
                } else {
                    // Stats have changed - update our data structure
                    data.setEstimatedRunTime(estimatedRunTime);
                    data.setProgress(progress);
                    data.resetHeartBeatTime(now);
                }
            }
            if (estimatedEndTime < now) {
                return PROGRESS_IS_GOOD;
            }
            if (estimatedReplacementEndTime >= estimatedEndTime) {
                return TOO_LATE_TO_SPECULATE;
            }
            result = estimatedEndTime - estimatedReplacementEndTime;
        }
    }
    // If we are here, there's at most one running task attempt.
    if (numberRunningAttempts == 0) {
        return NOT_RUNNING;
    }
    if (acceptableRuntime == Long.MIN_VALUE) {
        acceptableRuntime = estimator.thresholdRuntime(taskID);
        if (acceptableRuntime == Long.MAX_VALUE) {
            return ON_SCHEDULE;
        }
    }
    return result;
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptStatus(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job)
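speculationValue is consumed by the speculator's periodic scan, which keeps only the best positive score. A hedged, condensed sketch of that caller, following the shape of DefaultSpeculator.maybeScheduleASpeculation (names and the surrounding bookkeeping are simplified):

// Hedged sketch: pick the task with the largest positive speculation value
// and schedule one speculative attempt for it. The negative magic values
// (ON_SCHEDULE, ALREADY_SPECULATING, TOO_NEW, ...) can never beat the
// initial best value of -1, so such tasks are skipped automatically.
long now = clock.getTime();
TaskId bestTaskID = null;
long bestSpeculationValue = -1L;
for (Map.Entry<TaskId, Task> entry : job.getTasks(taskType).entrySet()) {
    long value = speculationValue(entry.getKey(), now);
    if (value > bestSpeculationValue) {
        bestTaskID = entry.getKey();
        bestSpeculationValue = value;
    }
}
if (bestTaskID != null) {
    // in DefaultSpeculator this sends a T_ADD_SPEC_ATTEMPT TaskEvent
    addSpeculativeAttempt(bestTaskID);
}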

Example 85 with TaskAttempt

use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

the class LegacyTaskRuntimeEstimator method storedPerAttemptValue.

private long storedPerAttemptValue(Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
    TaskId taskID = attemptID.getTaskId();
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);
    Task task = job.getTask(taskID);
    if (task == null) {
        return -1L;
    }
    TaskAttempt taskAttempt = task.getAttempt(attemptID);
    if (taskAttempt == null) {
        return -1L;
    }
    AtomicLong estimate = data.get(taskAttempt);
    return estimate == null ? -1L : estimate.get();
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) AtomicLong(java.util.concurrent.atomic.AtomicLong) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId)
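Within LegacyTaskRuntimeEstimator, storedPerAttemptValue backs the public per-attempt lookups. A hedged sketch of that usage, with field names abbreviated from the estimator (treat them as assumptions):

// Hedged sketch: the estimator keeps per-attempt statistics keyed by the
// live TaskAttempt object; storedPerAttemptValue resolves a TaskAttemptId
// back to that key and returns -1L when no estimate has been recorded yet.
private final Map<TaskAttempt, AtomicLong> attemptRuntimeEstimates =
        new ConcurrentHashMap<TaskAttempt, AtomicLong>();

@Override
public long estimatedRuntime(TaskAttemptId id) {
    return storedPerAttemptValue(attemptRuntimeEstimates, id);
}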

Aggregations

TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 102 uses
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 86 uses
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 76 uses
Test (org.junit.Test): 63 uses
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 60 uses
Configuration (org.apache.hadoop.conf.Configuration): 45 uses
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 32 uses
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 32 uses
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 29 uses
ClientResponse (com.sun.jersey.api.client.ClientResponse): 18 uses
WebResource (com.sun.jersey.api.client.WebResource): 18 uses
JSONObject (org.codehaus.jettison.json.JSONObject): 12 uses
TaskAttemptReport (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport): 9 uses
IOException (java.io.IOException): 8 uses
Path (javax.ws.rs.Path): 8 uses
Produces (javax.ws.rs.Produces): 8 uses
StringReader (java.io.StringReader): 7 uses
HashMap (java.util.HashMap): 7 uses
GET (javax.ws.rs.GET): 7 uses
DocumentBuilder (javax.xml.parsers.DocumentBuilder): 7 uses