Example 96 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class TestFail, method testTimedOutTask.

@Test
// All task attempts time out, leading to job failure.
public void testTimedOutTask() throws Exception {
    MRApp app = new TimeOutTaskMRApp(1, 0);
    Configuration conf = new Configuration();
    int maxAttempts = 2;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.FAILED);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals("Task state not correct", TaskState.FAILED, task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    for (TaskAttempt attempt : attempts.values()) {
        Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED, attempt.getReport().getTaskAttemptState());
    }
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Test (org.junit.Test)
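
A minimal sketch (not part of the original test) of building a TaskAttemptId directly with MRBuilderUtils, which is handy when an assertion needs a known id; the timestamp and sequence numbers below are illustrative.

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class TaskAttemptIdSketch {
    public static void main(String[] args) {
        // Illustrative ids: cluster timestamp, app 1, job 1, map task 0, attempt 0.
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
        JobId jobId = MRBuilderUtils.newJobId(appId, 1);
        TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
        TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
        // A TaskAttemptId carries its TaskId, which in turn carries the JobId.
        System.out.println(attemptId.getTaskId().getJobId());
    }
}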

Example 97 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class JobInfo, method countTasksAndAttempts.

/**
   * Go through a job and update the member variables with counts of the
   * information to output on the page.
   *
   * @param job
   *          the job to get counts for.
   */
private void countTasksAndAttempts(Job job) {
    final Map<TaskId, Task> tasks = job.getTasks();
    if (tasks == null) {
        return;
    }
    for (Task task : tasks.values()) {
        switch(task.getType()) {
            case MAP:
                // Task counts
                switch(task.getState()) {
                    case RUNNING:
                        ++this.mapsRunning;
                        break;
                    case SCHEDULED:
                        ++this.mapsPending;
                        break;
                    default:
                        break;
                }
                break;
            case REDUCE:
                // Task counts
                switch(task.getState()) {
                    case RUNNING:
                        ++this.reducesRunning;
                        break;
                    case SCHEDULED:
                        ++this.reducesPending;
                        break;
                    default:
                        break;
                }
                break;
            default:
                throw new IllegalStateException("Task type is neither map nor reduce: " + task.getType());
        }
        // Attempts counts
        Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
        for (TaskAttempt attempt : attempts.values()) {
            // Per-attempt tallies; exactly one bucket is incremented per attempt.
            int newAttempts = 0, running = 0, successful = 0, failed = 0, killed = 0;
            if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
                ++newAttempts;
            } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
                ++running;
            } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt.getState())) {
                ++successful;
            } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
                ++failed;
            } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
                ++killed;
            }
            switch(task.getType()) {
                case MAP:
                    this.newMapAttempts += newAttempts;
                    this.runningMapAttempts += running;
                    this.successfulMapAttempts += successful;
                    this.failedMapAttempts += failed;
                    this.killedMapAttempts += killed;
                    break;
                case REDUCE:
                    this.newReduceAttempts += newAttempts;
                    this.runningReduceAttempts += running;
                    this.successfulReduceAttempts += successful;
                    this.failedReduceAttempts += failed;
                    this.killedReduceAttempts += killed;
                    break;
                default:
                    throw new IllegalStateException("Task type is neither map nor reduce: " + task.getType());
            }
        }
    }
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
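
The if/else ladder above can also be written as a scan over the same enum. A hedged sketch (bucketOf is a hypothetical helper, not part of JobInfo), assuming the TaskAttemptStateUI enum from MRApps used above:

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;

public class AttemptBucketSketch {
    // Returns the first UI bucket that claims this attempt state, or null if none does.
    static TaskAttemptStateUI bucketOf(TaskAttemptState state) {
        for (TaskAttemptStateUI bucket : TaskAttemptStateUI.values()) {
            if (bucket.correspondsTo(state)) {
                return bucket;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(bucketOf(TaskAttemptState.RUNNING)); // RUNNING
    }
}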

Example 98 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class CheckpointAMPreemptionPolicy, method updatePreemptionCounters.

@SuppressWarnings({ "unchecked" })
private void updatePreemptionCounters(TaskAttemptId yarnAttemptID) {
    if (!countedPreemptions.contains(yarnAttemptID)) {
        countedPreemptions.add(yarnAttemptID);
        JobCounterUpdateEvent jce = new JobCounterUpdateEvent(yarnAttemptID.getTaskId().getJobId());
        jce.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
        eventHandler.handle(jce);
    }
}
Also used: JobCounterUpdateEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent)
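
The countedPreemptions set is what makes the update idempotent: Set.add returns false when the id is already present, so each attempt bumps TASKS_REQ_PREEMPT at most once. A standalone sketch of the same count-once pattern, with String ids standing in for TaskAttemptId:

import java.util.HashSet;
import java.util.Set;

public class CountOnceSketch {
    // Mirrors the countedPreemptions guard: each id is counted at most once.
    private final Set<String> counted = new HashSet<>();
    private int preemptionCount = 0;

    void recordPreemption(String attemptId) {
        if (counted.add(attemptId)) { // add() returns false if already present
            preemptionCount++;
        }
    }

    public static void main(String[] args) {
        CountOnceSketch sketch = new CountOnceSketch();
        sketch.recordPreemption("attempt_0");
        sketch.recordPreemption("attempt_0"); // ignored: already counted
        System.out.println(sketch.preemptionCount); // prints 1
    }
}

Using Set.add's return value instead of a separate contains() check also keeps the test and the update a single operation on the set.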

Example 99 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class DefaultSpeculator, method speculationValue.

/*   *************************************************************    */
// This is the code section that runs periodically and adds speculations for
//  those jobs that need them.
// This can return a few magic values for tasks that shouldn't speculate:
//  returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not
//     consider speculating this task
//  returns ALREADY_SPECULATING if that is true.  This has priority.
//  returns TOO_NEW if our companion task hasn't gotten any information
//  returns PROGRESS_IS_GOOD if the task is sailing through
//  returns NOT_RUNNING if the task is not running
//
// All of these values are negative.  Any value that should be allowed to
//  speculate is 0 or positive.
private long speculationValue(TaskId taskID, long now) {
    Job job = context.getJob(taskID.getJobId());
    Task task = job.getTask(taskID);
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    long acceptableRuntime = Long.MIN_VALUE;
    long result = Long.MIN_VALUE;
    if (!mayHaveSpeculated.contains(taskID)) {
        acceptableRuntime = estimator.thresholdRuntime(taskID);
        if (acceptableRuntime == Long.MAX_VALUE) {
            return ON_SCHEDULE;
        }
    }
    TaskAttemptId runningTaskAttemptID = null;
    int numberRunningAttempts = 0;
    for (TaskAttempt taskAttempt : attempts.values()) {
        if (taskAttempt.getState() == TaskAttemptState.RUNNING || taskAttempt.getState() == TaskAttemptState.STARTING) {
            if (++numberRunningAttempts > 1) {
                return ALREADY_SPECULATING;
            }
            runningTaskAttemptID = taskAttempt.getID();
            long estimatedRunTime = estimator.estimatedRuntime(runningTaskAttemptID);
            long taskAttemptStartTime = estimator.attemptEnrolledTime(runningTaskAttemptID);
            if (taskAttemptStartTime > now) {
                // This background process ran before we could process the task
                //  attempt status change that chronicles the attempt start
                return TOO_NEW;
            }
            long estimatedEndTime = estimatedRunTime + taskAttemptStartTime;
            long estimatedReplacementEndTime = now + estimator.estimatedNewAttemptRuntime(taskID);
            float progress = taskAttempt.getProgress();
            TaskAttemptHistoryStatistics data = runningTaskAttemptStatistics.get(runningTaskAttemptID);
            if (data == null) {
                runningTaskAttemptStatistics.put(runningTaskAttemptID, new TaskAttemptHistoryStatistics(estimatedRunTime, progress, now));
            } else {
                if (estimatedRunTime == data.getEstimatedRunTime() && progress == data.getProgress()) {
                    // Previous stats are the same as the current stats
                    if (data.notHeartbeatedInAWhile(now)) {
                        // Stats have stagnated for a while, simulate heart-beat.
                        TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
                        taskAttemptStatus.id = runningTaskAttemptID;
                        taskAttemptStatus.progress = progress;
                        taskAttemptStatus.taskState = taskAttempt.getState();
                        // Now simulate the heart-beat
                        handleAttempt(taskAttemptStatus);
                    }
                } else {
                    // Stats have changed - update our data structure
                    data.setEstimatedRunTime(estimatedRunTime);
                    data.setProgress(progress);
                    data.resetHeartBeatTime(now);
                }
            }
            if (estimatedEndTime < now) {
                return PROGRESS_IS_GOOD;
            }
            if (estimatedReplacementEndTime >= estimatedEndTime) {
                return TOO_LATE_TO_SPECULATE;
            }
            result = estimatedEndTime - estimatedReplacementEndTime;
        }
    }
    // If we are here, there's at most one task attempt.
    if (numberRunningAttempts == 0) {
        return NOT_RUNNING;
    }
    if (acceptableRuntime == Long.MIN_VALUE) {
        acceptableRuntime = estimator.thresholdRuntime(taskID);
        if (acceptableRuntime == Long.MAX_VALUE) {
            return ON_SCHEDULE;
        }
    }
    return result;
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttemptStatus (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job)
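
All of those sentinels are negative, so a caller only needs the sign: negative means do not speculate, non-negative is the estimated time saved (in ms) by launching a speculative attempt. A hedged sketch of that convention; the constant values here are illustrative stand-ins, since DefaultSpeculator keeps its own private definitions:

public class SpeculationValueSketch {
    // Illustrative sentinels, all strictly negative.
    static final long ON_SCHEDULE = Long.MIN_VALUE;
    static final long ALREADY_SPECULATING = Long.MIN_VALUE + 1;
    static final long TOO_NEW = Long.MIN_VALUE + 2;
    static final long PROGRESS_IS_GOOD = Long.MIN_VALUE + 3;
    static final long NOT_RUNNING = Long.MIN_VALUE + 4;
    static final long TOO_LATE_TO_SPECULATE = Long.MIN_VALUE + 5;

    // Negative: do not speculate. Non-negative: estimated ms saved by speculating.
    static boolean shouldSpeculate(long speculationValue) {
        return speculationValue >= 0;
    }

    public static void main(String[] args) {
        System.out.println(shouldSpeculate(TOO_NEW));  // false
        System.out.println(shouldSpeculate(42_000L));  // true: roughly 42s saved
    }
}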

Example 100 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class LegacyTaskRuntimeEstimator, method storedPerAttemptValue.

private long storedPerAttemptValue(Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
    TaskId taskID = attemptID.getTaskId();
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);
    Task task = job.getTask(taskID);
    if (task == null) {
        return -1L;
    }
    TaskAttempt taskAttempt = task.getAttempt(attemptID);
    if (taskAttempt == null) {
        return -1L;
    }
    AtomicLong estimate = data.get(taskAttempt);
    return estimate == null ? -1L : estimate.get();
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), AtomicLong (java.util.concurrent.atomic.AtomicLong), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
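
The method collapses every missing link (unknown task, unknown attempt, no stored estimate) into the single sentinel -1L. A standalone sketch of the same lookup-with-sentinel pattern, with String keys standing in for TaskAttempt; AtomicLong acts as a mutable box, so stored estimates can be updated in place without re-putting:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class StoredValueSketch {
    // Walks the lookup and returns -1 whenever the value is missing.
    static long storedValue(Map<String, AtomicLong> data, String attemptId) {
        AtomicLong estimate = data.get(attemptId);
        return estimate == null ? -1L : estimate.get();
    }

    public static void main(String[] args) {
        Map<String, AtomicLong> data = new HashMap<>();
        data.put("attempt_0", new AtomicLong(12_000L));
        System.out.println(storedValue(data, "attempt_0")); // 12000
        System.out.println(storedValue(data, "attempt_1")); // -1 (unknown)
    }
}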

Aggregations

TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 111
Test (org.junit.Test): 72
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 61
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 57
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 51
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 48
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 45
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 33
Configuration (org.apache.hadoop.conf.Configuration): 32
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 28
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 27
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 21
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 20
ClientResponse (com.sun.jersey.api.client.ClientResponse): 16
WebResource (com.sun.jersey.api.client.WebResource): 16
HashMap (java.util.HashMap): 16
Container (org.apache.hadoop.yarn.api.records.Container): 16
Path (org.apache.hadoop.fs.Path): 15
TaskAttemptContainerLaunchedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent): 14
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 14