Example 96 with TaskId

use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

the class DefaultSpeculator method speculationValue.

/*   *************************************************************    */
// This is the code section that runs periodically and adds speculations for
//  those jobs that need them.
// This can return a few magic values for tasks that shouldn't speculate:
//  returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not
//     consider speculating this task
//  returns ALREADY_SPECULATING if the task already has a second running
//     attempt.  This has priority.
//  returns TOO_NEW if our companion task hasn't gotten any information
//  returns PROGRESS_IS_GOOD if the task is sailing through
//  returns NOT_RUNNING if the task is not running
//  returns TOO_LATE_TO_SPECULATE if a new attempt could not finish any
//     sooner than the current one
//
// All of these values are negative.  Any value that should be allowed to
//  speculate is 0 or positive.
private long speculationValue(TaskId taskID, long now) {
    Job job = context.getJob(taskID.getJobId());
    Task task = job.getTask(taskID);
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    long acceptableRuntime = Long.MIN_VALUE;
    long result = Long.MIN_VALUE;
    if (!mayHaveSpeculated.contains(taskID)) {
        acceptableRuntime = estimator.thresholdRuntime(taskID);
        if (acceptableRuntime == Long.MAX_VALUE) {
            return ON_SCHEDULE;
        }
    }
    TaskAttemptId runningTaskAttemptID = null;
    int numberRunningAttempts = 0;
    for (TaskAttempt taskAttempt : attempts.values()) {
        if (taskAttempt.getState() == TaskAttemptState.RUNNING || taskAttempt.getState() == TaskAttemptState.STARTING) {
            if (++numberRunningAttempts > 1) {
                return ALREADY_SPECULATING;
            }
            runningTaskAttemptID = taskAttempt.getID();
            long estimatedRunTime = estimator.estimatedRuntime(runningTaskAttemptID);
            long taskAttemptStartTime = estimator.attemptEnrolledTime(runningTaskAttemptID);
            if (taskAttemptStartTime > now) {
                // This background process ran before we could process the task
                //  attempt status change that chronicles the attempt start
                return TOO_NEW;
            }
            long estimatedEndTime = estimatedRunTime + taskAttemptStartTime;
            long estimatedReplacementEndTime = now + estimator.estimatedNewAttemptRuntime(taskID);
            float progress = taskAttempt.getProgress();
            TaskAttemptHistoryStatistics data = runningTaskAttemptStatistics.get(runningTaskAttemptID);
            if (data == null) {
                runningTaskAttemptStatistics.put(runningTaskAttemptID, new TaskAttemptHistoryStatistics(estimatedRunTime, progress, now));
            } else {
                if (estimatedRunTime == data.getEstimatedRunTime() && progress == data.getProgress()) {
                    // Previous stats are the same as the current stats
                    if (data.notHeartbeatedInAWhile(now)) {
                        // Stats have stagnated for a while, simulate heart-beat.
                        TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
                        taskAttemptStatus.id = runningTaskAttemptID;
                        taskAttemptStatus.progress = progress;
                        taskAttemptStatus.taskState = taskAttempt.getState();
                        // Now simulate the heart-beat
                        handleAttempt(taskAttemptStatus);
                    }
                } else {
                    // Stats have changed - update our data structure
                    data.setEstimatedRunTime(estimatedRunTime);
                    data.setProgress(progress);
                    data.resetHeartBeatTime(now);
                }
            }
            if (estimatedEndTime < now) {
                return PROGRESS_IS_GOOD;
            }
            if (estimatedReplacementEndTime >= estimatedEndTime) {
                return TOO_LATE_TO_SPECULATE;
            }
            result = estimatedEndTime - estimatedReplacementEndTime;
        }
    }
    // If we are here, there's at most one running task attempt.
    if (numberRunningAttempts == 0) {
        return NOT_RUNNING;
    }
    if (acceptableRuntime == Long.MIN_VALUE) {
        acceptableRuntime = estimator.thresholdRuntime(taskID);
        if (acceptableRuntime == Long.MAX_VALUE) {
            return ON_SCHEDULE;
        }
    }
    return result;
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptStatus(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job)
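
For context: each periodic scan typically computes this value for every eligible task and then speculates on the task with the largest positive result. Below is a minimal, self-contained sketch of that selection step; the speculationValues map and the task-name keys are hypothetical stand-ins, not the actual DefaultSpeculator fields.

import java.util.HashMap;
import java.util.Map;

public class SpeculationPicker {

    // Hypothetical stand-in: task name -> result of speculationValue(taskID, now).
    static String pickCandidate(Map<String, Long> speculationValues) {
        String bestTask = null;
        // Per the comment above, 0 or positive means "allowed to speculate",
        // so start the running best just below zero.
        long bestValue = -1L;
        for (Map.Entry<String, Long> e : speculationValues.entrySet()) {
            // The negative magic values (ON_SCHEDULE, ALREADY_SPECULATING, TOO_NEW,
            // PROGRESS_IS_GOOD, NOT_RUNNING, TOO_LATE_TO_SPECULATE) never win here.
            if (e.getValue() > bestValue) {
                bestValue = e.getValue();
                bestTask = e.getKey();
            }
        }
        return bestTask; // null: nothing is worth speculating this round
    }

    public static void main(String[] args) {
        Map<String, Long> values = new HashMap<>();
        values.put("task_01", -3L);     // e.g. a PROGRESS_IS_GOOD-style sentinel
        values.put("task_02", 15000L);  // speculating saves an estimated 15s
        values.put("task_03", 4000L);
        System.out.println(pickCandidate(values)); // task_02
    }
}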

Example 97 with TaskId

use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

the class LegacyTaskRuntimeEstimator method storedPerAttemptValue.

private long storedPerAttemptValue(Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) {
    TaskId taskID = attemptID.getTaskId();
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);
    Task task = job.getTask(taskID);
    if (task == null) {
        return -1L;
    }
    TaskAttempt taskAttempt = task.getAttempt(attemptID);
    if (taskAttempt == null) {
        return -1L;
    }
    AtomicLong estimate = data.get(taskAttempt);
    return estimate == null ? -1L : estimate.get();
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) AtomicLong(java.util.concurrent.atomic.AtomicLong) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId)
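
The map is passed in as a parameter, so this one lookup routine can serve several per-statistic maps keyed by attempt; AtomicLong values let a status-update thread write an estimate while readers poll it. A minimal sketch of the same store-and-lookup pattern, using a plain String key as a hypothetical stand-in for TaskAttempt:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class AttemptEstimates {

    // Hypothetical attempt-keyed store; ConcurrentHashMap plus AtomicLong keeps
    // concurrent updates and reads safe without explicit locking.
    private final Map<String, AtomicLong> estimates = new ConcurrentHashMap<>();

    void record(String attemptId, long estimatedRuntimeMs) {
        estimates.computeIfAbsent(attemptId, k -> new AtomicLong())
                 .set(estimatedRuntimeMs);
    }

    // Mirrors storedPerAttemptValue's contract: -1 means "no estimate yet".
    long lookup(String attemptId) {
        AtomicLong estimate = estimates.get(attemptId);
        return estimate == null ? -1L : estimate.get();
    }

    public static void main(String[] args) {
        AttemptEstimates store = new AttemptEstimates();
        System.out.println(store.lookup("attempt_1")); // -1 (unknown)
        store.record("attempt_1", 42_000L);
        System.out.println(store.lookup("attempt_1")); // 42000
    }
}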

Example 98 with TaskId

use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

the class AMWebServices method getTaskFromTaskIdString.

/**
   * convert a task id string to an actual task and handle all the error
   * checking.
   */
public static Task getTaskFromTaskIdString(String tid, Job job) throws NotFoundException {
    TaskId taskID;
    Task task;
    try {
        taskID = MRApps.toTaskID(tid);
    } catch (YarnRuntimeException e) {
        // a malformed task id surfaces as 404 (NotFound) rather than a server error
        throw new NotFoundException(e.getMessage());
    } catch (NumberFormatException ne) {
        throw new NotFoundException(ne.getMessage());
    } catch (IllegalArgumentException e) {
        throw new NotFoundException(e.getMessage());
    }
    if (taskID == null) {
        throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    task = job.getTask(taskID);
    if (task == null) {
        throw new NotFoundException("task not found with id " + tid);
    }
    return task;
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) NotFoundException(org.apache.hadoop.yarn.webapp.NotFoundException)
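
The same parse-or-404 pattern works for any id-typed path parameter. A hedged, generic sketch of it; the Parser interface and the local NotFoundException are illustrative stand-ins, not part of the Hadoop API:

public class IdParsing {

    interface Parser<T> { T parse(String raw); }

    // Stand-in for org.apache.hadoop.yarn.webapp.NotFoundException.
    static class NotFoundException extends RuntimeException {
        NotFoundException(String msg) { super(msg); }
    }

    static <T> T parseOr404(String raw, Parser<T> parser, String what) {
        T id;
        try {
            id = parser.parse(raw);
        } catch (RuntimeException e) {
            // Malformed ids become a 404 instead of a 500, matching
            // getTaskFromTaskIdString above.
            throw new NotFoundException(e.getMessage());
        }
        if (id == null) {
            throw new NotFoundException(what + " " + raw + " not found or invalid");
        }
        return id;
    }

    public static void main(String[] args) {
        System.out.println(parseOr404("123", Long::parseLong, "taskid")); // 123
        try {
            parseOr404("abc", Long::parseLong, "taskid");
        } catch (NotFoundException e) {
            System.out.println("404: " + e.getMessage());
        }
    }
}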

Example 99 with TaskId

use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

the class AMWebServices method getJobTaskAttempts.

@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
    init();
    TaskAttemptsInfo attempts = new TaskAttemptsInfo();
    Job job = getJobFromJobIdString(jid, appCtx);
    checkAccess(job, hsr);
    Task task = getTaskFromTaskIdString(tid, job);
    for (TaskAttempt ta : task.getAttempts().values()) {
        if (ta != null) {
            if (task.getType() == TaskType.REDUCE) {
                attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
            } else {
                attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
            }
        }
    }
    return attempts;
}
Also used : ReduceTaskAttemptInfo(org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo) TaskAttemptsInfo(org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskAttemptInfo(org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo) ReduceTaskAttemptInfo(org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
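
Clients reach this resource through the MapReduce Application Master's REST API, normally via the ResourceManager web proxy. A hedged sketch of the call with java.net.http; the host, port, and ids are placeholders, and the /proxy/{appid}/ws/v1/mapreduce prefix is the usual AM web-services root:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FetchAttempts {
    public static void main(String[] args) throws Exception {
        // Placeholder cluster coordinates and ids.
        String url = "http://rm-host:8088/proxy/application_1468000000000_0001"
                + "/ws/v1/mapreduce/jobs/job_1468000000000_0001"
                + "/tasks/task_1468000000000_0001_m_000000/attempts";
        HttpRequest request = HttpRequest.newBuilder(URI.create(url))
                .header("Accept", "application/json") // XML is served too, per @Produces
                .GET()
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // JSON document listing the task attempts
    }
}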

Example 100 with TaskId

use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

the class AppController method requireTask.

/**
   * Ensure that a TASK_ID was passed into the page.
   */
public void requireTask() {
    if ($(TASK_ID).isEmpty()) {
        badRequest("missing task ID");
        throw new RuntimeException("missing task ID");
    }
    TaskId taskID = MRApps.toTaskID($(TASK_ID));
    Job job = app.context.getJob(taskID.getJobId());
    app.setJob(job);
    if (app.getJob() == null) {
        notFound(MRApps.toString(taskID.getJobId()));
        throw new RuntimeException("Not Found: " + $(JOB_ID));
    } else {
        app.setTask(app.getJob().getTask(taskID));
        if (app.getTask() == null) {
            notFound($(TASK_ID));
            throw new RuntimeException("Not Found: " + $(TASK_ID));
        }
    }
    if (!checkAccess(job)) {
        accessDenied("User " + request().getRemoteUser() + " does not have " + " permission to view job " + $(JOB_ID));
        throw new RuntimeException("Access denied: User " + request().getRemoteUser() + " does not have permission to view job " + $(JOB_ID));
    }
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Job(org.apache.hadoop.mapreduce.v2.app.job.Job)
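
Page actions invoke this guard before rendering, so the view only runs once the job and task are resolved and the caller's access is verified. An illustrative sketch of that call pattern; taskPage() is a hypothetical view supplier, not quoted from AppController:

// Illustrative controller action: requireTask() either populates the app
// context (job and task) or throws, so render() never sees missing state.
public void task() {
    requireTask();      // bad request if TASK_ID is absent; not-found/denied otherwise
    render(taskPage()); // hypothetical page class for the task view
}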

Aggregations

TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 113 usages
Test (org.junit.Test): 75 usages
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 69 usages
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 60 usages
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 58 usages
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 56 usages
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 42 usages
Configuration (org.apache.hadoop.conf.Configuration): 29 usages
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 24 usages
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 24 usages
Path (org.apache.hadoop.fs.Path): 23 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 22 usages
HashMap (java.util.HashMap): 20 usages
JobConf (org.apache.hadoop.mapred.JobConf): 17 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 17 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 17 usages
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl): 16 usages
TaskAttemptListener (org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener): 16 usages
InetSocketAddress (java.net.InetSocketAddress): 15 usages
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo): 15 usages