Search in sources:

Example 46 with TaskAttempt

Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

The class LegacyTaskRuntimeEstimator, method updateAttempt.

@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {
    super.updateAttempt(status, timestamp);
    TaskAttemptId attemptID = status.id;
    TaskId taskID = attemptID.getTaskId();
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);
    if (job == null) {
        return;
    }
    Task task = job.getTask(taskID);
    if (task == null) {
        return;
    }
    TaskAttempt taskAttempt = task.getAttempt(attemptID);
    if (taskAttempt == null) {
        return;
    }
    Long boxedStart = startTimes.get(attemptID);
    long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
    // The attempt is still running; refresh its projected runtime estimate.
    if (taskAttempt.getState() == TaskAttemptState.RUNNING) {
        // See if this task is already in the registry
        AtomicLong estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
        AtomicLong estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);
        if (estimateContainer == null) {
            attemptRuntimeEstimates.putIfAbsent(taskAttempt, new AtomicLong());
            estimateContainer = attemptRuntimeEstimates.get(taskAttempt);
        }
        if (estimateVarianceContainer == null) {
            attemptRuntimeEstimateVariances.putIfAbsent(taskAttempt, new AtomicLong());
            estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt);
        }
        long estimate = -1;
        long varianceEstimate = -1;
        // This code assumes that we'll never consider starting a third
        //  speculative task attempt if two are already running for this task
        if (start > 0 && timestamp > start) {
            estimate = (long) ((timestamp - start) / Math.max(0.0001, status.progress));
            varianceEstimate = (long) (estimate * status.progress / 10);
        }
        if (estimateContainer != null) {
            estimateContainer.set(estimate);
        }
        if (estimateVarianceContainer != null) {
            estimateVarianceContainer.set(varianceEstimate);
        }
    }
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AtomicLong (java.util.concurrent.atomic.AtomicLong), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
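
On the happy path the estimate above is a straight linear extrapolation: elapsed runtime divided by fractional progress, with the divisor floored at 0.0001 to avoid division by zero. A minimal standalone sketch of the same arithmetic (the class name and sample numbers are illustrative, not from the Hadoop source):

public class EstimateSketch {

    // Mirrors the guard in updateAttempt: extrapolate only once the attempt
    // has a recorded start time and the clock has actually advanced.
    static long estimate(long start, long timestamp, float progress) {
        if (start <= 0 || timestamp <= start) {
            return -1;
        }
        return (long) ((timestamp - start) / Math.max(0.0001, progress));
    }

    public static void main(String[] args) {
        long est = estimate(1000L, 3000L, 0.25f);
        long variance = (long) (est * 0.25f / 10);
        // 2000 ms elapsed at 25% progress projects an 8000 ms total runtime,
        // with a variance heuristic of 8000 * 0.25 / 10 = 200
        System.out.println(est + " " + variance);
    }
}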

Example 47 with TaskAttempt

Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

The class AMWebServices, method getTaskAttemptFromTaskAttemptString.

/**
   * Convert a task attempt id string to an actual task attempt and handle all
   * the error checking.
   */
public static TaskAttempt getTaskAttemptFromTaskAttemptString(String attId, Task task) throws NotFoundException {
    TaskAttemptId attemptId;
    TaskAttempt ta;
    try {
        attemptId = MRApps.toTaskAttemptID(attId);
    } catch (YarnRuntimeException e) {
        // a malformed or unparseable id surfaces here; report it as not found
        throw new NotFoundException(e.getMessage());
    } catch (NumberFormatException ne) {
        throw new NotFoundException(ne.getMessage());
    } catch (IllegalArgumentException e) {
        throw new NotFoundException(e.getMessage());
    }
    if (attemptId == null) {
        throw new NotFoundException("task attempt id " + attId + " not found or invalid");
    }
    ta = task.getAttempt(attemptId);
    if (ta == null) {
        throw new NotFoundException("Error getting info on task attempt id " + attId);
    }
    return ta;
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), NotFoundException (org.apache.hadoop.yarn.webapp.NotFoundException), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
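
For reference, MRApps.toTaskAttemptID does the parsing above: it accepts the usual attempt_<clusterTimestamp>_<jobSeq>_<m|r>_<taskSeq>_<attemptSeq> string form and throws on anything malformed. A minimal round-trip sketch (the id value is a made-up example):

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

public class AttemptIdSketch {
    public static void main(String[] args) {
        // parse a well-formed map-attempt id and walk back up to its job
        TaskAttemptId id = MRApps.toTaskAttemptID("attempt_1234567890123_0001_m_000000_0");
        System.out.println(id.getTaskId().getJobId());
        // a malformed string throws instead (e.g. IllegalArgumentException),
        // which the helper above maps to NotFoundException
    }
}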

Example 48 with TaskAttempt

Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

The class AMWebServices, method getJobTaskAttemptState.

@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/state")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public JobTaskAttemptState getJobTaskAttemptState(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) throws IOException, InterruptedException {
    init();
    Job job = getJobFromJobIdString(jid, appCtx);
    checkAccess(job, hsr);
    Task task = getTaskFromTaskIdString(tid, job);
    TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
    return new JobTaskAttemptState(ta.getState().toString());
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), JobTaskAttemptState (org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptState), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Path (javax.ws.rs.Path), Produces (javax.ws.rs.Produces), GET (javax.ws.rs.GET)
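
Over the wire this endpoint answers with a one-field JSON object, something like {"state":"RUNNING"}. A sketch of a Jersey 1.x client in the style of the project's own web-service tests; the host, port, and ids are placeholders, and a JSON provider (e.g. jersey-json) is assumed to be on the classpath:

import javax.ws.rs.core.MediaType;

import org.codehaus.jettison.json.JSONObject;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;

public class AttemptStateClientSketch {
    public static void main(String[] args) throws Exception {
        // base URL of the MapReduce application master's web services
        WebResource r = Client.create().resource("http://am-host:8080/ws/v1/mapreduce");
        ClientResponse response = r.path("jobs").path("job_1234567890123_0001")
                .path("tasks").path("task_1234567890123_0001_m_000000")
                .path("attempts").path("attempt_1234567890123_0001_m_000000_0")
                .path("state")
                .accept(MediaType.APPLICATION_JSON)
                .get(ClientResponse.class);
        JSONObject json = response.getEntity(JSONObject.class);
        System.out.println(json.getString("state")); // e.g. RUNNING
    }
}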

Example 49 with TaskAttempt

Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

The class AMWebServices, method getJobTaskAttemptId.

@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
    init();
    Job job = getJobFromJobIdString(jid, appCtx);
    checkAccess(job, hsr);
    Task task = getTaskFromTaskIdString(tid, job);
    TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
    if (task.getType() == TaskType.REDUCE) {
        return new ReduceTaskAttemptInfo(ta, task.getType());
    } else {
        return new TaskAttemptInfo(ta, task.getType(), true);
    }
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskAttemptInfo (org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo), ReduceTaskAttemptInfo (org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Path (javax.ws.rs.Path), Produces (javax.ws.rs.Produces), GET (javax.ws.rs.GET)

Example 50 with TaskAttempt

Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.

The class TestRuntimeEstimators, method coreTestEstimator.

private void coreTestEstimator(TaskRuntimeEstimator testedEstimator, int expectedSpeculations) {
    estimator = testedEstimator;
    clock = new ControlledClock();
    dispatcher = new AsyncDispatcher();
    myJob = null;
    slotsInUse.set(0);
    completedMaps.set(0);
    completedReduces.set(0);
    successfulSpeculations.set(0);
    taskTimeSavedBySpeculation.set(0);
    clock.tickMsec(1000);
    Configuration conf = new Configuration();
    myAppContext = new MyAppContext(MAP_TASKS, REDUCE_TASKS);
    myJob = myAppContext.getAllJobs().values().iterator().next();
    estimator.contextualize(conf, myAppContext);
    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, 500L);
    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, 5000L);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.1);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.001);
    conf.setInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, 5);
    speculator = new DefaultSpeculator(conf, myAppContext, estimator, clock);
    Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_NO_SPECULATE value", 500L, speculator.getSoonestRetryAfterNoSpeculate());
    Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_SPECULATE value", 5000L, speculator.getSoonestRetryAfterSpeculate());
    Assert.assertEquals("wrong SPECULATIVECAP_RUNNING_TASKS value", 0.1, speculator.getProportionRunningTasksSpeculatable(), 0.00001);
    Assert.assertEquals("wrong SPECULATIVECAP_TOTAL_TASKS value", 0.001, speculator.getProportionTotalTasksSpeculatable(), 0.00001);
    Assert.assertEquals("wrong SPECULATIVE_MINIMUM_ALLOWED_TASKS value", 5, speculator.getMinimumAllowedSpeculativeTasks());
    dispatcher.register(Speculator.EventType.class, speculator);
    dispatcher.register(TaskEventType.class, new SpeculationRequestEventHandler());
    dispatcher.init(conf);
    dispatcher.start();
    speculator.init(conf);
    speculator.start();
    // Now that the plumbing is hooked up, we do the following:
    //  do until all tasks are finished, ...
    //  1: If we have spare capacity, assign as many map tasks as we can, then
    //     assign as many reduce tasks as we can.  Note that an odd reduce
    //     task might be started while there are still map tasks, because
    //     map tasks take 3 slots and reduce tasks 2 slots.
    //  2: Send a speculation event for every task attempt that's running
    //  note that new attempts might get started by the speculator
    // discover undone tasks
    int undoneMaps = MAP_TASKS;
    int undoneReduces = REDUCE_TASKS;
    // build a task sequence where all the maps precede any of the reduces
    List<Task> allTasksSequence = new LinkedList<Task>();
    allTasksSequence.addAll(myJob.getTasks(TaskType.MAP).values());
    allTasksSequence.addAll(myJob.getTasks(TaskType.REDUCE).values());
    while (undoneMaps + undoneReduces > 0) {
        undoneMaps = 0;
        undoneReduces = 0;
        // start all attempts which are new but for which there is enough slots
        for (Task task : allTasksSequence) {
            if (!task.isFinished()) {
                if (task.getType() == TaskType.MAP) {
                    ++undoneMaps;
                } else {
                    ++undoneReduces;
                }
            }
            for (TaskAttempt attempt : task.getAttempts().values()) {
                if (attempt.getState() == TaskAttemptState.NEW && INITIAL_NUMBER_FREE_SLOTS - slotsInUse.get() >= taskTypeSlots(task.getType())) {
                    MyTaskAttemptImpl attemptImpl = (MyTaskAttemptImpl) attempt;
                    SpeculatorEvent event = new SpeculatorEvent(attempt.getID(), false, clock.getTime());
                    speculator.handle(event);
                    attemptImpl.startUp();
                } else {
                    // If a task attempt is in progress we should send the news to
                    // the Speculator.
                    TaskAttemptStatus status = new TaskAttemptStatus();
                    status.id = attempt.getID();
                    status.progress = attempt.getProgress();
                    status.stateString = attempt.getState().name();
                    status.taskState = attempt.getState();
                    SpeculatorEvent event = new SpeculatorEvent(status, clock.getTime());
                    speculator.handle(event);
                }
            }
        }
        long startTime = System.currentTimeMillis();
        // drain the speculator event queue
        while (!speculator.eventQueueEmpty()) {
            Thread.yield();
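        // give up if the queue has not drained within 130 seconds of
        // wall-clock time (this also skips the final assertion)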
            if (System.currentTimeMillis() > startTime + 130000) {
                return;
            }
        }
        clock.tickMsec(1000L);
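    // every 10 seconds of simulated time, prod the speculator to rescan
    // running tasks for new speculation candidates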
        if (clock.getTime() % 10000L == 0L) {
            speculator.scanForSpeculations();
        }
    }
    Assert.assertEquals("We got the wrong number of successful speculations.", expectedSpeculations, successfulSpeculations.get());
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), TaskAttemptStatus (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus), ControlledClock (org.apache.hadoop.yarn.util.ControlledClock), Speculator (org.apache.hadoop.mapreduce.v2.app.speculate.Speculator), DefaultSpeculator (org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator), SpeculatorEvent (org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent), LinkedList (java.util.LinkedList), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
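
What makes that 10-second cadence deterministic is ControlledClock: the test advances time by hand instead of reading the system clock. A minimal sketch using only the calls that appear in the test above:

import org.apache.hadoop.yarn.util.ControlledClock;

public class ClockSketch {
    public static void main(String[] args) {
        ControlledClock clock = new ControlledClock();
        clock.tickMsec(1000L); // advance simulated time by one second
        System.out.println(clock.getTime()); // deterministic, independent of the wall clock
    }
}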

Aggregations

TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 102 uses
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 86 uses
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 76 uses
Test (org.junit.Test): 63 uses
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 60 uses
Configuration (org.apache.hadoop.conf.Configuration): 45 uses
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 32 uses
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 32 uses
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 29 uses
ClientResponse (com.sun.jersey.api.client.ClientResponse): 18 uses
WebResource (com.sun.jersey.api.client.WebResource): 18 uses
JSONObject (org.codehaus.jettison.json.JSONObject): 12 uses
TaskAttemptReport (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport): 9 uses
IOException (java.io.IOException): 8 uses
Path (javax.ws.rs.Path): 8 uses
Produces (javax.ws.rs.Produces): 8 uses
StringReader (java.io.StringReader): 7 uses
HashMap (java.util.HashMap): 7 uses
GET (javax.ws.rs.GET): 7 uses
DocumentBuilder (javax.xml.parsers.DocumentBuilder): 7 uses