Example 41 with Task

Use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

The class AMWebServices, method getJobTaskAttemptId.

@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
    init();
    Job job = getJobFromJobIdString(jid, appCtx);
    checkAccess(job, hsr);
    Task task = getTaskFromTaskIdString(tid, job);
    TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
    if (task.getType() == TaskType.REDUCE) {
        return new ReduceTaskAttemptInfo(ta, task.getType());
    } else {
        return new TaskAttemptInfo(ta, task.getType(), true);
    }
}
Also used: ReduceTaskAttemptInfo (org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskAttemptInfo (org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Path (javax.ws.rs.Path), Produces (javax.ws.rs.Produces), GET (javax.ws.rs.GET)
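For context, AMWebServices is rooted at /ws/v1/mapreduce, and the AM web-services tests drive endpoints like this one through the Jersey 1.x client (ClientResponse and WebResource also appear in the Aggregations below). A minimal sketch, assuming an illustrative AM address and placeholder job, task, and attempt IDs:

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import javax.ws.rs.core.MediaType;
import org.codehaus.jettison.json.JSONObject;

// A sketch, not from the source: host, port, and IDs are placeholders.
Client client = Client.create();
WebResource r = client.resource("http://am-host:8088/ws/v1/mapreduce");
ClientResponse response = r.path("jobs").path("job_1234567890123_0001")
    .path("tasks").path("task_1234567890123_0001_m_000000")
    .path("attempts").path("attempt_1234567890123_0001_m_000000_0")
    .accept(MediaType.APPLICATION_JSON)
    .get(ClientResponse.class);
// The attempt info is wrapped under the "taskAttempt" key of the JSON body.
JSONObject json = response.getEntity(JSONObject.class);
System.out.println(json);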

Example 42 with Task

Use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

The class TestRuntimeEstimators, method coreTestEstimator.

private void coreTestEstimator(TaskRuntimeEstimator testedEstimator, int expectedSpeculations) {
    estimator = testedEstimator;
    clock = new ControlledClock();
    dispatcher = new AsyncDispatcher();
    myJob = null;
    slotsInUse.set(0);
    completedMaps.set(0);
    completedReduces.set(0);
    successfulSpeculations.set(0);
    taskTimeSavedBySpeculation.set(0);
    clock.tickMsec(1000);
    Configuration conf = new Configuration();
    myAppContext = new MyAppContext(MAP_TASKS, REDUCE_TASKS);
    myJob = myAppContext.getAllJobs().values().iterator().next();
    estimator.contextualize(conf, myAppContext);
    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, 500L);
    conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, 5000L);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.1);
    conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.001);
    conf.setInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, 5);
    speculator = new DefaultSpeculator(conf, myAppContext, estimator, clock);
    Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_NO_SPECULATE value", 500L, speculator.getSoonestRetryAfterNoSpeculate());
    Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_SPECULATE value", 5000L, speculator.getSoonestRetryAfterSpeculate());
    Assert.assertEquals("wrong SPECULATIVECAP_RUNNING_TASKS value", 0.1, speculator.getProportionRunningTasksSpeculatable(), 0.00001);
    Assert.assertEquals("wrong SPECULATIVECAP_TOTAL_TASKS value", 0.001, speculator.getProportionTotalTasksSpeculatable(), 0.00001);
    Assert.assertEquals("wrong SPECULATIVE_MINIMUM_ALLOWED_TASKS value", 5, speculator.getMinimumAllowedSpeculativeTasks());
    dispatcher.register(Speculator.EventType.class, speculator);
    dispatcher.register(TaskEventType.class, new SpeculationRequestEventHandler());
    dispatcher.init(conf);
    dispatcher.start();
    speculator.init(conf);
    speculator.start();
    // Now that the plumbing is hooked up, repeat the following until all
    // tasks are finished:
    //  1: If we have spare capacity, assign as many map tasks as we can,
    //     then assign as many reduce tasks as we can. Note that an odd
    //     reduce task might be started while there are still map tasks,
    //     because map tasks take 3 slots and reduce tasks take 2 slots.
    //  2: Send a speculation event for every task attempt that's running;
    //     note that new attempts might be started by the speculator.
    // discover undone tasks
    int undoneMaps = MAP_TASKS;
    int undoneReduces = REDUCE_TASKS;
    // build a task sequence where all the maps precede any of the reduces
    List<Task> allTasksSequence = new LinkedList<Task>();
    allTasksSequence.addAll(myJob.getTasks(TaskType.MAP).values());
    allTasksSequence.addAll(myJob.getTasks(TaskType.REDUCE).values());
    while (undoneMaps + undoneReduces > 0) {
        undoneMaps = 0;
        undoneReduces = 0;
        // start all attempts which are new, but only where there are enough slots
        for (Task task : allTasksSequence) {
            if (!task.isFinished()) {
                if (task.getType() == TaskType.MAP) {
                    ++undoneMaps;
                } else {
                    ++undoneReduces;
                }
            }
            for (TaskAttempt attempt : task.getAttempts().values()) {
                if (attempt.getState() == TaskAttemptState.NEW && INITIAL_NUMBER_FREE_SLOTS - slotsInUse.get() >= taskTypeSlots(task.getType())) {
                    MyTaskAttemptImpl attemptImpl = (MyTaskAttemptImpl) attempt;
                    SpeculatorEvent event = new SpeculatorEvent(attempt.getID(), false, clock.getTime());
                    speculator.handle(event);
                    attemptImpl.startUp();
                } else {
                    // If a task attempt is in progress we should send the news to
                    // the Speculator.
                    TaskAttemptStatus status = new TaskAttemptStatus();
                    status.id = attempt.getID();
                    status.progress = attempt.getProgress();
                    status.stateString = attempt.getState().name();
                    status.taskState = attempt.getState();
                    SpeculatorEvent event = new SpeculatorEvent(status, clock.getTime());
                    speculator.handle(event);
                }
            }
        }
        long startTime = System.currentTimeMillis();
        // drain the speculator event queue
        while (!speculator.eventQueueEmpty()) {
            Thread.yield();
            if (System.currentTimeMillis() > startTime + 130000) {
                return;
            }
        }
        clock.tickMsec(1000L);
        if (clock.getTime() % 10000L == 0L) {
            speculator.scanForSpeculations();
        }
    }
    Assert.assertEquals("We got the wrong number of successful speculations.", expectedSpeculations, successfulSpeculations.get());
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), TaskAttemptStatus (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus), ControlledClock (org.apache.hadoop.yarn.util.ControlledClock), Speculator (org.apache.hadoop.mapreduce.v2.app.speculate.Speculator), DefaultSpeculator (org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator), LinkedList (java.util.LinkedList), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), SpeculatorEvent (org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
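In the Hadoop test class, this private helper is driven by individual @Test methods that each plug in a concrete TaskRuntimeEstimator. A minimal sketch of such a driver in the same class, assuming the LegacyTaskRuntimeEstimator implementation in org.apache.hadoop.mapreduce.v2.app.speculate; the expected speculation count is illustrative, not taken from the source:

import org.apache.hadoop.mapreduce.v2.app.speculate.LegacyTaskRuntimeEstimator;
import org.junit.Test;

// Sketch: each test supplies one estimator implementation; the expected
// count (3) is illustrative only.
@Test
public void testLegacyEstimator() {
    coreTestEstimator(new LegacyTaskRuntimeEstimator(), 3);
}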

Example 43 with Task

Use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

The class TestStagingCleanup, method testByPreserveFailedStaging.

@Test
public void testByPreserveFailedStaging() throws IOException {
    conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
    // TODO: decide which failed-task files should be kept in the
    // application log directory.
    // Currently, no files are deleted from the staging dir.
    conf.setBoolean(MRJobConfig.PRESERVE_FAILED_TASK_FILES, true);
    fs = mock(FileSystem.class);
    when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
    // Staging dir exists
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path stagingDir = MRApps.getStagingAreaDir(conf, user);
    when(fs.exists(stagingDir)).thenReturn(true);
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    JobId jobid = recordFactory.newRecordInstance(JobId.class);
    jobid.setAppId(appId);
    ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
    Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
    MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, JobStateInternal.FAILED, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
    appMaster.init(conf);
    appMaster.start();
    appMaster.shutDownJob();
    // Test whether notifyIsLastAMRetry was called
    Assert.assertTrue(((TestMRApp) appMaster).getTestIsLastAMRetry());
    verify(fs, times(0)).delete(stagingJobPath, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) ContainerAllocator(org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator) Test(org.junit.Test)
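The flag under test corresponds to the job property mapreduce.task.files.preserve.failedtasks. A minimal sketch of enabling it on a client-side configuration (the property name is per MRJobConfig; everything else is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

// Sketch: keep the files of failed task attempts for post-mortem debugging,
// so the staging dir is not cleaned up (the test asserts the staging path
// is never deleted).
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.PRESERVE_FAILED_TASK_FILES, true);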

Example 44 with Task

Use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

The class TestJobImpl, method testUnusableNodeTransition.

@Test(timeout = 20000)
public void testUnusableNodeTransition() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    // Add a special task-attempt event handler that puts a task back to
    // running when an attempt is killed for rescheduling, by decrementing
    // the succeeded-mapper count.
    EventHandler<TaskAttemptEvent> taskAttemptEventHandler = new EventHandler<TaskAttemptEvent>() {

        @Override
        public void handle(TaskAttemptEvent event) {
            if (event.getType() == TaskAttemptEventType.TA_KILL) {
                job.decrementSucceededMapperCount();
            }
        }
    };
    dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler);
    // replace the tasks with spied versions to return the right attempts
    Map<TaskId, Task> spiedTasks = new HashMap<TaskId, Task>();
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    Map<NodeReport, TaskId> nodeReportsToTaskIds = new HashMap<NodeReport, TaskId>();
    for (Map.Entry<TaskId, Task> e : job.tasks.entrySet()) {
        TaskId taskId = e.getKey();
        Task task = e.getValue();
        if (taskId.getTaskType() == TaskType.MAP) {
            // add an attempt to the task to simulate nodes
            NodeId nodeId = mock(NodeId.class);
            TaskAttempt attempt = mock(TaskAttempt.class);
            when(attempt.getNodeId()).thenReturn(nodeId);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            when(attempt.getID()).thenReturn(attemptId);
            // create a spied task
            Task spied = spy(task);
            doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class));
            spiedTasks.put(taskId, spied);
            // create a NodeReport based on the node id
            NodeReport report = mock(NodeReport.class);
            when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
            when(report.getNodeId()).thenReturn(nodeId);
            nodeReports.add(report);
            nodeReportsToTaskIds.put(report, taskId);
        }
    }
    // replace the tasks with the spied tasks
    job.tasks.putAll(spiedTasks);
    // complete all mappers first
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.MAP) {
            // generate a task attempt completed event first to populate the
            // nodes-to-succeeded-attempts map
            TaskAttemptCompletionEvent tce = Records.newRecord(TaskAttemptCompletionEvent.class);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            tce.setAttemptId(attemptId);
            tce.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
            job.handle(new JobTaskAttemptCompletedEvent(tce));
            // complete the task itself
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
            Assert.assertEquals(JobState.RUNNING, job.getState());
        }
    }
    // add an event for a node transition
    NodeReport firstMapperNodeReport = nodeReports.get(0);
    NodeReport secondMapperNodeReport = nodeReports.get(1);
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(firstMapperNodeReport)));
    dispatcher.await();
    // complete the reducer
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.REDUCE) {
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
        }
    }
    // add another event for a node transition for the other mapper
    // this should not trigger rescheduling
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(secondMapperNodeReport)));
    // complete the first mapper that was rescheduled
    TaskId firstMapper = nodeReportsToTaskIds.get(firstMapperNodeReport);
    job.handle(new JobTaskEvent(firstMapper, TaskState.SUCCEEDED));
    // verify the state is moving to committing
    assertJobState(job, JobStateInternal.COMMITTING);
    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);
    dispatcher.stop();
    commitHandler.stop();
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler), EventHandler (org.apache.hadoop.yarn.event.EventHandler), TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent), JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), CyclicBarrier (java.util.concurrent.CyclicBarrier), JobTaskAttemptCompletedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Map (java.util.Map), JobUpdatedNodesEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test)
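This test leans on DrainDispatcher, a test-only AsyncDispatcher subclass whose await() blocks until the internal event queue is empty, which makes the handle()-then-assert pattern deterministic. A self-contained sketch; PingType and PingEvent are hypothetical, invented for illustration only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.DrainDispatcher;

// Hypothetical event type and event, for illustration only.
enum PingType { PING }

class PingEvent extends AbstractEvent<PingType> {
    PingEvent() {
        super(PingType.PING);
    }
}

// Sketch: register a handler, fire one event, and block until dispatched.
DrainDispatcher dispatcher = new DrainDispatcher();
dispatcher.init(new Configuration());
dispatcher.start();
dispatcher.register(PingType.class,
    event -> System.out.println("handled " + event.getType()));
dispatcher.getEventHandler().handle(new PingEvent());
dispatcher.await(); // returns only after all queued events are dispatched
dispatcher.stop();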

Example 45 with Task

Use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

The class TestMRAppMetrics, method testNames.

@Test
public void testNames() {
    Job job = mock(Job.class);
    Task mapTask = make(stub(Task.class).returning(TaskType.MAP).from.getType());
    Task reduceTask = make(stub(Task.class).returning(TaskType.REDUCE).from.getType());
    MRAppMetrics metrics = MRAppMetrics.create();
    metrics.submittedJob(job);
    metrics.waitingTask(mapTask);
    metrics.waitingTask(reduceTask);
    metrics.preparingJob(job);
    metrics.submittedJob(job);
    metrics.waitingTask(mapTask);
    metrics.waitingTask(reduceTask);
    metrics.preparingJob(job);
    metrics.submittedJob(job);
    metrics.waitingTask(mapTask);
    metrics.waitingTask(reduceTask);
    metrics.preparingJob(job);
    metrics.endPreparingJob(job);
    metrics.endPreparingJob(job);
    metrics.endPreparingJob(job);
    metrics.runningJob(job);
    metrics.launchedTask(mapTask);
    metrics.runningTask(mapTask);
    metrics.failedTask(mapTask);
    metrics.endWaitingTask(reduceTask);
    metrics.endRunningTask(mapTask);
    metrics.endRunningJob(job);
    metrics.failedJob(job);
    metrics.runningJob(job);
    metrics.launchedTask(mapTask);
    metrics.runningTask(mapTask);
    metrics.killedTask(mapTask);
    metrics.endWaitingTask(reduceTask);
    metrics.endRunningTask(mapTask);
    metrics.endRunningJob(job);
    metrics.killedJob(job);
    metrics.runningJob(job);
    metrics.launchedTask(mapTask);
    metrics.runningTask(mapTask);
    metrics.completedTask(mapTask);
    metrics.endRunningTask(mapTask);
    metrics.launchedTask(reduceTask);
    metrics.runningTask(reduceTask);
    metrics.completedTask(reduceTask);
    metrics.endRunningTask(reduceTask);
    metrics.endRunningJob(job);
    metrics.completedJob(job);
    checkMetrics(/*job*/ 3, 1, 1, 1, 0, 0,
                 /*map*/ 3, 1, 1, 1, 0, 0,
                 /*reduce*/ 1, 1, 0, 0, 0, 0);
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Test (org.junit.Test)
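The make(stub(...)) calls come from Hadoop's MockitoMaker test utility, which is sugar over plain Mockito. An equivalent sketch of the two stubs above:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;

// Equivalent to make(stub(Task.class).returning(TaskType.MAP).from.getType()):
Task mapTask = mock(Task.class);
when(mapTask.getType()).thenReturn(TaskType.MAP);

Task reduceTask = mock(Task.class);
when(reduceTask.getType()).thenReturn(TaskType.REDUCE);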

Aggregations

Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 157
Test (org.junit.Test): 153
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 150
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 107
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 94
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 79
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 73
Configuration (org.apache.hadoop.conf.Configuration): 68
ClientResponse (com.sun.jersey.api.client.ClientResponse): 56
WebResource (com.sun.jersey.api.client.WebResource): 56
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 52
JSONObject (org.codehaus.jettison.json.JSONObject): 46
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 25
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 23
Path (org.apache.hadoop.fs.Path): 22
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl): 20
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 20
HashMap (java.util.HashMap): 19
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 19
JobConf (org.apache.hadoop.mapred.JobConf): 16