Search in sources:

Example 1 with JobTaskEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent in project hadoop by apache.

From class TaskImpl, method recover().

/**
   * Recover a completed task from a previous application attempt
   * @param taskInfo recovered info about the task
   * @param committer the output committer used when recovering task output
   * @param recoverTaskOutput whether to recover task outputs
   * @return state of the task after recovery
   */
private TaskStateInternal recover(TaskInfo taskInfo, OutputCommitter committer, boolean recoverTaskOutput) {
    LOG.info("Recovering task " + taskId + " from prior app attempt, status was " + taskInfo.getTaskStatus());
    scheduledTime = taskInfo.getStartTime();
    sendTaskStartedEvent();
    Collection<TaskAttemptInfo> attemptInfos = taskInfo.getAllTaskAttempts().values();
    if (attemptInfos.size() > 0) {
        metrics.launchedTask(this);
    }
    // recover the attempts for this task in the order they finished
    // so task attempt completion events are ordered properly
    int savedNextAttemptNumber = nextAttemptNumber;
    ArrayList<TaskAttemptInfo> taInfos = new ArrayList<TaskAttemptInfo>(taskInfo.getAllTaskAttempts().values());
    Collections.sort(taInfos, TA_INFO_COMPARATOR);
    for (TaskAttemptInfo taInfo : taInfos) {
        nextAttemptNumber = taInfo.getAttemptId().getId();
        TaskAttemptImpl attempt = addAttempt(Avataar.VIRGIN);
        // handle the recovery inline so attempts complete before task does
        attempt.handle(new TaskAttemptRecoverEvent(attempt.getID(), taInfo, committer, recoverTaskOutput));
        finishedAttempts.add(attempt.getID());
        TaskAttemptCompletionEventStatus taces = null;
        TaskAttemptState attemptState = attempt.getState();
        switch(attemptState) {
            case FAILED:
                taces = TaskAttemptCompletionEventStatus.FAILED;
                break;
            case KILLED:
                taces = TaskAttemptCompletionEventStatus.KILLED;
                break;
            case SUCCEEDED:
                taces = TaskAttemptCompletionEventStatus.SUCCEEDED;
                break;
            default:
                throw new IllegalStateException("Unexpected attempt state during recovery: " + attemptState);
        }
        if (attemptState == TaskAttemptState.FAILED) {
            failedAttempts.add(attempt.getID());
            if (failedAttempts.size() >= maxAttempts) {
                taces = TaskAttemptCompletionEventStatus.TIPFAILED;
            }
        }
        // TODO: this shouldn't be necessary after MAPREDUCE-4330
        if (successfulAttempt == null) {
            handleTaskAttemptCompletion(attempt.getID(), taces);
            if (attemptState == TaskAttemptState.SUCCEEDED) {
                successfulAttempt = attempt.getID();
            }
        }
    }
    nextAttemptNumber = savedNextAttemptNumber;
    TaskStateInternal taskState = TaskStateInternal.valueOf(taskInfo.getTaskStatus());
    switch(taskState) {
        case SUCCEEDED:
            if (successfulAttempt != null) {
                sendTaskSucceededEvents();
            } else {
                LOG.info("Missing successful attempt for task " + taskId + ", recovering as RUNNING");
                // there must have been a fetch failure and the retry wasn't complete
                taskState = TaskStateInternal.RUNNING;
                metrics.runningTask(this);
                addAndScheduleAttempt(Avataar.VIRGIN);
            }
            break;
        case FAILED:
        case KILLED:
            {
                if (taskState == TaskStateInternal.KILLED && attemptInfos.size() == 0) {
                    metrics.endWaitingTask(this);
                }
                TaskFailedEvent tfe = new TaskFailedEvent(taskInfo.getTaskId(), taskInfo.getFinishTime(), taskInfo.getTaskType(), taskInfo.getError(), taskInfo.getTaskStatus(), taskInfo.getFailedDueToAttemptId(), taskInfo.getCounters());
                eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
                eventHandler.handle(new JobTaskEvent(taskId, getExternalState(taskState)));
                break;
            }
        default:
            throw new AssertionError("Unexpected recovered task state: " + taskState);
    }
    return taskState;
}
Also used: TaskStateInternal(org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal) ArrayList(java.util.ArrayList) TaskAttemptCompletionEventStatus(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus) JobHistoryEvent(org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent) TaskAttemptState(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) TaskAttemptInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo) TaskFailedEvent(org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent) TaskAttemptRecoverEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptRecoverEvent)
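
The recovery loop above sorts the recovered attempts with TA_INFO_COMPARATOR, a field of TaskImpl that is not part of this snippet. A minimal sketch of such a comparator, assuming java.util.Comparator is imported and that the intent is to order TaskAttemptInfo records by finish time so completion events replay in the order the attempts actually finished:

// Sketch of a finish-time comparator like TaskImpl's TA_INFO_COMPARATOR;
// the real field may differ in detail.
private static final Comparator<TaskAttemptInfo> TA_INFO_COMPARATOR = new Comparator<TaskAttemptInfo>() {

    @Override
    public int compare(TaskAttemptInfo a, TaskAttemptInfo b) {
        // ascending finish time; earlier-finishing attempts replay first
        long diff = a.getFinishTime() - b.getFinishTime();
        return diff == 0 ? 0 : (diff < 0 ? -1 : 1);
    }
};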

Example 2 with JobTaskEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent in project hadoop by apache.

From class TestJobImpl, method completeJobTasks().

private static void completeJobTasks(JobImpl job) {
    // complete the map tasks and the reduce tasks so we start committing
    int numMaps = job.getTotalMaps();
    for (int i = 0; i < numMaps; ++i) {
        job.handle(new JobTaskEvent(MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP), TaskState.SUCCEEDED));
        Assert.assertEquals(JobState.RUNNING, job.getState());
    }
    int numReduces = job.getTotalReduces();
    for (int i = 0; i < numReduces; ++i) {
        job.handle(new JobTaskEvent(MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP), TaskState.SUCCEEDED));
        Assert.assertEquals(JobState.RUNNING, job.getState());
    }
}
Also used: JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent)
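
Note that both loops build the event id with TaskType.MAP; the stubbed job used in these tests decides commit readiness from the count of completed tasks, so the task type in the id does not change the outcome here. A hypothetical call site, borrowing the helper names createRunningStubbedJob and assertJobState that appear elsewhere on this page (conf and dispatcher are assumed to be set up as in Example 3):

// Drive a stubbed job to COMMITTING by completing all of its tasks,
// then assert the internal job state (a sketch, not the full test).
JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
completeJobTasks(job);
assertJobState(job, JobStateInternal.COMMITTING);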

Example 3 with JobTaskEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent in project hadoop by apache.

From class TestJobImpl, method testUnusableNodeTransition().

@Test(timeout = 20000)
public void testUnusableNodeTransition() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    // add a special task event handler to put the task back to running in case
    // of task rescheduling/killing
    EventHandler<TaskAttemptEvent> taskAttemptEventHandler = new EventHandler<TaskAttemptEvent>() {

        @Override
        public void handle(TaskAttemptEvent event) {
            if (event.getType() == TaskAttemptEventType.TA_KILL) {
                job.decrementSucceededMapperCount();
            }
        }
    };
    dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler);
    // replace the tasks with spied versions to return the right attempts
    Map<TaskId, Task> spiedTasks = new HashMap<TaskId, Task>();
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    Map<NodeReport, TaskId> nodeReportsToTaskIds = new HashMap<NodeReport, TaskId>();
    for (Map.Entry<TaskId, Task> e : job.tasks.entrySet()) {
        TaskId taskId = e.getKey();
        Task task = e.getValue();
        if (taskId.getTaskType() == TaskType.MAP) {
            // add an attempt to the task to simulate nodes
            NodeId nodeId = mock(NodeId.class);
            TaskAttempt attempt = mock(TaskAttempt.class);
            when(attempt.getNodeId()).thenReturn(nodeId);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            when(attempt.getID()).thenReturn(attemptId);
            // create a spied task
            Task spied = spy(task);
            doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class));
            spiedTasks.put(taskId, spied);
            // create a NodeReport based on the node id
            NodeReport report = mock(NodeReport.class);
            when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
            when(report.getNodeId()).thenReturn(nodeId);
            nodeReports.add(report);
            nodeReportsToTaskIds.put(report, taskId);
        }
    }
    // replace the tasks with the spied tasks
    job.tasks.putAll(spiedTasks);
    // complete all mappers first
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.MAP) {
            // generate a task attempt completed event first to populate the
            // nodes-to-succeeded-attempts map
            TaskAttemptCompletionEvent tce = Records.newRecord(TaskAttemptCompletionEvent.class);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            tce.setAttemptId(attemptId);
            tce.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
            job.handle(new JobTaskAttemptCompletedEvent(tce));
            // complete the task itself
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
            Assert.assertEquals(JobState.RUNNING, job.getState());
        }
    }
    // add an event for a node transition
    NodeReport firstMapperNodeReport = nodeReports.get(0);
    NodeReport secondMapperNodeReport = nodeReports.get(1);
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(firstMapperNodeReport)));
    dispatcher.await();
    // complete the reducer
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.REDUCE) {
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
        }
    }
    // add another event for a node transition for the other mapper
    // this should not trigger rescheduling
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(secondMapperNodeReport)));
    // complete the first mapper that was rescheduled
    TaskId firstMapper = nodeReportsToTaskIds.get(firstMapperNodeReport);
    job.handle(new JobTaskEvent(firstMapper, TaskState.SUCCEEDED));
    // verify the state is moving to committing
    assertJobState(job, JobStateInternal.COMMITTING);
    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);
    dispatcher.stop();
    commitHandler.stop();
}
Also used: DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) Map(java.util.Map) ArrayList(java.util.ArrayList) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) EventHandler(org.apache.hadoop.yarn.event.EventHandler) TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) CyclicBarrier(java.util.concurrent.CyclicBarrier) JobTaskAttemptCompletedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent) NodeId(org.apache.hadoop.yarn.api.records.NodeId) JobUpdatedNodesEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent) NodeReport(org.apache.hadoop.yarn.api.records.NodeReport) Test(org.junit.Test)
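
The TestingOutputCommitter constructed near the top of this test is a helper defined elsewhere in TestJobImpl and not shown in this snippet. A sketch of how such a barrier-synchronized committer could look, assuming imports of org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.TaskAttemptContext, java.io.IOException, and java.util.concurrent.BrokenBarrierException (the real class may differ): commitJob() blocks on the CyclicBarrier so the test can observe the COMMITTING state before syncBarrier.await() releases the commit.

// Sketch of a barrier-synchronized committer (assumed shape).
private static class TestingOutputCommitter extends OutputCommitter {

    private final CyclicBarrier syncBarrier;
    private final boolean shouldSucceed;

    TestingOutputCommitter(CyclicBarrier syncBarrier, boolean shouldSucceed) {
        this.syncBarrier = syncBarrier;
        this.shouldSucceed = shouldSucceed;
    }

    @Override
    public void commitJob(JobContext jobContext) throws IOException {
        try {
            // rendezvous with the test thread holding the other end of the barrier
            syncBarrier.await();
        } catch (InterruptedException | BrokenBarrierException e) {
            throw new IOException(e);
        }
        if (!shouldSucceed) {
            throw new IOException("forced commit failure");
        }
    }

    // the remaining abstract OutputCommitter methods are no-ops in this sketch
    @Override
    public void setupJob(JobContext jobContext) {
    }

    @Override
    public void setupTask(TaskAttemptContext taskContext) {
    }

    @Override
    public boolean needsTaskCommit(TaskAttemptContext taskContext) {
        return false;
    }

    @Override
    public void commitTask(TaskAttemptContext taskContext) {
    }

    @Override
    public void abortTask(TaskAttemptContext taskContext) {
    }
}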

Example 4 with JobTaskEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent in project hadoop by apache.

From class TaskImpl, method sendTaskSucceededEvents().

private void sendTaskSucceededEvents() {
    eventHandler.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
    LOG.info("Task succeeded with attempt " + successfulAttempt);
    if (historyTaskStartGenerated) {
        TaskFinishedEvent tfe = createTaskFinishedEvent(this, TaskStateInternal.SUCCEEDED);
        eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
    }
}
Also used: TaskFinishedEvent(org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) JobHistoryEvent(org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent)
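
For reference, the JobTaskEvent used throughout these examples is a small JobEvent subclass carrying the finished task's id and its terminal TaskState. A minimal sketch of the shape implied by the call sites on this page, such as new JobTaskEvent(taskId, TaskState.SUCCEEDED) and jte.getState() (the real class in org.apache.hadoop.mapreduce.v2.app.job.event may differ):

// Sketch of the JobTaskEvent shape implied by the usages on this page.
public class JobTaskEvent extends JobEvent {

    private final TaskId taskID;
    private final TaskState taskState;

    public JobTaskEvent(TaskId taskID, TaskState taskState) {
        // JOB_TASK_COMPLETED is the event type JobImpl's state machine reacts to
        super(taskID.getJobId(), JobEventType.JOB_TASK_COMPLETED);
        this.taskID = taskID;
        this.taskState = taskState;
    }

    public TaskId getTaskID() {
        return taskID;
    }

    public TaskState getState() {
        return taskState;
    }
}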

Example 5 with JobTaskEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent in project hadoop by apache.

From class TestRecovery, method recoveryChecker().

private void recoveryChecker(MapTaskImpl checkTask, TaskState finalState, Map<TaskAttemptID, TaskAttemptState> finalAttemptStates, ArgumentCaptor<Event> arg, List<EventType> expectedJobHistoryEvents, long expectedMapLaunches, long expectedFailedMaps) {
    assertEquals("Final State of Task", finalState, checkTask.getState());
    Map<TaskAttemptId, TaskAttempt> recoveredAttempts = checkTask.getAttempts();
    assertEquals("Expected Number of Task Attempts", finalAttemptStates.size(), recoveredAttempts.size());
    for (TaskAttemptID taID : finalAttemptStates.keySet()) {
        assertEquals("Expected Task Attempt State", finalAttemptStates.get(taID), recoveredAttempts.get(TypeConverter.toYarn(taID)).getState());
    }
    Iterator<Event> ie = arg.getAllValues().iterator();
    int eventNum = 0;
    long totalLaunchedMaps = 0;
    long totalFailedMaps = 0;
    boolean jobTaskEventReceived = false;
    while (ie.hasNext()) {
        Object current = ie.next();
        ++eventNum;
        LOG.info(eventNum + " " + current.getClass().getName());
        if (current instanceof JobHistoryEvent) {
            JobHistoryEvent jhe = (JobHistoryEvent) current;
            LOG.info(expectedJobHistoryEvents.get(0).toString() + " " + jhe.getHistoryEvent().getEventType().toString() + " " + jhe.getJobID());
            assertEquals(expectedJobHistoryEvents.get(0), jhe.getHistoryEvent().getEventType());
            expectedJobHistoryEvents.remove(0);
        } else if (current instanceof JobCounterUpdateEvent) {
            JobCounterUpdateEvent jcue = (JobCounterUpdateEvent) current;
            boolean containsUpdates = jcue.getCounterUpdates().size() > 0;
            // no counter updates are present in a JobCounterUpdateEvent emitted on
            // TaskAttempt recovery, so check for updates first
            if (containsUpdates) {
                LOG.info("JobCounterUpdateEvent " + jcue.getCounterUpdates().get(0).getCounterKey() + " " + jcue.getCounterUpdates().get(0).getIncrementValue());
                if (jcue.getCounterUpdates().get(0).getCounterKey() == JobCounter.NUM_FAILED_MAPS) {
                    totalFailedMaps += jcue.getCounterUpdates().get(0).getIncrementValue();
                } else if (jcue.getCounterUpdates().get(0).getCounterKey() == JobCounter.TOTAL_LAUNCHED_MAPS) {
                    totalLaunchedMaps += jcue.getCounterUpdates().get(0).getIncrementValue();
                }
            }
        } else if (current instanceof JobTaskEvent) {
            JobTaskEvent jte = (JobTaskEvent) current;
            assertEquals(finalState, jte.getState());
            jobTaskEventReceived = true;
        }
    }
    assertTrue(jobTaskEventReceived || (finalState == TaskState.RUNNING));
    assertEquals("Did not process all expected JobHistoryEvents", 0, expectedJobHistoryEvents.size());
    assertEquals("Expected Map Launches", expectedMapLaunches, totalLaunchedMaps);
    assertEquals("Expected Failed Maps", expectedFailedMaps, totalFailedMaps);
}
Also used: TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) JobHistoryEvent(org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent) JobCounterUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) Event(org.apache.hadoop.mapreduce.jobhistory.Event) TaskRecoverEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskRecoverEvent) ContainerLauncherEvent(org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
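
recoveryChecker receives an ArgumentCaptor that was attached to a mocked event handler before recovery ran. A hypothetical call site, using Mockito's verify/atLeast (the names mockEventHandler, taskAttemptID, recoverMapTask, and expectedJobHistoryEvents are assumptions for illustration, not taken from the snippet above):

// Capture every event the mocked handler saw during recovery, then
// verify the recovered task against the expected final states.
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler, atLeast(1)).handle(arg.capture());

Map<TaskAttemptID, TaskAttemptState> finalAttemptStates = new HashMap<TaskAttemptID, TaskAttemptState>();
finalAttemptStates.put(taskAttemptID, TaskAttemptState.SUCCEEDED);

recoveryChecker(recoverMapTask, TaskState.SUCCEEDED, finalAttemptStates, arg, expectedJobHistoryEvents, 2L, 0L);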

Aggregations

JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent): 6 usages
JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent): 3 usages
ArrayList (java.util.ArrayList): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 2 usages
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 2 usages
CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler): 2 usages
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 2 usages
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 2 usages
Test (org.junit.Test): 2 usages
HashMap (java.util.HashMap): 1 usage
Map (java.util.Map): 1 usage
CyclicBarrier (java.util.concurrent.CyclicBarrier): 1 usage
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 1 usage
Event (org.apache.hadoop.mapreduce.jobhistory.Event): 1 usage
TaskAttemptInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo): 1 usage
TaskFailedEvent (org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent): 1 usage
TaskFinishedEvent (org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent): 1 usage
TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent): 1 usage
TaskAttemptCompletionEventStatus (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus): 1 usage