Example 1 with TaskAttemptEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.

The class TestSpeculativeExecutionWithMRApp, method testSepculateSuccessfulWithUpdateEvents.

@Test
public void testSepculateSuccessfulWithUpdateEvents() throws Exception {
    Clock actualClock = SystemClock.getInstance();
    final ControlledClock clock = new ControlledClock(actualClock);
    clock.setTime(System.currentTimeMillis());
    MRApp app = new MRApp(NUM_MAPPERS, NUM_REDUCERS, false, "test", true, clock);
    Job job = app.submit(new Configuration(), true, true);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", NUM_MAPPERS + NUM_REDUCERS, tasks.size());
    Iterator<Task> taskIter = tasks.values().iterator();
    while (taskIter.hasNext()) {
        app.waitForState(taskIter.next(), TaskState.RUNNING);
    }
    // Process the update events
    clock.setTime(System.currentTimeMillis() + 1000);
    EventHandler appEventHandler = app.getContext().getEventHandler();
    for (Map.Entry<TaskId, Task> mapTask : tasks.entrySet()) {
        for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : mapTask.getValue().getAttempts().entrySet()) {
            TaskAttemptStatus status = createTaskAttemptStatus(taskAttempt.getKey(), 0.5f, TaskAttemptState.RUNNING);
            TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
            appEventHandler.handle(event);
        }
    }
    Task speculatedTask = null;
    int numTasksToFinish = NUM_MAPPERS + NUM_REDUCERS - 1;
    clock.setTime(System.currentTimeMillis() + 1000);
    for (Map.Entry<TaskId, Task> task : tasks.entrySet()) {
        for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : task.getValue().getAttempts().entrySet()) {
            if (numTasksToFinish > 0) {
                appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(), TaskAttemptEventType.TA_DONE));
                appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
                numTasksToFinish--;
                app.waitForState(taskAttempt.getValue(), TaskAttemptState.SUCCEEDED);
            } else {
                // The last task is chosen for speculation
                TaskAttemptStatus status = createTaskAttemptStatus(taskAttempt.getKey(), 0.75f, TaskAttemptState.RUNNING);
                speculatedTask = task.getValue();
                TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
                appEventHandler.handle(event);
            }
        }
    }
    clock.setTime(System.currentTimeMillis() + 15000);
    for (Map.Entry<TaskId, Task> task : tasks.entrySet()) {
        for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : task.getValue().getAttempts().entrySet()) {
            if (taskAttempt.getValue().getState() != TaskAttemptState.SUCCEEDED) {
                TaskAttemptStatus status = createTaskAttemptStatus(taskAttempt.getKey(), 0.75f, TaskAttemptState.RUNNING);
                TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
                appEventHandler.handle(event);
            }
        }
    }
    final Task speculatedTaskConst = speculatedTask;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            if (speculatedTaskConst.getAttempts().size() != 2) {
                clock.setTime(System.currentTimeMillis() + 1000);
                return false;
            } else {
                return true;
            }
        }
    }, 1000, 60000);
    TaskAttempt[] ta = makeFirstAttemptWin(appEventHandler, speculatedTask);
    verifySpeculationMessage(app, ta);
    app.waitForState(Service.STATE.STOPPED);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) EventHandler(org.apache.hadoop.yarn.event.EventHandler) TaskAttemptStatus(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) Clock(org.apache.hadoop.yarn.util.Clock) ControlledClock(org.apache.hadoop.yarn.util.ControlledClock) SystemClock(org.apache.hadoop.yarn.util.SystemClock) TaskAttemptStatusUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Map(java.util.Map) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Test(org.junit.Test)
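
The test above leans on a createTaskAttemptStatus helper that this page does not show. Below is a minimal sketch of what it plausibly does, assuming TaskAttemptStatusUpdateEvent.TaskAttemptStatus exposes public id, progress, and taskState fields (as the "Also used" imports suggest); only the fields the speculator needs are filled in.

// Hypothetical reconstruction of the helper used above; not copied from the
// Hadoop sources shown on this page.
private TaskAttemptStatus createTaskAttemptStatus(TaskAttemptId id, float progress, TaskAttemptState state) {
    TaskAttemptStatus status = new TaskAttemptStatus();
    // identify which attempt this progress report belongs to
    status.id = id;
    // reported fraction of completed work, 0.0f to 1.0f
    status.progress = progress;
    // reported state of the attempt, RUNNING throughout this test
    status.taskState = state;
    return status;
}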

Example 2 with TaskAttemptEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.

The class TestSpeculativeExecutionWithMRApp, method makeFirstAttemptWin.

private static TaskAttempt[] makeFirstAttemptWin(EventHandler appEventHandler, Task speculatedTask) {
    // finish the 1st attempt; the 2nd will be killed
    Collection<TaskAttempt> attempts = speculatedTask.getAttempts().values();
    TaskAttempt[] ta = new TaskAttempt[attempts.size()];
    attempts.toArray(ta);
    appEventHandler.handle(new TaskAttemptEvent(ta[0].getID(), TaskAttemptEventType.TA_DONE));
    appEventHandler.handle(new TaskAttemptEvent(ta[0].getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    return ta;
}
Also used : TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
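
Example 1 hands the array returned here to verifySpeculationMessage, which is also not shown on this page. A plausible minimal version, assuming the check only needs the attempt that received TA_DONE to reach SUCCEEDED, looks like this.

// Hypothetical sketch of the companion assertion: ta[0] was driven to
// completion in makeFirstAttemptWin, so it must win the speculation race;
// the AM is expected to kill the losing second attempt.
private static void verifySpeculationMessage(MRApp app, TaskAttempt[] ta) throws Exception {
    app.waitForState(ta[0], TaskAttemptState.SUCCEEDED);
}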

Example 3 with TaskAttemptEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.

The class TaskAttemptListenerImpl, method fatalError.

@Override
public void fatalError(TaskAttemptID taskAttemptID, String msg) throws IOException {
    // This happens only in Child and in the Task.
    LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
    reportDiagnosticInfo(taskAttemptID, "Error: " + msg);
    org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
    // handling checkpoints
    preemptionPolicy.handleFailedContainer(attemptID);
    context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
Also used : TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent)
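
As a usage illustration, the fragment below drives fatalError and checks that a TaskAttemptEvent reaches the dispatcher. It is hypothetical: listener is assumed to be a TaskAttemptListenerImpl built over a mocked AppContext (and preemption policy) whose getEventHandler() returns the Mockito mock mockEventHandler.

// Illustrative only; assumes static imports of org.mockito.Mockito.verify
// and org.mockito.Mockito.isA.
TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.MAP, 0, 0);
listener.fatalError(tid, "java.lang.OutOfMemoryError in user code");
// the listener should translate the fatal error into a TA_FAILMSG event
verify(mockEventHandler).handle(isA(TaskAttemptEvent.class));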

Example 4 with TaskAttemptEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.

The class TestFail, method testTaskFailWithUnusedContainer.

@Test
public void testTaskFailWithUnusedContainer() throws Exception {
    MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
    Configuration conf = new Configuration();
    int maxAttempts = 1;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    TaskAttempt attempt = attempts.values().iterator().next();
    app.waitForInternalState((TaskAttemptImpl) attempt, TaskAttemptStateInternal.ASSIGNED);
    app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    app.waitForState(job, JobState.FAILED);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Test(org.junit.Test)
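
The MRAppWithFailingTaskAndUnusedContainer subclass is not shown on this page. A hypothetical sketch of the idea, assuming MRApp exposes the createContainerLauncher(AppContext) hook used by other MRApp-based tests: a launcher that drops every launch request, so the assigned container is never used and the injected TA_CONTAINER_COMPLETED is the only signal the attempt receives.

// Hypothetical sketch: one map, zero reduces, auto-completion disabled.
static class MRAppWithFailingTaskAndUnusedContainer extends MRApp {

    public MRAppWithFailingTaskAndUnusedContainer() {
        super(1, 0, false, "TaskFailWithUnusedContainer", true);
    }

    @Override
    protected ContainerLauncher createContainerLauncher(AppContext context) {
        // swallow container-launch events; the test injects
        // TA_CONTAINER_COMPLETED by hand instead
        return new ContainerLauncher() {

            @Override
            public void handle(ContainerLauncherEvent event) {
                // intentionally left empty: the container is never launched
            }
        };
    }
}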

Example 5 with TaskAttemptEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.

The class TestFetchFailure, method testFetchFailure.

@Test
public void testFetchFailure() throws Exception {
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    // map -> reduce -> fetch-failure -> map retry is incompatible with
    // sequential, single-task-attempt approach in uber-AM, so disable:
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    // all maps would be running
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    // wait for the task state to move to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
    // send the done signal to the map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    final int checkIntervalMillis = 10;
    final int waitForMillis = 800;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
            return events.length >= 1;
        }
    }, checkIntervalMillis, waitForMillis);
    TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 1, events.length);
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
    // wait for reduce to start running
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    // send 3 fetch failures from the reduce to trigger map re-execution
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    // wait for the map task state to move back to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    // map attempt must have become FAILED
    Assert.assertEquals("Map TaskAttempt state not correct", TaskAttemptState.FAILED, mapAttempt1.getState());
    Assert.assertEquals("Num attempts in Map Task not correct", 2, mapTask.getAttempts().size());
    Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
    atIt.next();
    TaskAttempt mapAttempt2 = atIt.next();
    app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
    // send the done signal to the second map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    // send done to the reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    // previous completion event now becomes obsolete
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 4, events.length);
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[0].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[1].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt2.getID(), events[2].getAttemptId());
    Assert.assertEquals("Event redude attempt id not correct", reduceAttempt.getID(), events[3].getAttemptId());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
    Assert.assertEquals("Event status not correct for map attempt2", TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
    Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
    TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
    TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
    Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
    Assert.assertArrayEquals("Unexpected map events", Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
    mapEvents = job.getMapAttemptCompletionEvents(2, 200);
    Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
    Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent) TaskCompletionEvent(org.apache.hadoop.mapred.TaskCompletionEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Test(org.junit.Test)
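
The sendFetchFailure helper is not shown on this page. A plausible sketch, assuming JobTaskAttemptFetchFailureEvent (in the same job.event package as TaskAttemptEvent) takes the reporting reduce attempt, the fetch-failed map attempt ids, and the failing host, matching the "host" argument used above.

// Hypothetical reconstruction: report one fetch failure from the reduce
// attempt against the given map attempt. Three such reports push the map
// attempt over the failure threshold and trigger re-execution.
private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt, TaskAttempt mapAttempt, String hostname) {
    app.getContext().getEventHandler().handle(
        new JobTaskAttemptFetchFailureEvent(reduceAttempt.getID(),
            Arrays.asList(mapAttempt.getID()), hostname));
}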

Aggregations

TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 60
Test (org.junit.Test): 46
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 30
Configuration (org.apache.hadoop.conf.Configuration): 27
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 27
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 27
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl): 21
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 21
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 19
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 16
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo): 14
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 14
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 14
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 13
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 13
Path (org.apache.hadoop.fs.Path): 12
JobConf (org.apache.hadoop.mapred.JobConf): 12
TaskAttemptListener (org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener): 12
TaskAttemptContainerAssignedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent): 12
Credentials (org.apache.hadoop.security.Credentials): 12