
Example 71 with Task

use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

the class TaskAttemptListenerImpl method fatalError.

@Override
public void fatalError(TaskAttemptID taskAttemptID, String msg) throws IOException {
    // This happens only in Child and in the Task.
    LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
    reportDiagnosticInfo(taskAttemptID, "Error: " + msg);
    org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID);
    // handling checkpoints
    preemptionPolicy.handleFailedContainer(attemptID);
    context.getEventHandler().handle(new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
Also used : TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent)
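
For context, this listener method is the AM-side endpoint of the TaskUmbilicalProtocol.fatalError RPC. Below is a minimal sketch of the child-side call that would reach it; the runWork body and surrounding wiring are hypothetical, only the fatalError signature matches the listener above:

// Hypothetical child-JVM error path: report an unrecoverable failure to
// the AM over the umbilical. The listener above then logs the message,
// records it as a diagnostic, and fires TA_FAILMSG for the attempt.
void runReportingFatalErrors(TaskUmbilicalProtocol umbilical,
        TaskAttemptID taskAttemptID) throws IOException {
    try {
        runWork(); // hypothetical task body
    } catch (Throwable t) {
        umbilical.fatalError(taskAttemptID, StringUtils.stringifyException(t));
    }
}
Also used : TaskUmbilicalProtocol(org.apache.hadoop.mapred.TaskUmbilicalProtocol) StringUtils(org.apache.hadoop.util.StringUtils)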

Example 72 with Task

use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

the class TestCheckpointPreemptionPolicy method testPreemptionContract.

@Test
public void testPreemptionContract() {
    final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
    AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {

        @Override
        public TaskAttemptId getTaskAttempt(ContainerId cId) {
            return containers.get(cId);
        }

        @Override
        public List<Container> getContainers(TaskType t) {
            List<Container> p = new ArrayList<Container>();
            for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
                if (ent.getValue().getTaskId().getTaskType().equals(t)) {
                    p.add(Container.newInstance(ent.getKey(), null, null, contToResourceMap.get(ent.getKey()), Priority.newInstance(0), null));
                }
            }
            return p;
        }
    };
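    // generatePreemptionMessage is a helper of this test class
    // (a sketch of what it has to produce follows this example)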
    PreemptionMessage pM = generatePreemptionMessage(preemptedContainers, contToResourceMap, Resource.newInstance(minAlloc, 1), false);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    // mActxt is a field of the test class: its mocked app context
    // (a RunningAppContext, per the imports listed below)
    policy.init(mActxt);
    int supposedMemPreemption =
        (int) pM.getContract().getResourceRequest().get(0).getResourceRequest()
                .getCapability().getMemorySize()
            * pM.getContract().getResourceRequest().get(0).getResourceRequest()
                .getNumContainers();
    // first round of preemption
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting = validatePreemption(pM, policy, supposedMemPreemption);
    // redundant message
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting2 = validatePreemption(pM, policy, supposedMemPreemption);
    // check that nothing got added
    assert preempting2.equals(preempting);
    // simulate 2 task completions/successful preemption
    policy.handleCompletedContainer(preempting.get(0));
    policy.handleCompletedContainer(preempting.get(1));
    // remove from assignedContainers
    Iterator<Map.Entry<ContainerId, TaskAttemptId>> it = assignedContainers.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<ContainerId, TaskAttemptId> ent = it.next();
        if (ent.getValue().equals(preempting.get(0)) || ent.getValue().equals(preempting.get(1)))
            it.remove();
    }
    // one more message asking for preemption
    policy.preempt(mPctxt, pM);
    // triggers preemption of 2 more containers (i.e., the preemption set changes)
    List<TaskAttemptId> preempting3 = validatePreemption(pM, policy, supposedMemPreemption);
    assert !preempting3.equals(preempting2);
}
Also used : RunningAppContext(org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext), PreemptionMessage(org.apache.hadoop.yarn.api.records.PreemptionMessage), TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), ArrayList(java.util.ArrayList), PreemptionContainer(org.apache.hadoop.yarn.api.records.PreemptionContainer), Container(org.apache.hadoop.yarn.api.records.Container), CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy), ContainerId(org.apache.hadoop.yarn.api.records.ContainerId), TaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType), AMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy), HashMap(java.util.HashMap), Map(java.util.Map), Test(org.junit.Test)
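
generatePreemptionMessage is a private helper of the same test class and is not shown above. As a rough illustration of what it has to produce, here is one way to assemble such a message with the YARN records API; this body is an assumption for illustration, not the actual Hadoop helper:

// Illustrative only: build a PreemptionMessage whose contract asks the AM
// to hand back numContainers containers of size capability.
PreemptionMessage buildPreemptionMessage(Resource capability, int numContainers) {
    ResourceRequest rr = ResourceRequest.newInstance(
            Priority.newInstance(0), ResourceRequest.ANY, capability,
            numContainers);
    PreemptionResourceRequest prr =
            Records.newRecord(PreemptionResourceRequest.class);
    prr.setResourceRequest(rr);
    PreemptionContract contract = Records.newRecord(PreemptionContract.class);
    contract.setResourceRequest(Collections.singletonList(prr));
    PreemptionMessage msg = Records.newRecord(PreemptionMessage.class);
    msg.setContract(contract);
    return msg;
}
Also used : Records(org.apache.hadoop.yarn.util.Records) PreemptionResourceRequest(org.apache.hadoop.yarn.api.records.PreemptionResourceRequest) PreemptionContract(org.apache.hadoop.yarn.api.records.PreemptionContract) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) Collections(java.util.Collections)

Note that the record setters are @Private/@Unstable YARN API, which is acceptable for tests living inside the Hadoop tree but not for external code.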

Example 73 with Task

use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

the class TestFail method testFailTask.

@Test
//The job succeeds.
public void testFailTask() throws Exception {
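    // MockFirstFailingAttemptMRApp fails each task's first attempt
    // (see the sketch after this example)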
    MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
    Configuration conf = new Configuration();
    // this test requires two task attempts, but uberization overrides max to 1
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED, task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
    //one attempt must be failed 
    //and another must have succeeded
    Iterator<TaskAttempt> it = attempts.values().iterator();
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED, it.next().getReport().getTaskAttemptState());
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED, it.next().getReport().getTaskAttemptState());
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration(org.apache.hadoop.conf.Configuration), TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job(org.apache.hadoop.mapreduce.v2.app.job.Job), Test(org.junit.Test)
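
MockFirstFailingAttemptMRApp is defined alongside this test. A plausible sketch of the idea, assuming the MRApp test harness exposes an attemptLaunched hook (illustrative, not the verbatim Hadoop class):

// Illustrative: fail every task's first attempt; let retries complete.
static class MockFirstFailingAttemptMRApp extends MRApp {
    MockFirstFailingAttemptMRApp(int maps, int reduces) {
        super(maps, reduces, true, "TestFail", true);
    }

    @Override
    protected void attemptLaunched(TaskAttemptId attemptID) {
        if (attemptID.getId() == 0) {
            // first attempt of the task: inject a failure
            getContext().getEventHandler().handle(new TaskAttemptEvent(
                    attemptID, TaskAttemptEventType.TA_FAILMSG));
        } else {
            // retries are allowed to finish normally
            getContext().getEventHandler().handle(new TaskAttemptEvent(
                    attemptID, TaskAttemptEventType.TA_DONE));
        }
    }
}

This is why the test expects exactly two attempts: one FAILED and one SUCCEEDED.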

Example 74 with Task

use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

the class TestFail method testTaskFailWithUnusedContainer.

@Test
public void testTaskFailWithUnusedContainer() throws Exception {
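    // MRAppWithFailingTaskAndUnusedContainer never launches the assigned
    // container (see the sketch after this example)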
    MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
    Configuration conf = new Configuration();
    int maxAttempts = 1;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    TaskAttempt attempt = attempts.values().iterator().next();
    app.waitForInternalState((TaskAttemptImpl) attempt, TaskAttemptStateInternal.ASSIGNED);
    app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    app.waitForState(job, JobState.FAILED);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration(org.apache.hadoop.conf.Configuration), TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job(org.apache.hadoop.mapreduce.v2.app.job.Job), Test(org.junit.Test)
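
MRAppWithFailingTaskAndUnusedContainer is likewise a test-local subclass. The essential trick, sketched under the assumption that MRApp lets a subclass substitute its own ContainerLauncher (illustrative wiring, not the verbatim class):

// Illustrative: a launcher that silently drops launch requests, so the
// attempt stays ASSIGNED with an allocated but unused container until the
// test injects TA_CONTAINER_COMPLETED; with maxAttempts=1 the job fails.
static class MRAppWithFailingTaskAndUnusedContainer extends MRApp {
    public MRAppWithFailingTaskAndUnusedContainer() {
        super(1, 0, false, "TaskFailWithUnusedContainer", true);
    }

    @Override
    protected ContainerLauncher createContainerLauncher(AppContext context) {
        return new ContainerLauncher() {
            @Override
            public void handle(ContainerLauncherEvent event) {
                // intentionally a no-op: never launch the container
            }
        };
    }
}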

Example 75 with Task

use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.

the class TestFetchFailure method testFetchFailure.

@Test
public void testFetchFailure() throws Exception {
    MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    // map -> reduce -> fetch-failure -> map retry is incompatible with
    // sequential, single-task-attempt approach in uber-AM, so disable:
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    //all maps would be running
    Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    //wait for Task state move to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
    //send the done signal to the map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    final int checkIntervalMillis = 10;
    final int waitForMillis = 800;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
            return events.length >= 1;
        }
    }, checkIntervalMillis, waitForMillis);
    TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 1, events.length);
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
    // wait for reduce to start running
    app.waitForState(reduceTask, TaskState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    //send 3 fetch failures from reduce to trigger map re-execution
    //(sendFetchFailure is sketched after this example)
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
    //wait for map Task state move back to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    //map attempt must have become FAILED
    Assert.assertEquals("Map TaskAttempt state not correct", TaskAttemptState.FAILED, mapAttempt1.getState());
    Assert.assertEquals("Num attempts in Map Task not correct", 2, mapTask.getAttempts().size());
    Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
    atIt.next();
    TaskAttempt mapAttempt2 = atIt.next();
    app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
    //send the done signal to the second map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    //send done to reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    //previous completion event now becomes obsolete
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 4, events.length);
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[0].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[1].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt2.getID(), events[2].getAttemptId());
    Assert.assertEquals("Event redude attempt id not correct", reduceAttempt.getID(), events[3].getAttemptId());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
    Assert.assertEquals("Event status not correct for map attempt2", TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
    Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
    TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
    TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
    Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
    Assert.assertArrayEquals("Unexpected map events", Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
    mapEvents = job.getMapAttemptCompletionEvents(2, 200);
    Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
    Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
Also used : Task(org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration(org.apache.hadoop.conf.Configuration), TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent), TaskCompletionEvent(org.apache.hadoop.mapred.TaskCompletionEvent), TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job(org.apache.hadoop.mapreduce.v2.app.job.Job), Test(org.junit.Test)
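
sendFetchFailure is a small private helper of TestFetchFailure. A likely shape, assuming JobTaskAttemptFetchFailureEvent carries the reporting reducer, the map attempts whose output could not be fetched, and the reporting host; treat the exact constructor as an assumption:

// Illustrative: report to the job that mapAttempt's output could not be
// fetched by reduceAttempt running on hostname.
private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
        TaskAttempt mapAttempt, String hostname) {
    app.getContext().getEventHandler().handle(
            new JobTaskAttemptFetchFailureEvent(reduceAttempt.getID(),
                    Arrays.asList(mapAttempt.getID()), hostname));
}
Also used : JobTaskAttemptFetchFailureEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent) Arrays(java.util.Arrays)

Three such reports are what the test uses to trip the AM's fetch-failure threshold, which flips the succeeded map attempt back to FAILED and schedules the second map attempt.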

Aggregations

Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 157 uses
Test (org.junit.Test): 153 uses
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 150 uses
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 107 uses
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 94 uses
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 79 uses
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 73 uses
Configuration (org.apache.hadoop.conf.Configuration): 68 uses
ClientResponse (com.sun.jersey.api.client.ClientResponse): 56 uses
WebResource (com.sun.jersey.api.client.WebResource): 56 uses
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 52 uses
JSONObject (org.codehaus.jettison.json.JSONObject): 46 uses
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 25 uses
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 23 uses
Path (org.apache.hadoop.fs.Path): 22 uses
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl): 20 uses
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 20 uses
HashMap (java.util.HashMap): 19 uses
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 19 uses
JobConf (org.apache.hadoop.mapred.JobConf): 16 uses