Example 16 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class TestCheckpointPreemptionPolicy, method testStrictPreemptionContract.

@Test
public void testStrictPreemptionContract() {
    final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
    AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {

        @Override
        public TaskAttemptId getTaskAttempt(ContainerId cId) {
            return containers.get(cId);
        }

        @Override
        public List<Container> getContainers(TaskType t) {
            List<Container> p = new ArrayList<Container>();
            for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
                if (ent.getValue().getTaskId().getTaskType().equals(t)) {
                    p.add(Container.newInstance(ent.getKey(), null, null,
                            contToResourceMap.get(ent.getKey()),
                            Priority.newInstance(0), null));
                }
            }
            return p;
        }
    };
    PreemptionMessage pM = generatePreemptionMessage(preemptedContainers, contToResourceMap, Resource.newInstance(1024, 1), true);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(mActxt);
    policy.preempt(mPctxt, pM);
    // verify which attempts the policy flagged: map attempts are expected
    // to be spared, while all others must be preempted
    for (ContainerId c : preemptedContainers) {
        TaskAttemptId t = assignedContainers.get(c);
        if (TaskType.MAP.equals(t.getTaskId().getTaskType())) {
            assert !policy.isPreempted(t);
        } else {
            assert policy.isPreempted(t);
        }
    }
}
}
Also used: RunningAppContext (org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext), PreemptionMessage (org.apache.hadoop.yarn.api.records.PreemptionMessage), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), ArrayList (java.util.ArrayList), PreemptionContainer (org.apache.hadoop.yarn.api.records.PreemptionContainer), Container (org.apache.hadoop.yarn.api.records.Container), CheckpointAMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), TaskType (org.apache.hadoop.mapreduce.v2.api.records.TaskType), AMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy), HashMap (java.util.HashMap), Map (java.util.Map), Test (org.junit.Test)
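
Both preemption tests depend on a generatePreemptionMessage helper whose body is not shown above. The following is a minimal sketch of what it could look like, assuming the record factory in org.apache.hadoop.yarn.util.Records and the standard setters on the YARN preemption records; the actual helper in TestCheckpointPreemptionPolicy may build the message differently.

// Sketch only. Assumes: java.util.{Set, HashSet, Map, Collections},
// org.apache.hadoop.yarn.util.Records, and the YARN preemption records.
private PreemptionMessage generatePreemptionMessage(
        Set<ContainerId> containers, Map<ContainerId, Resource> resPerCont,
        Resource minAlloc, boolean strict) {
    Set<PreemptionContainer> pc = new HashSet<>();
    for (ContainerId cId : containers) {
        PreemptionContainer p = Records.newRecord(PreemptionContainer.class);
        p.setId(cId);
        pc.add(p);
    }
    PreemptionMessage pM = Records.newRecord(PreemptionMessage.class);
    if (strict) {
        // strict contract: exactly these containers will be preempted
        StrictPreemptionContract sc =
                Records.newRecord(StrictPreemptionContract.class);
        sc.setContainers(pc);
        pM.setStrictContract(sc);
    } else {
        // negotiable contract: the AM may instead yield an equivalent
        // amount of resources of its own choosing
        PreemptionContract c = Records.newRecord(PreemptionContract.class);
        c.setContainers(pc);
        PreemptionResourceRequest prr =
                Records.newRecord(PreemptionResourceRequest.class);
        prr.setResourceRequest(ResourceRequest.newInstance(
                Priority.newInstance(0), ResourceRequest.ANY, minAlloc,
                containers.size()));
        c.setResourceRequest(Collections.singletonList(prr));
        pM.setContract(c);
    }
    return pM;
}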

Example 17 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class TestCheckpointPreemptionPolicy, method testPreemptionContract.

@Test
public void testPreemptionContract() {
    final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
    AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {

        @Override
        public TaskAttemptId getTaskAttempt(ContainerId cId) {
            return containers.get(cId);
        }

        @Override
        public List<Container> getContainers(TaskType t) {
            List<Container> p = new ArrayList<Container>();
            for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
                if (ent.getValue().getTaskId().getTaskType().equals(t)) {
                    p.add(Container.newInstance(ent.getKey(), null, null,
                            contToResourceMap.get(ent.getKey()),
                            Priority.newInstance(0), null));
                }
            }
            return p;
        }
    };
    PreemptionMessage pM = generatePreemptionMessage(preemptedContainers, contToResourceMap, Resource.newInstance(minAlloc, 1), false);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(mActxt);
    int supposedMemPreemption = (int)
            pM.getContract().getResourceRequest().get(0).getResourceRequest()
                    .getCapability().getMemorySize()
            * pM.getContract().getResourceRequest().get(0).getResourceRequest()
                    .getNumContainers();
    // first round of preemption
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting = validatePreemption(pM, policy, supposedMemPreemption);
    // redundant message
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting2 = validatePreemption(pM, policy, supposedMemPreemption);
    // check that nothing got added
    assert preempting2.equals(preempting);
    // simulate 2 task completions/successful preemption
    policy.handleCompletedContainer(preempting.get(0));
    policy.handleCompletedContainer(preempting.get(1));
    // remove from assignedContainers
    Iterator<Map.Entry<ContainerId, TaskAttemptId>> it = assignedContainers.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<ContainerId, TaskAttemptId> ent = it.next();
        if (ent.getValue().equals(preempting.get(0)) || ent.getValue().equals(preempting.get(1)))
            it.remove();
    }
    // one more message asking for preemption
    policy.preempt(mPctxt, pM);
    // triggers preemption of 2 more containers (i.e., the preemption set changes)
    List<TaskAttemptId> preempting3 = validatePreemption(pM, policy, supposedMemPreemption);
    assert !preempting3.equals(preempting2);
}
Also used: RunningAppContext (org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext), PreemptionMessage (org.apache.hadoop.yarn.api.records.PreemptionMessage), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), ArrayList (java.util.ArrayList), PreemptionContainer (org.apache.hadoop.yarn.api.records.PreemptionContainer), Container (org.apache.hadoop.yarn.api.records.Container), CheckpointAMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), TaskType (org.apache.hadoop.mapreduce.v2.api.records.TaskType), AMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy), HashMap (java.util.HashMap), Map (java.util.Map), Test (org.junit.Test)
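
Example 17 also relies on a validatePreemption helper that is not shown. A hypothetical sketch follows, assuming it collects the attempts the policy currently flags and checks that the memory they hold covers what the PreemptionMessage asked for; the real helper may differ.

// Sketch only; assignedContainers and contToResourceMap are the same test
// fixtures used above.
private List<TaskAttemptId> validatePreemption(PreemptionMessage pM,
        CheckpointAMPreemptionPolicy policy, int supposedMemPreemption) {
    List<TaskAttemptId> preempting = new ArrayList<>();
    long totalMem = 0;
    for (Map.Entry<ContainerId, TaskAttemptId> ent
            : assignedContainers.entrySet()) {
        if (policy.isPreempted(ent.getValue())) {
            preempting.add(ent.getValue());
            totalMem += contToResourceMap.get(ent.getKey()).getMemorySize();
        }
    }
    // the flagged attempts must yield at least the requested amount
    assert totalMem >= supposedMemPreemption;
    return preempting;
}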

Example 18 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class TestFail, method testFailTask.

@Test
//The job succeeds.
public void testFailTask() throws Exception {
    MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
    Configuration conf = new Configuration();
    // this test requires two task attempts, but uberization overrides max to 1
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED, task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
    // one attempt must have failed, and the other succeeded
    Iterator<TaskAttempt> it = attempts.values().iterator();
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED, it.next().getReport().getTaskAttemptState());
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED, it.next().getReport().getTaskAttemptState());
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Test (org.junit.Test)
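
The MockFirstFailingAttemptMRApp mock is not shown above. One plausible implementation, assuming MRApp's protected attemptLaunched(TaskAttemptId) hook; the real class in TestFail may differ in detail.

// Sketch only: fail the first attempt of every task so the retry succeeds.
static class MockFirstFailingAttemptMRApp extends MRApp {
    MockFirstFailingAttemptMRApp(int maps, int reduces) {
        super(maps, reduces, true, "TestFail", true);
    }

    @Override
    protected void attemptLaunched(TaskAttemptId attemptID) {
        if (attemptID.getId() == 0) {
            // first attempt: inject a failure message
            getContext().getEventHandler().handle(new TaskAttemptEvent(
                    attemptID, TaskAttemptEventType.TA_FAILMSG));
        } else {
            // subsequent attempts proceed normally (auto-complete)
            super.attemptLaunched(attemptID);
        }
    }
}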

Example 19 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class TestFail, method testTaskFailWithUnusedContainer.

@Test
public void testTaskFailWithUnusedContainer() throws Exception {
    MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
    Configuration conf = new Configuration();
    int maxAttempts = 1;
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // disable uberization (requires entire job to be reattempted, so max for
    // subtask attempts is overridden to 1)
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
    TaskAttempt attempt = attempts.values().iterator().next();
    app.waitForInternalState((TaskAttemptImpl) attempt, TaskAttemptStateInternal.ASSIGNED);
    app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(
            attempt.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    app.waitForState(job, JobState.FAILED);
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Test (org.junit.Test)
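
The MRAppWithFailingTaskAndUnusedContainer mock is likewise not shown. One plausible shape, assuming MRApp's protected createContainerLauncher(AppContext) hook and the ContainerLauncher interface from org.apache.hadoop.mapreduce.v2.app.launcher; the real class in TestFail may differ.

// Sketch only: drop every launch request so the allocated container is
// never used; the test then injects TA_CONTAINER_COMPLETED by hand.
static class MRAppWithFailingTaskAndUnusedContainer extends MRApp {
    MRAppWithFailingTaskAndUnusedContainer() {
        super(1, 0, false, "TaskFailWithUnusedContainer", true);
    }

    @Override
    protected ContainerLauncher createContainerLauncher(AppContext context) {
        return new ContainerLauncher() {
            @Override
            public void handle(ContainerLauncherEvent event) {
                // intentionally empty: the container stays unused
            }
        };
    }
}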

Example 20 with TaskAttemptId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.

From the class TestFetchFailure, method updateStatus.

private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
    TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
    status.counters = new Counters();
    status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
    status.id = attempt.getID();
    status.mapFinishTime = 0;
    status.phase = phase;
    status.progress = 0.5f;
    status.shuffleFinishTime = 0;
    status.sortFinishTime = 0;
    status.stateString = "OK";
    status.taskState = attempt.getState();
    TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(), status);
    app.getContext().getEventHandler().handle(event);
}
Also used: TaskAttemptStatusUpdateEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), Counters (org.apache.hadoop.mapreduce.Counters)
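
For context, a call site for this helper might look like the following; reduceTask is a hypothetical Task obtained from the job under test.

// hypothetical usage: report a reduce attempt half-way through its shuffle
TaskAttempt reduceAttempt =
        reduceTask.getAttempts().values().iterator().next();
updateStatus(app, reduceAttempt, Phase.SHUFFLE);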

Aggregations

TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 111
Test (org.junit.Test): 72
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 61
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 57
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 51
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 48
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 45
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 33
Configuration (org.apache.hadoop.conf.Configuration): 32
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 28
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 27
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 21
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 20
ClientResponse (com.sun.jersey.api.client.ClientResponse): 16
WebResource (com.sun.jersey.api.client.WebResource): 16
HashMap (java.util.HashMap): 16
Container (org.apache.hadoop.yarn.api.records.Container): 16
Path (org.apache.hadoop.fs.Path): 15
TaskAttemptContainerLaunchedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent): 14
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 14