Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
Class TestCheckpointPreemptionPolicy, method testStrictPreemptionContract.
@Test
public void testStrictPreemptionContract() {
  final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
  AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {
    @Override
    public TaskAttemptId getTaskAttempt(ContainerId cId) {
      return containers.get(cId);
    }

    @Override
    public List<Container> getContainers(TaskType t) {
      List<Container> p = new ArrayList<Container>();
      for (Map.Entry<ContainerId, TaskAttemptId> ent :
          assignedContainers.entrySet()) {
        if (ent.getValue().getTaskId().getTaskType().equals(t)) {
          p.add(Container.newInstance(ent.getKey(), null, null,
              contToResourceMap.get(ent.getKey()),
              Priority.newInstance(0), null));
        }
      }
      return p;
    }
  };
  PreemptionMessage pM = generatePreemptionMessage(preemptedContainers,
      contToResourceMap, Resource.newInstance(1024, 1), true);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(mActxt);
  policy.preempt(mPctxt, pM);
  // strict contract: reduce attempts are marked preempted, map attempts are not
  for (ContainerId c : preemptedContainers) {
    TaskAttemptId t = assignedContainers.get(c);
    if (TaskType.MAP.equals(t.getTaskId().getTaskType())) {
      assert !policy.isPreempted(t);
    } else {
      assert policy.isPreempted(t);
    }
  }
}
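For context, the TaskAttemptId records that populate assignedContainers in this test are plain MapReduce v2 records. A minimal sketch of building one, assuming the MRBuilderUtils helper from org.apache.hadoop.mapreduce.v2.util (the ids and counts below are illustrative, not taken from the test):

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class TaskAttemptIdSketch {
  // builds a TaskAttemptId for map task 0, attempt 0, of a synthetic job
  static TaskAttemptId newMapAttemptId() {
    ApplicationId appId = ApplicationId.newInstance(1234L, 1); // timestamp, id
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    return MRBuilderUtils.newTaskAttemptId(taskId, 0);
  }
}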
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
Class TestCheckpointPreemptionPolicy, method testPreemptionContract.
@Test
public void testPreemptionContract() {
  final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
  AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {
    @Override
    public TaskAttemptId getTaskAttempt(ContainerId cId) {
      return containers.get(cId);
    }

    @Override
    public List<Container> getContainers(TaskType t) {
      List<Container> p = new ArrayList<Container>();
      for (Map.Entry<ContainerId, TaskAttemptId> ent :
          assignedContainers.entrySet()) {
        if (ent.getValue().getTaskId().getTaskType().equals(t)) {
          p.add(Container.newInstance(ent.getKey(), null, null,
              contToResourceMap.get(ent.getKey()),
              Priority.newInstance(0), null));
        }
      }
      return p;
    }
  };
  PreemptionMessage pM = generatePreemptionMessage(preemptedContainers,
      contToResourceMap, Resource.newInstance(minAlloc, 1), false);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(mActxt);
  int supposedMemPreemption = (int) pM.getContract().getResourceRequest()
      .get(0).getResourceRequest().getCapability().getMemorySize()
      * pM.getContract().getResourceRequest().get(0).getResourceRequest()
          .getNumContainers();
  // first round of preemption
  policy.preempt(mPctxt, pM);
  List<TaskAttemptId> preempting =
      validatePreemption(pM, policy, supposedMemPreemption);
  // redundant message
  policy.preempt(mPctxt, pM);
  List<TaskAttemptId> preempting2 =
      validatePreemption(pM, policy, supposedMemPreemption);
  // check that nothing got added
  assert preempting2.equals(preempting);
  // simulate 2 task completions (i.e., successful preemptions)
  policy.handleCompletedContainer(preempting.get(0));
  policy.handleCompletedContainer(preempting.get(1));
  // remove the completed attempts from assignedContainers
  Iterator<Map.Entry<ContainerId, TaskAttemptId>> it =
      assignedContainers.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<ContainerId, TaskAttemptId> ent = it.next();
    if (ent.getValue().equals(preempting.get(0))
        || ent.getValue().equals(preempting.get(1))) {
      it.remove();
    }
  }
  // one more message asking for preemption
  policy.preempt(mPctxt, pM);
  // triggers preemption of 2 more containers (i.e., the preemption set changes)
  List<TaskAttemptId> preempting3 =
      validatePreemption(pM, policy, supposedMemPreemption);
  assert !preempting3.equals(preempting2);
}
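The supposedMemPreemption computation above reads only the first ResourceRequest in the contract. A hedged sketch of the same bookkeeping generalized over every request in the message (the helper name totalRequestedMem is illustrative, not from the test):

import java.util.List;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest;

// sums the memory demanded across all resource requests in the contract
static long totalRequestedMem(PreemptionMessage pM) {
  long total = 0;
  List<PreemptionResourceRequest> reqs = pM.getContract().getResourceRequest();
  for (PreemptionResourceRequest r : reqs) {
    total += r.getResourceRequest().getCapability().getMemorySize()
        * r.getResourceRequest().getNumContainers();
  }
  return total;
}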
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
Class TestFail, method testFailTask.
@Test
//The job succeeds.
public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  // this test requires two task attempts, but uberization overrides max to 1
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map<TaskId, Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
  // one attempt must have failed, and the other must have succeeded
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      it.next().getReport().getTaskAttemptState());
}
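MockFirstFailingAttemptMRApp is defined elsewhere in TestFail. A hedged sketch of how such a mock is typically built on MRApp's attemptLaunched hook; the constructor arguments and event choices here are assumptions, not the verbatim class:

static class MockFirstFailingAttemptMRApp extends MRApp {
  MockFirstFailingAttemptMRApp(int maps, int reduces) {
    super(maps, reduces, true, "TestFail", true);
  }

  @Override
  protected void attemptLaunched(TaskAttemptId attemptID) {
    if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
      // fail the first attempt of the first task...
      getContext().getEventHandler().handle(
          new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
    } else {
      // ...and let every other attempt complete normally
      getContext().getEventHandler().handle(
          new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
    }
  }
}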
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
Class TestFail, method testTaskFailWithUnusedContainer.
@Test
public void testTaskFailWithUnusedContainer() throws Exception {
  MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
  Configuration conf = new Configuration();
  int maxAttempts = 1;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  // disable uberization (requires entire job to be reattempted, so max for
  // subtask attempts is overridden to 1)
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Map<TaskId, Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  app.waitForState(task, TaskState.SCHEDULED);
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts,
      attempts.size());
  TaskAttempt attempt = attempts.values().iterator().next();
  app.waitForInternalState((TaskAttemptImpl) attempt,
      TaskAttemptStateInternal.ASSIGNED);
  app.getDispatcher().getEventHandler().handle(
      new TaskAttemptEvent(attempt.getID(),
          TaskAttemptEventType.TA_CONTAINER_COMPLETED));
  app.waitForState(job, JobState.FAILED);
}
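The inline dispatch above is the standard way these tests push lifecycle events at a task attempt. A tiny hypothetical helper (the name fire is illustrative, not from TestFail) makes the pattern explicit:

// fires a lifecycle event at the given attempt through the app's dispatcher
static void fire(MRApp app, TaskAttemptId id, TaskAttemptEventType type) {
  app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(id, type));
}

// usage, equivalent to the inline call in the test:
// fire(app, attempt.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED);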
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId in project hadoop by apache.
Class TestFetchFailure, method updateStatus.
// dispatches a healthy 50%-progress status report (empty fetch-failure list)
// for the given attempt and phase
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status =
      new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event =
      new TaskAttemptStatusUpdateEvent(attempt.getID(), status);
  app.getContext().getEventHandler().handle(event);
}
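The companion path in TestFetchFailure reports a fetch failure rather than a healthy update. A hedged sketch of what that variant plausibly looks like, reusing the same status fields; the helper body below is an assumption, not the verbatim source:

private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
    TaskAttempt mapAttempt) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status =
      new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  // unlike updateStatus above, the fetch-failure list is non-empty:
  // it names the map attempt whose output the reduce could not fetch
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.fetchFailedMaps.add(mapAttempt.getID());
  status.id = reduceAttempt.getID();
  status.phase = Phase.SHUFFLE;
  status.progress = 0.5f;
  status.stateString = "fetch failure";
  status.taskState = reduceAttempt.getState();
  app.getContext().getEventHandler().handle(
      new TaskAttemptStatusUpdateEvent(reduceAttempt.getID(), status));
}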