Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class TestFail, method testFailTask.
@Test
//The job succeeds.
public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  // this test requires two task attempts, but uberization overrides max to 1
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map<TaskId, Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED, task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
  // one attempt must have failed and the other must have succeeded
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED, it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED, it.next().getReport().getTaskAttemptState());
}
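The MockFirstFailingAttemptMRApp helper is not part of this excerpt. A minimal sketch of how such a subclass might look, assuming MRApp exposes an attemptLaunched hook and that the constructor arguments mirror the (maps, reduces) pair used above:

// Sketch (assumption): an MRApp subclass that fails the first attempt of the
// first task and lets every other attempt succeed, so the job still succeeds
// on the second attempt.
static class MockFirstFailingAttemptMRApp extends MRApp {
  MockFirstFailingAttemptMRApp(int maps, int reduces) {
    super(maps, reduces, true, "MockFirstFailingAttemptMRApp", true);
  }

  @Override
  protected void attemptLaunched(TaskAttemptId attemptID) {
    if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
      // first attempt of the first task: send the fail message
      getContext().getEventHandler().handle(
          new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
    } else {
      // all other attempts complete normally
      getContext().getEventHandler().handle(
          new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
    }
  }
}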
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class TestFail, method testTaskFailWithUnusedContainer.
@Test
public void testTaskFailWithUnusedContainer() throws Exception {
  MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
  Configuration conf = new Configuration();
  int maxAttempts = 1;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  // disable uberization (requires entire job to be reattempted, so max for
  // subtask attempts is overridden to 1)
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Map<TaskId, Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  app.waitForState(task, TaskState.SCHEDULED);
  Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
  TaskAttempt attempt = attempts.values().iterator().next();
  app.waitForInternalState((TaskAttemptImpl) attempt, TaskAttemptStateInternal.ASSIGNED);
  app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
  app.waitForState(job, JobState.FAILED);
}
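The MRAppWithFailingTaskAndUnusedContainer helper is also omitted from this listing. A hedged sketch of one way to build it, assuming MRApp lets a subclass swap in its own ContainerLauncher; the no-op handle body here is an illustrative simplification, not the project's exact implementation:

// Sketch (assumption): a launcher that never actually launches the allocated
// container, so the single attempt stays ASSIGNED and the container is left
// unused until TA_CONTAINER_COMPLETED fails it (and, with maxAttempts=1, the job).
static class MRAppWithFailingTaskAndUnusedContainer extends MRApp {
  public MRAppWithFailingTaskAndUnusedContainer() {
    super(1, 0, false, "TaskFailWithUnusedContainer", true);
  }

  @Override
  protected ContainerLauncher createContainerLauncher(AppContext context) {
    return new ContainerLauncherImpl(context) {
      @Override
      public void handle(ContainerLauncherEvent event) {
        // swallow the remote-launch request: the attempt never leaves
        // ASSIGNED and its container is never used
      }
    };
  }
}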
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class TestFetchFailure, method testFetchFailure.
@Test
public void testFetchFailure() throws Exception {
  MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
  Configuration conf = new Configuration();
  // map -> reduce -> fetch-failure -> map retry is incompatible with the
  // sequential, single-task-attempt approach in the uber-AM, so disable it:
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  // all maps would be running
  Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask = it.next();
  Task reduceTask = it.next();
  // wait for the task state to move to RUNNING
  app.waitForState(mapTask, TaskState.RUNNING);
  TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
  app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
  // send the done signal to the map attempt
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
  // wait for map success
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  final int checkIntervalMillis = 10;
  final int waitForMillis = 800;
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
      return events.length >= 1;
    }
  }, checkIntervalMillis, waitForMillis);
  TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 1, events.length);
  Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
  // wait for reduce to start running
  app.waitForState(reduceTask, TaskState.RUNNING);
  TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
  app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
  // send 3 fetch failures from the reduce to trigger map re-execution
  sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
  sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
  sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
  // wait for the map task state to move back to RUNNING
  app.waitForState(mapTask, TaskState.RUNNING);
  // the first map attempt must have become FAILED
  Assert.assertEquals("Map TaskAttempt state not correct", TaskAttemptState.FAILED, mapAttempt1.getState());
  Assert.assertEquals("Num attempts in Map Task not correct", 2, mapTask.getAttempts().size());
  Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
  atIt.next();
  TaskAttempt mapAttempt2 = atIt.next();
  app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
  // send the done signal to the second map attempt
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
  // wait for map success
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  // send done to the reduce
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  // the previous completion event now becomes obsolete
  Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
  events = job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 4, events.length);
  Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[0].getAttemptId());
  Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[1].getAttemptId());
  Assert.assertEquals("Event map attempt id not correct", mapAttempt2.getID(), events[2].getAttemptId());
  Assert.assertEquals("Event reduce attempt id not correct", reduceAttempt.getID(), events[3].getAttemptId());
  Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
  Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
  Assert.assertEquals("Event status not correct for map attempt2", TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
  Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
  TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
  TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
  Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
  Assert.assertArrayEquals("Unexpected map events", Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
  mapEvents = job.getMapAttemptCompletionEvents(2, 200);
  Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
  Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
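The sendFetchFailure helper called three times above is not part of this excerpt. A plausible sketch, assuming a JobTaskAttemptFetchFailureEvent that carries the reporting reduce attempt, the fetch-failed map attempts, and the reducer's host:

// Sketch (assumption): report one fetch failure against mapAttempt on behalf
// of reduceAttempt; three such reports push the map task back to re-execution.
private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
    TaskAttempt mapAttempt, String hostname) {
  app.getContext().getEventHandler().handle(
      new JobTaskAttemptFetchFailureEvent(reduceAttempt.getID(),
          Arrays.asList(mapAttempt.getID()), hostname));
}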
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class TestFetchFailure, method testFetchFailureWithRecovery.
/**
 * Tests that if a map attempt fails (say, due to fetch failures), it gets
 * re-run. If the AM dies while the next map attempt is running, then on AM
 * re-run the AM must not incorrectly remember the first failed attempt.
 * Recovery currently does not recover running tasks, so effectively the
 * restarted AM re-runs the maps from scratch.
 */
@Test
public void testFetchFailureWithRecovery() throws Exception {
  int runCount = 0;
  MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), true, ++runCount);
  Configuration conf = new Configuration();
  // map -> reduce -> fetch-failure -> map retry is incompatible with the
  // sequential, single-task-attempt approach in the uber-AM, so disable it:
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  // all maps would be running
  Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask = it.next();
  Task reduceTask = it.next();
  // wait for the task state to move to RUNNING
  app.waitForState(mapTask, TaskState.RUNNING);
  TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
  app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
  // send the done signal to the map attempt
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
  // wait for map success
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 1, events.length);
  Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
  // wait for reduce to start running
  app.waitForState(reduceTask, TaskState.RUNNING);
  TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
  app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
  // send 3 fetch failures from the reduce to trigger map re-execution
  sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
  sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
  sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
  // wait for the map task state to move back to RUNNING
  app.waitForState(mapTask, TaskState.RUNNING);
  // crash the AM
  app.stop();
  // rerun the AM with recovery enabled
  app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), false, ++runCount);
  conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  // all maps would be running
  Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
  it = job.getTasks().values().iterator();
  mapTask = it.next();
  reduceTask = it.next();
  // the map is not in a SUCCEEDED state after restart of the AM
  app.waitForState(mapTask, TaskState.RUNNING);
  mapAttempt1 = mapTask.getAttempts().values().iterator().next();
  app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
  // send the done signal to the map attempt
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
  // wait for map success
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  reduceAttempt = reduceTask.getAttempts().values().iterator().next();
  // send done to the reduce
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  events = job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 2, events.length);
}
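The MRAppWithHistory helper used above is likewise not included in this excerpt. A minimal sketch, assuming it only wires in the real JobHistoryEventHandler so a restarted AM (startCount > 1) has history events to recover from:

// Sketch (assumption): an MRApp that writes real job history, so the second
// run can read back the events produced by the first.
static class MRAppWithHistory extends MRApp {
  public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
      String testName, boolean cleanOnStart, int startCount) {
    super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
  }

  @Override
  protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
      AppContext context) {
    return new JobHistoryEventHandler(context, getStartCount());
  }
}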
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class JHEventHandlerForSigtermTest, method mockJob.
// Build a Mockito stub of the Job interface with fixed counters, task
// counts, and name for the SIGTERM history-handler test.
private Job mockJob() {
  Job mockJob = mock(Job.class);
  when(mockJob.getAllCounters()).thenReturn(new Counters());
  when(mockJob.getTotalMaps()).thenReturn(10);
  when(mockJob.getTotalReduces()).thenReturn(10);
  when(mockJob.getName()).thenReturn("mockjob");
  return mockJob;
}
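For context, a hedged sketch of how the surrounding test might consume this mock. The AppContext lookup shown here is an assumption about what the handler under test needs, and Mockito's mock, when, and any are assumed to be statically imported, as in the method above:

// Sketch (assumption): stub the AppContext so the JobHistoryEventHandler
// under test can resolve the mocked Job by its JobId.
AppContext mockContext = mock(AppContext.class);
Job job = mockJob();
when(mockContext.getJob(any(JobId.class))).thenReturn(job);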