Search in sources:

Example 6 with TaskState

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskState in the Apache Hadoop project.

From the class TestRecovery, method recoveryChecker:

/**
 * Verifies that a recovered {@code MapTaskImpl} matches the expected
 * post-recovery picture: the task's final state, the state of each recovered
 * attempt, the exact ordered sequence of emitted JobHistoryEvents, and the
 * NUM_FAILED_MAPS / TOTAL_LAUNCHED_MAPS totals accumulated through
 * JobCounterUpdateEvents.
 *
 * @param checkTask                the recovered map task under inspection
 * @param finalState               expected final {@link TaskState} of the task
 * @param finalAttemptStates       expected state for every recovered attempt,
 *                                 keyed by classic {@code TaskAttemptID}
 * @param arg                      captor holding every event dispatched during recovery
 * @param expectedJobHistoryEvents ordered list of expected history event types;
 *                                 NOTE: this list is consumed (mutated) by the check
 * @param expectedMapLaunches      expected sum of TOTAL_LAUNCHED_MAPS increments
 * @param expectedFailedMaps       expected sum of NUM_FAILED_MAPS increments
 */
private void recoveryChecker(MapTaskImpl checkTask, TaskState finalState, Map<TaskAttemptID, TaskAttemptState> finalAttemptStates, ArgumentCaptor<Event> arg, List<EventType> expectedJobHistoryEvents, long expectedMapLaunches, long expectedFailedMaps) {
    assertEquals("Final State of Task", finalState, checkTask.getState());
    Map<TaskAttemptId, TaskAttempt> recoveredAttempts = checkTask.getAttempts();
    assertEquals("Expected Number of Task Attempts", finalAttemptStates.size(), recoveredAttempts.size());
    // Each expected attempt must have been recovered in its expected state;
    // the classic attempt ID is converted to its YARN form for the lookup.
    for (TaskAttemptID taID : finalAttemptStates.keySet()) {
        assertEquals("Expected Task Attempt State", finalAttemptStates.get(taID), recoveredAttempts.get(TypeConverter.toYarn(taID)).getState());
    }
    Iterator<Event> ie = arg.getAllValues().iterator();
    int eventNum = 0;
    long totalLaunchedMaps = 0;
    long totalFailedMaps = 0;
    boolean jobTaskEventReceived = false;
    while (ie.hasNext()) {
        Object current = ie.next();
        ++eventNum;
        LOG.info(eventNum + " " + current.getClass().getName());
        if (current instanceof JobHistoryEvent) {
            // History events must arrive in exactly the expected order; each
            // match consumes the head of the expected list.
            JobHistoryEvent jhe = (JobHistoryEvent) current;
            LOG.info(expectedJobHistoryEvents.get(0).toString() + " " + jhe.getHistoryEvent().getEventType().toString() + " " + jhe.getJobID());
            assertEquals(expectedJobHistoryEvents.get(0), jhe.getHistoryEvent().getEventType());
            expectedJobHistoryEvents.remove(0);
        } else if (current instanceof JobCounterUpdateEvent) {
            JobCounterUpdateEvent jcue = (JobCounterUpdateEvent) current;
            // An event with no updates carries nothing to tally.
            boolean containsUpdates = !jcue.getCounterUpdates().isEmpty();
            // TaskAttempt recovery. Check that first.
            if (containsUpdates) {
                LOG.info("JobCounterUpdateEvent " + jcue.getCounterUpdates().get(0).getCounterKey() + " " + jcue.getCounterUpdates().get(0).getIncrementValue());
                // Enum identity comparison (==) is intentional and safe here.
                if (jcue.getCounterUpdates().get(0).getCounterKey() == JobCounter.NUM_FAILED_MAPS) {
                    totalFailedMaps += jcue.getCounterUpdates().get(0).getIncrementValue();
                } else if (jcue.getCounterUpdates().get(0).getCounterKey() == JobCounter.TOTAL_LAUNCHED_MAPS) {
                    totalLaunchedMaps += jcue.getCounterUpdates().get(0).getIncrementValue();
                }
            }
        } else if (current instanceof JobTaskEvent) {
            JobTaskEvent jte = (JobTaskEvent) current;
            // Fixed: JUnit argument order is (expected, actual); the original
            // had them swapped, which garbles failure messages.
            assertEquals(finalState, jte.getState());
            jobTaskEventReceived = true;
        }
    }
    // A JobTaskEvent is only mandatory for tasks that reached a terminal state;
    // a still-RUNNING task legitimately emits none.
    assertTrue(jobTaskEventReceived || (finalState == TaskState.RUNNING));
    assertEquals("Did not process all expected JobHistoryEvents", 0, expectedJobHistoryEvents.size());
    assertEquals("Expected Map Launches", expectedMapLaunches, totalLaunchedMaps);
    assertEquals("Expected Failed Maps", expectedFailedMaps, totalFailedMaps);
}
Also used : TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) JobHistoryEvent(org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent) JobCounterUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) Event(org.apache.hadoop.mapreduce.jobhistory.Event) TaskRecoverEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskRecoverEvent) JobTaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) JobHistoryEvent(org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent) JobCounterUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent) ContainerLauncherEvent(org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)

Example 7 with TaskState

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskState in the Apache Hadoop project.

From the class TestTaskImpl, method testKillRunningTaskAttempt:

/**
 * Kill running attempt
 * {@link TaskState#RUNNING}->{@link TaskState#RUNNING}
 */
@Test
public void testKillRunningTaskAttempt() {
    LOG.info("--- START: testKillRunningTaskAttempt ---");
    mockTask = createMockTask(TaskType.MAP);
    TaskId mapTaskId = getNewTaskID();
    scheduleTaskAttempt(mapTaskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
    killRunningTaskAttempt(getLastAttempt().getAttemptId(), true);
    // Killing a running attempt must trigger a reschedule of that attempt.
    assertEquals(TaskAttemptEventType.TA_RESCHEDULE, taskAttemptEventHandler.lastTaskAttemptEvent.getType());
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Test(org.junit.Test)

Example 8 with TaskState

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskState in the Apache Hadoop project.

From the class TestTaskImpl, method testLaunchTaskAttempt:

/**
 * Launch attempt
 * {@link TaskState#SCHEDULED}->{@link TaskState#RUNNING}
 */
@Test
public void testLaunchTaskAttempt() {
    LOG.info("--- START: testLaunchTaskAttempt ---");
    mockTask = createMockTask(TaskType.MAP);
    // Schedule a fresh attempt, then drive it into the RUNNING state.
    TaskId mapTaskId = getNewTaskID();
    scheduleTaskAttempt(mapTaskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Test(org.junit.Test)

Example 9 with TaskState

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskState in the Apache Hadoop project.

From the class TestTaskImpl, method testKillAttemptForSuccessfulTask:

/**
 * Kill map attempt for succeeded map task
 * {@link TaskState#SUCCEEDED}->{@link TaskState#SCHEDULED}
 */
@Test
public void testKillAttemptForSuccessfulTask() {
    LOG.info("--- START: testKillAttemptForSuccessfulTask ---");
    mockTask = createMockTask(TaskType.MAP);
    // Drive an attempt all the way to task success.
    TaskId mapTaskId = getNewTaskID();
    scheduleTaskAttempt(mapTaskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
    commitTaskAttempt(getLastAttempt().getAttemptId());
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ATTEMPT_SUCCEEDED));
    assertTaskSucceededState();
    // Killing the successful attempt must reschedule it and push the task
    // back from SUCCEEDED to SCHEDULED.
    mockTask.handle(new TaskTAttemptKilledEvent(getLastAttempt().getAttemptId(), true));
    assertEquals(TaskAttemptEventType.TA_RESCHEDULE, taskAttemptEventHandler.lastTaskAttemptEvent.getType());
    assertTaskScheduledState();
}
Also used : TaskTAttemptKilledEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskTAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent) Test(org.junit.Test)

Example 10 with TaskState

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskState in the Apache Hadoop project.

From the class TestTaskImpl, method testKillScheduledTask:

/**
 * {@link TaskState#SCHEDULED}->{@link TaskState#KILL_WAIT}
 */
@Test
public void testKillScheduledTask() {
    LOG.info("--- START: testKillScheduledTask ---");
    mockTask = createMockTask(TaskType.MAP);
    // Schedule an attempt, then kill the task while it is still SCHEDULED.
    TaskId mapTaskId = getNewTaskID();
    scheduleTaskAttempt(mapTaskId);
    killTask(mapTaskId);
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Test(org.junit.Test)

Aggregations

TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId)7 Test (org.junit.Test)6 Counters (org.apache.hadoop.mapreduce.Counters)2 TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID)2 JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent)2 TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId)2 TaskReport (org.apache.hadoop.mapreduce.v2.api.records.TaskReport)2 TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)2 JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent)2 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 Path (org.apache.hadoop.fs.Path)1 RemoteException (org.apache.hadoop.ipc.RemoteException)1 JobConf (org.apache.hadoop.mapred.JobConf)1 Job (org.apache.hadoop.mapreduce.Job)1 JobID (org.apache.hadoop.mapreduce.JobID)1 JobPriority (org.apache.hadoop.mapreduce.JobPriority)1 JobStatus (org.apache.hadoop.mapreduce.JobStatus)1 Event (org.apache.hadoop.mapreduce.jobhistory.Event)1