
Example 1 with TaskTAttemptKilledEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent in project hadoop by apache.

From class TestTaskAttempt, method containerKillBeforeAssignment:

private void containerKillBeforeAssignment(boolean scheduleAttempt)
        throws Exception {
    MockEventHandler eventHandler = new MockEventHandler();
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
        mock(Path.class), 1, mock(TaskSplitMetaInfo.class), new JobConf(),
        mock(TaskAttemptListener.class), mock(Token.class), new Credentials(),
        SystemClock.getInstance(), mock(AppContext.class));
    if (scheduleAttempt) {
        taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
            TaskAttemptEventType.TA_SCHEDULE));
    }
    // Kill the attempt before any container has been assigned.
    taImpl.handle(new TaskAttemptKillEvent(taImpl.getID(), "", true));
    assertEquals("Task attempt is not in KILLED state",
        TaskAttemptState.KILLED, taImpl.getState());
    assertEquals("Task attempt's internal state is not KILLED",
        TaskAttemptStateInternal.KILLED, taImpl.getInternalState());
    assertFalse("InternalError occurred", eventHandler.internalError);
    TaskEvent event = eventHandler.lastTaskEvent;
    assertEquals(TaskEventType.T_ATTEMPT_KILLED, event.getType());
    // A map attempt killed before container assignment should not be rescheduled.
    assertFalse(((TaskTAttemptKilledEvent) event).getRescheduleAttempt());
}
Also used: Path(org.apache.hadoop.fs.Path) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Token(org.apache.hadoop.security.token.Token) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) TaskAttemptKillEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Credentials(org.apache.hadoop.security.Credentials)
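
In TestTaskAttempt this private helper is presumably invoked by two public tests, one per starting state, along these lines (a sketch; the actual test method names in TestTaskAttempt may differ):

@Test
public void testContainerKillWhileNew() throws Exception {
    // No TA_SCHEDULE first: the attempt is killed while still in NEW.
    containerKillBeforeAssignment(false);
}

@Test
public void testContainerKillWhileUnassigned() throws Exception {
    // TA_SCHEDULE moves the attempt to UNASSIGNED before the kill.
    containerKillBeforeAssignment(true);
}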

Example 2 with TaskTAttemptKilledEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent in project hadoop by apache.

From class TestTaskImpl, method testFailedTransitions:

@Test
public void testFailedTransitions() {
    mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
        remoteJobConfFile, conf, taskAttemptListener, jobToken, credentials,
        clock, startCount, metrics, appContext, TaskType.MAP) {

        @Override
        protected int getMaxAttempts() {
            return 1;
        }
    };
    TaskId taskId = getNewTaskID();
    scheduleTaskAttempt(taskId);
    launchTaskAttempt(getLastAttempt().getAttemptId());
    // add three more speculative attempts
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ADD_SPEC_ATTEMPT));
    launchTaskAttempt(getLastAttempt().getAttemptId());
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ADD_SPEC_ATTEMPT));
    launchTaskAttempt(getLastAttempt().getAttemptId());
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ADD_SPEC_ATTEMPT));
    launchTaskAttempt(getLastAttempt().getAttemptId());
    assertEquals(4, taskAttempts.size());
    // fail the first attempt; with getMaxAttempts() == 1 there are no retries,
    // so the whole task fails
    MockTaskAttemptImpl taskAttempt = taskAttempts.get(0);
    taskAttempt.setState(TaskAttemptState.FAILED);
    mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(), TaskEventType.T_ATTEMPT_FAILED));
    assertEquals(TaskState.FAILED, mockTask.getState());
    // verify task can no longer be killed
    mockTask.handle(new TaskEvent(taskId, TaskEventType.T_KILL));
    assertEquals(TaskState.FAILED, mockTask.getState());
    // verify speculation no longer launches new attempts once the task has failed
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ADD_SPEC_ATTEMPT));
    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ATTEMPT_LAUNCHED));
    assertEquals(TaskState.FAILED, mockTask.getState());
    assertEquals(4, taskAttempts.size());
    // verify events from the other, still-active attempts don't move the task out of FAILED
    taskAttempt = taskAttempts.get(1);
    taskAttempt.setState(TaskAttemptState.COMMIT_PENDING);
    mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(), TaskEventType.T_ATTEMPT_COMMIT_PENDING));
    assertEquals(TaskState.FAILED, mockTask.getState());
    taskAttempt.setState(TaskAttemptState.FAILED);
    mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(), TaskEventType.T_ATTEMPT_FAILED));
    assertEquals(TaskState.FAILED, mockTask.getState());
    taskAttempt = taskAttempts.get(2);
    taskAttempt.setState(TaskAttemptState.SUCCEEDED);
    mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(), TaskEventType.T_ATTEMPT_SUCCEEDED));
    assertEquals(TaskState.FAILED, mockTask.getState());
    taskAttempt = taskAttempts.get(3);
    taskAttempt.setState(TaskAttemptState.KILLED);
    mockTask.handle(new TaskTAttemptKilledEvent(taskAttempt.getAttemptId(), false));
    assertEquals(TaskState.FAILED, mockTask.getState());
}
Also used: TaskTAttemptKilledEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) TaskTAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent) Test(org.junit.Test)
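
The test leans on small private helpers from TestTaskImpl; Examples 3 and 4 below show two of them. launchTaskAttempt plausibly follows the same pattern (an illustrative reconstruction, not the verbatim helper, which may do extra bookkeeping such as assigning a container):

private void launchTaskAttempt(TaskAttemptId attemptId) {
    mockTask.handle(new TaskTAttemptEvent(attemptId,
        TaskEventType.T_ATTEMPT_LAUNCHED));
    assertTaskRunningState();
}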

Example 3 with TaskTAttemptKilledEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent in project hadoop by apache.

From class TestTaskImpl, method killRunningTaskAttempt:

private void killRunningTaskAttempt(TaskAttemptId attemptId, boolean reschedule) {
    mockTask.handle(new TaskTAttemptKilledEvent(attemptId, reschedule));
    assertTaskRunningState();
}
Also used: TaskTAttemptKilledEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent)

Example 4 with TaskTAttemptKilledEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent in project hadoop by apache.

From class TestTaskImpl, method killScheduledTaskAttempt:

private void killScheduledTaskAttempt(TaskAttemptId attemptId, boolean reschedule) {
    mockTask.handle(new TaskTAttemptKilledEvent(attemptId, reschedule));
    assertTaskScheduledState();
}
Also used: TaskTAttemptKilledEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent)
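
Together with Example 3, a test would exercise these helpers roughly as follows (a minimal sketch reusing the fixture names from Example 2; passing reschedule=true covers the case where the killed attempt is replaced, so the task legitimately stays in SCHEDULED state):

TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
// The helper asserts the task remains SCHEDULED after the kill event.
killScheduledTaskAttempt(getLastAttempt().getAttemptId(), true);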

Example 5 with TaskTAttemptKilledEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent in project hadoop by apache.

From class TestTaskAttempt, method testKillMapTaskAfterSuccess:

@Test
public void testKillMapTaskAfterSuccess() throws Exception {
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
    taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
        TaskAttemptEventType.TA_DONE));
    assertEquals("Task attempt is not in SUCCEEDED state",
        TaskAttemptState.SUCCEEDED, taImpl.getState());
    assertEquals("Task attempt's internal state is not "
        + "SUCCESS_FINISHING_CONTAINER",
        TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
        taImpl.getInternalState());
    taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
        TaskAttemptEventType.TA_CONTAINER_CLEANED));
    // Send a map task attempt kill event indicating that the next map
    // attempt has to be rescheduled.
    taImpl.handle(new TaskAttemptKillEvent(taImpl.getID(), "", true));
    assertEquals("Task attempt is not in KILLED state",
        TaskAttemptState.KILLED, taImpl.getState());
    assertEquals("Task attempt's internal state is not KILLED",
        TaskAttemptStateInternal.KILLED, taImpl.getInternalState());
    assertFalse("InternalError occurred", eventHandler.internalError);
    TaskEvent event = eventHandler.lastTaskEvent;
    assertEquals(TaskEventType.T_ATTEMPT_KILLED, event.getType());
    // The TaskTAttemptKilledEvent forwarded to TaskImpl carries the same
    // reschedule flag that arrived in the kill event above.
    assertTrue(((TaskTAttemptKilledEvent) event).getRescheduleAttempt());
}
Also used: TaskEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) TaskAttemptKillEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent) Test(org.junit.Test)
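
Examples 1 and 5 exercise both values of the forwarded reschedule flag. On the receiving side, a consumer of TaskTAttemptKilledEvent could branch on it along these lines (a hypothetical sketch; the real TaskImpl kill transition is considerably more involved):

// Hypothetical handler; addAndScheduleAttempt() is an illustrative helper name.
void onAttemptKilled(TaskTAttemptKilledEvent event) {
    if (event.getRescheduleAttempt()) {
        // The attempt was killed after the task made progress; add a replacement.
        addAndScheduleAttempt();
    }
}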

Aggregations

TaskTAttemptKilledEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptKilledEvent) 4
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId) 3
TaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent) 3
Test (org.junit.Test) 3
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl) 2
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) 2
TaskAttemptKillEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent) 2
TaskTAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent) 2
Path (org.apache.hadoop.fs.Path) 1
JobConf (org.apache.hadoop.mapred.JobConf) 1
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) 1
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId) 1
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext) 1
TaskAttemptListener (org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) 1
Credentials (org.apache.hadoop.security.Credentials) 1
Token (org.apache.hadoop.security.token.Token) 1
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 1