Example 11 with JobImpl

Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

From the class TestJobImpl, the method testUnusableNodeTransition: it verifies that when a node hosting succeeded map tasks becomes unusable (UNHEALTHY), those mappers are rescheduled, and that the job still commits and succeeds once the rescheduled attempt completes.

@Test(timeout = 20000)
public void testUnusableNodeTransition() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    DrainDispatcher dispatcher = new DrainDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
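    // Two-party barrier shared with the committer below; the test thread
    // releases the commit at the very end via syncBarrier.await().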
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    // add a special task attempt event handler: when rescheduling kills a
    // succeeded attempt, put the task back into the running pool by
    // decrementing the succeeded-mapper count
    EventHandler<TaskAttemptEvent> taskAttemptEventHandler = new EventHandler<TaskAttemptEvent>() {

        @Override
        public void handle(TaskAttemptEvent event) {
            if (event.getType() == TaskAttemptEventType.TA_KILL) {
                job.decrementSucceededMapperCount();
            }
        }
    };
    dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler);
    // replace the tasks with spied versions to return the right attempts
    Map<TaskId, Task> spiedTasks = new HashMap<TaskId, Task>();
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    Map<NodeReport, TaskId> nodeReportsToTaskIds = new HashMap<NodeReport, TaskId>();
    for (Map.Entry<TaskId, Task> e : job.tasks.entrySet()) {
        TaskId taskId = e.getKey();
        Task task = e.getValue();
        if (taskId.getTaskType() == TaskType.MAP) {
            // add an attempt to the task to simulate nodes
            NodeId nodeId = mock(NodeId.class);
            TaskAttempt attempt = mock(TaskAttempt.class);
            when(attempt.getNodeId()).thenReturn(nodeId);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            when(attempt.getID()).thenReturn(attemptId);
            // create a spied task
            Task spied = spy(task);
            doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class));
            spiedTasks.put(taskId, spied);
            // create a NodeReport based on the node id
            NodeReport report = mock(NodeReport.class);
            when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
            when(report.getNodeId()).thenReturn(nodeId);
            nodeReports.add(report);
            nodeReportsToTaskIds.put(report, taskId);
        }
    }
    // replace the tasks with the spied tasks
    job.tasks.putAll(spiedTasks);
    // complete all mappers first
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.MAP) {
            // generate a task attempt completed event first to populate the
            // nodes-to-succeeded-attempts map
            TaskAttemptCompletionEvent tce = Records.newRecord(TaskAttemptCompletionEvent.class);
            TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
            tce.setAttemptId(attemptId);
            tce.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
            job.handle(new JobTaskAttemptCompletedEvent(tce));
            // complete the task itself
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
            Assert.assertEquals(JobState.RUNNING, job.getState());
        }
    }
    // add an event for a node transition
    NodeReport firstMapperNodeReport = nodeReports.get(0);
    NodeReport secondMapperNodeReport = nodeReports.get(1);
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(firstMapperNodeReport)));
    dispatcher.await();
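    // Draining the dispatcher lets the TA_KILL for the succeeded mapper on the
    // unhealthy node be processed, i.e. the first mapper is now rescheduled.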
    // complete the reducer
    for (TaskId taskId : job.tasks.keySet()) {
        if (taskId.getTaskType() == TaskType.REDUCE) {
            job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
        }
    }
    // add another event for a node transition for the other mapper; this should
    // not trigger rescheduling, because the reducer has already completed and
    // the second mapper's output is no longer needed
    job.handle(new JobUpdatedNodesEvent(job.getID(), Collections.singletonList(secondMapperNodeReport)));
    // complete the first mapper that was rescheduled
    TaskId firstMapper = nodeReportsToTaskIds.get(firstMapperNodeReport);
    job.handle(new JobTaskEvent(firstMapper, TaskState.SUCCEEDED));
    // verify the state is moving to committing
    assertJobState(job, JobStateInternal.COMMITTING);
    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);
    dispatcher.stop();
    commitHandler.stop();
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), Map (java.util.Map), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler), EventHandler (org.apache.hadoop.yarn.event.EventHandler), TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent), JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), CyclicBarrier (java.util.concurrent.CyclicBarrier), JobTaskAttemptCompletedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent), NodeId (org.apache.hadoop.yarn.api.records.NodeId), JobUpdatedNodesEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test)

Example 12 with JobImpl

Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

From the class TestJobImpl, the method testKilledDuringCommit: it verifies that a job killed while the committer is mid-commit ends in the KILLED state.

@Test(timeout = 20000)
public void testKilledDuringCommit() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);
    syncBarrier.await();
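    // Both parties have passed the barrier, so the committer has entered
    // commitJob; now kill the job while the commit is still in flight.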
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
    assertJobState(job, JobStateInternal.KILLED);
    dispatcher.stop();
    commitHandler.stop();
}
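
The WaitingOutputCommitter used above is defined elsewhere in TestJobImpl and is not shown on this page. A minimal sketch of such a committer follows; it is an assumption about its shape, not the verbatim source, and it presumes StubbedOutputCommitter provides no-op OutputCommitter methods and that java.util.concurrent.BrokenBarrierException is imported:

private static class WaitingOutputCommitter extends StubbedOutputCommitter {

    private final CyclicBarrier syncBarrier;

    public WaitingOutputCommitter(CyclicBarrier syncBarrier, boolean shouldSucceed) {
        // shouldSucceed is accepted for signature compatibility with the
        // call site above; this sketch ignores it.
        this.syncBarrier = syncBarrier;
    }

    @Override
    public synchronized void commitJob(JobContext jobContext) throws IOException {
        // rendezvous with the test thread, which awaits the barrier after
        // asserting the COMMITTING state
        try {
            syncBarrier.await();
        } catch (InterruptedException | BrokenBarrierException e) {
            throw new IOException(e);
        }
        // park until the JOB_KILL interrupts the committer thread
        while (!Thread.interrupted()) {
            try {
                wait();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }
}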
Also used: OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), Configuration (org.apache.hadoop.conf.Configuration), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent), CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler), CyclicBarrier (java.util.concurrent.CyclicBarrier), Test (org.junit.Test)

Example 13 with JobImpl

Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

From the class TestJobImpl, the method testRebootedDuringSetup: it verifies that an AM reboot during job setup moves the job to the internal REBOOT state while the externally visible state stays RUNNING.

@Test(timeout = 20000)
public void testRebootedDuringSetup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = new StubbedOutputCommitter() {

        @Override
        public synchronized void setupJob(JobContext jobContext) throws IOException {
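            // Park the committer thread until it is interrupted, keeping the
            // job in the SETUP state for the duration of the test.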
            while (!Thread.interrupted()) {
                try {
                    wait();
                } catch (InterruptedException e) {
                    // restore the interrupt status so the loop can exit
                    Thread.currentThread().interrupt();
                }
            }
        }
    };
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    AppContext mockContext = mock(AppContext.class);
    when(mockContext.isLastAMRetry()).thenReturn(false);
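    // This is not the last AM retry, so a reboot should leave the job
    // restartable (internal REBOOT) rather than failing it outright.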
    JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
    JobId jobId = job.getID();
    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(jobId));
    assertJobState(job, JobStateInternal.SETUP);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
    assertJobState(job, JobStateInternal.REBOOT);
    // return the external state as RUNNING since otherwise JobClient will
    // exit when it polls the AM for job state
    Assert.assertEquals(JobState.RUNNING, job.getState());
    dispatcher.stop();
    commitHandler.stop();
}
Also used: OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), Configuration (org.apache.hadoop.conf.Configuration), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent), JobStartEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler), JobContext (org.apache.hadoop.mapreduce.JobContext), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)

Example 14 with JobImpl

Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

From the class TestJobImpl, the private helper testUberDecision: it builds a bare JobImpl, runs the InitTransition on it, and reports whether the job decided to run in uber mode under the given configuration.

private boolean testUberDecision(Configuration conf) {
    JobID jobID = JobID.forName("job_1234567890000_0001");
    JobId jobId = TypeConverter.toYarn(jobID);
    MRAppMetrics mrAppMetrics = MRAppMetrics.create();
    JobImpl job = new JobImpl(jobId,
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
            conf, mock(EventHandler.class), null, new JobTokenSecretManager(),
            new Credentials(), null, null, mrAppMetrics, null, true, null, 0,
            null, null, null, null);
    InitTransition initTransition = getInitTransition(2);
    JobEvent mockJobEvent = mock(JobEvent.class);
    initTransition.transition(job, mockJobEvent);
    boolean isUber = job.isUber();
    return isUber;
}
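
In TestJobImpl this private helper is driven by public tests that toggle the uber-task settings. A minimal sketch of such a caller (the test name and assertions here are illustrative, assuming the standard MRJobConfig.JOB_UBERTASK_ENABLE switch and the stubbed job's two map tasks):

@Test
public void testUberDecisionEnabled() {
    // With the defaults, uber mode is off and the job is not uberized.
    Assert.assertFalse(testUberDecision(new Configuration()));
    // Enabling uber mode should uberize the stubbed job: its two map tasks
    // fit under the default ubertask limits and resource checks.
    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
    Assert.assertTrue(testUberDecision(conf));
}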
Also used: InitTransition (org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.InitTransition), JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent), JobTokenSecretManager (org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager), CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler), EventHandler (org.apache.hadoop.yarn.event.EventHandler), MRAppMetrics (org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics), JobID (org.apache.hadoop.mapreduce.JobID), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Credentials (org.apache.hadoop.security.Credentials)

Example 15 with JobImpl

Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

From the class TestJobImpl, the method testTransitionsAtFailed: it verifies that a job that fails during setup ignores subsequent task events, and only reports FAILED externally once the AM has successfully unregistered.

@Test
public void testTransitionsAtFailed() throws IOException {
    Configuration conf = new Configuration();
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = mock(OutputCommitter.class);
    doThrow(new IOException("forcefail")).when(committer).setupJob(any(JobContext.class));
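    // setupJob throws, so starting the job makes it fail during setup.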
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    AppContext mockContext = mock(AppContext.class);
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
    JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
    JobId jobId = job.getID();
    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(jobId));
    assertJobState(job, JobStateInternal.FAILED);
    job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_COMPLETED));
    assertJobState(job, JobStateInternal.FAILED);
    job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
    assertJobState(job, JobStateInternal.FAILED);
    job.handle(new JobEvent(jobId, JobEventType.JOB_MAP_TASK_RESCHEDULED));
    assertJobState(job, JobStateInternal.FAILED);
    job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
    assertJobState(job, JobStateInternal.FAILED);
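    // All of the stale task events above are ignored in FAILED; externally the
    // job keeps reporting RUNNING until the AM has successfully unregistered.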
    Assert.assertEquals(JobState.RUNNING, job.getState());
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
    Assert.assertEquals(JobState.FAILED, job.getState());
    dispatcher.stop();
    commitHandler.stop();
}
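
assertJobState, used throughout these examples, is another TestJobImpl helper whose body is not shown on this page. A plausible sketch (the polling interval and the 5-second budget are assumptions): it polls the job's internal state until it matches or the budget runs out, then asserts.

private static void assertJobState(JobImpl job, JobStateInternal state) {
    int timeToWaitMsec = 5 * 1000;
    // poll the internal state machine, since events are delivered asynchronously
    while (timeToWaitMsec > 0 && job.getInternalState() != state) {
        try {
            Thread.sleep(10);
            timeToWaitMsec -= 10;
        } catch (InterruptedException e) {
            break;
        }
    }
    Assert.assertEquals(state, job.getInternalState());
}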
Also used: OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter), Configuration (org.apache.hadoop.conf.Configuration), AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher), JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent), JobStartEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler), IOException (java.io.IOException), JobContext (org.apache.hadoop.mapreduce.JobContext), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)

Aggregations

Test (org.junit.Test): 32 usages
Configuration (org.apache.hadoop.conf.Configuration): 29 usages
JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent): 21 usages
CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler): 16 usages
AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher): 14 usages
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 13 usages
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 13 usages
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 10 usages
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 9 usages
JobImpl (org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl): 9 usages
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 7 usages
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 7 usages
JobStartEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent): 7 usages
CyclicBarrier (java.util.concurrent.CyclicBarrier): 5 usages
JobContext (org.apache.hadoop.mapreduce.JobContext): 5 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 4 usages
JobID (org.apache.hadoop.mapreduce.JobID): 4 usages
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 4 usages
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 4 usages
JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent): 4 usages