Example 31 with JobImpl

use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

the class TestJobImpl method testJobPriorityUpdate.

@Test
public void testJobPriorityUpdate() throws Exception {
    Configuration conf = new Configuration();
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    Priority submittedPriority = Priority.newInstance(5);
    AppContext mockContext = mock(AppContext.class);
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
    JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
    JobId jobId = job.getID();
    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(jobId));
    assertJobState(job, JobStateInternal.SETUP);
    // Set the job priority to 5; the update should take effect immediately
    job.setJobPriority(submittedPriority);
    Assert.assertEquals(submittedPriority, job.getReport().getJobPriority());
    job.handle(new JobSetupCompletedEvent(jobId));
    assertJobState(job, JobStateInternal.RUNNING);
    // Update the priority of the job to 8 and check that it is updated
    Priority updatedPriority = Priority.newInstance(8);
    job.setJobPriority(updatedPriority);
    assertJobState(job, JobStateInternal.RUNNING);
    Priority jobPriority = job.getReport().getJobPriority();
    Assert.assertNotNull(jobPriority);
    // Verify that the changed priority matches what was set on the job.
    Assert.assertEquals(updatedPriority, jobPriority);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) JobSetupCompletedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobSetupCompletedEvent) Priority(org.apache.hadoop.yarn.api.records.Priority) JobStartEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
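
The state assertions in these examples rely on an assertJobState helper that the snippet does not show. A minimal sketch of what such a polling assertion could look like follows; the timeout and polling interval are assumptions, and the actual helper in TestJobImpl may differ.

import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.junit.Assert;

// Hypothetical helper: poll the job's internal state until it reaches the
// expected value or a timeout expires, then assert on it. Timings are assumed.
private static void assertJobState(JobImpl job, JobStateInternal state) {
    int timeToWaitMsec = 5 * 1000;
    while (timeToWaitMsec > 0 && job.getInternalState() != state) {
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            break;
        }
        timeToWaitMsec -= 10;
    }
    Assert.assertEquals(state, job.getInternalState());
}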

Example 32 with JobImpl

use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

the class TestJobImpl method testJobNoTasks.

@Test
public void testJobNoTasks() {
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.NUM_REDUCES, 0);
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    conf.set(MRJobConfig.WORKFLOW_ID, "testId");
    conf.set(MRJobConfig.WORKFLOW_NAME, "testName");
    conf.set(MRJobConfig.WORKFLOW_NODE_NAME, "testNodeName");
    conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key1", "value1");
    conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key2", "value2");
    conf.set(MRJobConfig.WORKFLOW_TAGS, "tag1,tag2");
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = mock(OutputCommitter.class);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    JobSubmittedEventHandler jseHandler = new JobSubmittedEventHandler("testId", "testName", "testNodeName", "\"key2\"=\"value2\" \"key1\"=\"value1\" ", "tag1,tag2");
    dispatcher.register(EventType.class, jseHandler);
    JobImpl job = createStubbedJob(conf, dispatcher, 0, null);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(job.getID()));
    assertJobState(job, JobStateInternal.SUCCEEDED);
    dispatcher.stop();
    commitHandler.stop();
    try {
        Assert.assertTrue(jseHandler.getAssertValue());
    } catch (InterruptedException e) {
        Assert.fail("Workflow related attributes are not tested properly");
    }
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Configuration(org.apache.hadoop.conf.Configuration) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) JobStartEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) Test(org.junit.Test)
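
The JobSubmittedEventHandler used above is a nested helper of TestJobImpl that the listing omits. A simplified, hypothetical sketch of the idea is shown below: it latches on the JOB_SUBMITTED history event and compares a single workflow attribute, whereas the real handler checks all of the workflow fields passed to its constructor and may be structured differently.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.mapreduce.jobhistory.EventType;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.yarn.event.EventHandler;

// Hypothetical, simplified handler: records whether the workflow id carried by
// the JOB_SUBMITTED history event matches the expected value.
private static class WorkflowIdCheckingHandler implements EventHandler<JobHistoryEvent> {

    private final String expectedWorkflowId;
    private final CountDownLatch seen = new CountDownLatch(1);
    private volatile boolean matched;

    WorkflowIdCheckingHandler(String expectedWorkflowId) {
        this.expectedWorkflowId = expectedWorkflowId;
    }

    @Override
    public void handle(JobHistoryEvent event) {
        if (event.getType() != EventType.JOB_SUBMITTED) {
            return;
        }
        JobSubmittedEvent jse = (JobSubmittedEvent) event.getHistoryEvent();
        matched = expectedWorkflowId.equals(jse.getWorkflowId());
        seen.countDown();
    }

    // True only if the event was seen in time and the attribute matched.
    boolean getAssertValue() throws InterruptedException {
        return seen.await(30, TimeUnit.SECONDS) && matched;
    }
}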

Example 33 with JobImpl

use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

the class TestJobImpl method testCheckJobCompleteSuccess.

@Test(timeout = 20000)
public void testCheckJobCompleteSuccess() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
    completeJobTasks(job);
    assertJobState(job, JobStateInternal.COMMITTING);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
    assertJobState(job, JobStateInternal.COMMITTING);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_MAP_TASK_RESCHEDULED));
    assertJobState(job, JobStateInternal.COMMITTING);
    // let the committer complete and verify the job succeeds
    syncBarrier.await();
    assertJobState(job, JobStateInternal.SUCCEEDED);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
    assertJobState(job, JobStateInternal.SUCCEEDED);
    job.handle(new JobEvent(job.getID(), JobEventType.JOB_MAP_TASK_RESCHEDULED));
    assertJobState(job, JobStateInternal.SUCCEEDED);
    dispatcher.stop();
    commitHandler.stop();
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Configuration(org.apache.hadoop.conf.Configuration) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) CyclicBarrier(java.util.concurrent.CyclicBarrier) Test(org.junit.Test)
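
completeJobTasks is another TestJobImpl helper the listing leaves out. Given what this test needs, it has to report every map and reduce task of the job as succeeded so the job leaves RUNNING and enters COMMITTING; the sketch below is an assumption about its shape, not the actual implementation.

import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;

// Hypothetical sketch: mark every task of the job as SUCCEEDED so the job
// transitions into the COMMITTING state once all tasks are done.
private static void completeJobTasks(JobImpl job) {
    for (int i = 0; i < job.getTotalMaps(); ++i) {
        job.handle(new JobTaskEvent(
            MRBuilderUtils.newTaskId(job.getID(), i, TaskType.MAP),
            TaskState.SUCCEEDED));
    }
    for (int i = 0; i < job.getTotalReduces(); ++i) {
        job.handle(new JobTaskEvent(
            MRBuilderUtils.newTaskId(job.getID(), i, TaskType.REDUCE),
            TaskState.SUCCEEDED));
    }
}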

Example 34 with JobImpl

use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

the class TestJobImpl method testKilledDuringFailAbort.

@Test(timeout = 20000)
public void testKilledDuringFailAbort() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    dispatcher.init(conf);
    dispatcher.start();
    OutputCommitter committer = new StubbedOutputCommitter() {

        @Override
        public void setupJob(JobContext jobContext) throws IOException {
            throw new IOException("forced failure");
        }

        @Override
        public synchronized void abortJob(JobContext jobContext, State state) throws IOException {
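            // Block until this committer thread is interrupted. Because abortJob
            // never returns on its own, the job stays in FAIL_ABORT long enough
            // for the JOB_KILL event sent below to be delivered and observed.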
            while (!Thread.interrupted()) {
                try {
                    wait();
                } catch (InterruptedException e) {
                }
            }
        }
    };
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();
    JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
    JobId jobId = job.getID();
    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(jobId));
    assertJobState(job, JobStateInternal.FAIL_ABORT);
    job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    assertJobState(job, JobStateInternal.KILLED);
    dispatcher.stop();
    commitHandler.stop();
}
Also used : OutputCommitter(org.apache.hadoop.mapreduce.OutputCommitter) Configuration(org.apache.hadoop.conf.Configuration) JobStartEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent) IOException(java.io.IOException) AsyncDispatcher(org.apache.hadoop.yarn.event.AsyncDispatcher) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) State(org.apache.hadoop.mapreduce.JobStatus.State) TaskState(org.apache.hadoop.mapreduce.v2.api.records.TaskState) NodeState(org.apache.hadoop.yarn.api.records.NodeState) JobState(org.apache.hadoop.mapreduce.v2.api.records.JobState) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) JobContext(org.apache.hadoop.mapreduce.JobContext) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
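
The anonymous committer in this test extends StubbedOutputCommitter, a no-op base class defined in TestJobImpl but not shown here. A minimal sketch of such a base, assuming only the standard OutputCommitter contract, could look like this:

import java.io.IOException;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hypothetical no-op committer: every callback does nothing, so tests can
// override just the methods they need to fail or block.
private static class StubbedOutputCommitter extends OutputCommitter {

    @Override
    public void setupJob(JobContext jobContext) throws IOException {
    }

    @Override
    public void setupTask(TaskAttemptContext taskContext) throws IOException {
    }

    @Override
    public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
        return false;
    }

    @Override
    public void commitTask(TaskAttemptContext taskContext) throws IOException {
    }

    @Override
    public void abortTask(TaskAttemptContext taskContext) throws IOException {
    }
}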

Example 35 with JobImpl

use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.

the class TestJobEndNotifier method testNotificationOnLastRetryNormalShutdown.

@Test
public void testNotificationOnLastRetryNormalShutdown() throws Exception {
    HttpServer2 server = startHttpServer();
    // Act like it is the second attempt. Default max attempts is 2
    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, true, this.getClass().getName(), true, 2, true));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL, JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl) app.submit(conf);
    app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
    // Unregistration succeeds: successfullyUnregistered is set
    app.shutDownJob();
    Assert.assertTrue(app.isLastAMRetry());
    Assert.assertEquals(1, JobEndServlet.calledTimes);
    Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED", JobEndServlet.requestUri.getQuery());
    Assert.assertEquals(JobState.SUCCEEDED.toString(), JobEndServlet.foundJobState);
    server.stop();
}
Also used : JobImpl(org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl) HttpServer2(org.apache.hadoop.http.HttpServer2) JobConf(org.apache.hadoop.mapred.JobConf) Test(org.junit.Test)
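
The notification URL configured above uses the $jobId and $jobStatus placeholders, which the MR app master's job-end notifier substitutes before issuing the HTTP callback. Roughly, and simplifying the notifier's real logic:

// Illustration only: substitute the placeholders the way the job-end notifier
// does before it issues the HTTP GET against the configured URL.
String url = conf.get(JobContext.MR_JOB_END_NOTIFICATION_URL);
url = url.replace("$jobId", job.getID().toString())
         .replace("$jobStatus", "SUCCEEDED");
// With JobEndServlet above, this yields ".../jobend?jobid=<jobId>&status=SUCCEEDED",
// which is exactly the query string asserted on JobEndServlet.requestUri.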

Aggregations

Test (org.junit.Test) 32
Configuration (org.apache.hadoop.conf.Configuration) 29
JobEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) 21
CommitterEventHandler (org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) 16
AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher) 14
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter) 13
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId) 13
Task (org.apache.hadoop.mapreduce.v2.app.job.Task) 10
Job (org.apache.hadoop.mapreduce.v2.app.job.Job) 9
JobImpl (org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl) 9
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext) 7
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) 7
JobStartEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent) 7
CyclicBarrier (java.util.concurrent.CyclicBarrier) 5
JobContext (org.apache.hadoop.mapreduce.JobContext) 5
CountDownLatch (java.util.concurrent.CountDownLatch) 4
JobID (org.apache.hadoop.mapreduce.JobID) 4
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) 4
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId) 4
JobTaskEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent) 4