Example 26 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project hadoop by apache.

The class TestJobImpl, method testReportDiagnostics.

@Test
public void testReportDiagnostics() throws Exception {
    JobID jobID = JobID.forName("job_1234567890000_0001");
    JobId jobId = TypeConverter.toYarn(jobID);
    final String diagMsg = "some diagnostic message";
    final JobDiagnosticsUpdateEvent diagUpdateEvent = new JobDiagnosticsUpdateEvent(jobId, diagMsg);
    MRAppMetrics mrAppMetrics = MRAppMetrics.create();
    AppContext mockContext = mock(AppContext.class);
    when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
    // A freshly created job should record diagnostics delivered to it.
    JobImpl job = new JobImpl(jobId, Records.newRecord(ApplicationAttemptId.class), new Configuration(), mock(EventHandler.class), null, mock(JobTokenSecretManager.class), null, SystemClock.getInstance(), null, mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
    job.handle(diagUpdateEvent);
    String diagnostics = job.getReport().getDiagnostics();
    Assert.assertNotNull(diagnostics);
    Assert.assertTrue(diagnostics.contains(diagMsg));
    // Diagnostics must also be recorded after the job receives a kill event.
    job = new JobImpl(jobId, Records.newRecord(ApplicationAttemptId.class), new Configuration(), mock(EventHandler.class), null, mock(JobTokenSecretManager.class), null, SystemClock.getInstance(), null, mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
    job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    job.handle(diagUpdateEvent);
    diagnostics = job.getReport().getDiagnostics();
    Assert.assertNotNull(diagnostics);
    Assert.assertTrue(diagnostics.contains(diagMsg));
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) JobEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent) JobTokenSecretManager(org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) CommitterEventHandler(org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler) EventHandler(org.apache.hadoop.yarn.event.EventHandler) JobDiagnosticsUpdateEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) MRAppMetrics(org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics) JobID(org.apache.hadoop.mapreduce.JobID) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
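The interesting step above is the conversion between the client-side JobID and the YARN-side JobId record via TypeConverter. The round trip can also be exercised outside the test harness; the sketch below is illustrative (the class name JobIdRoundTrip is ours, not part of the test):

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;

public class JobIdRoundTrip {
    public static void main(String[] args) {
        // Parse the canonical string form: job_<clusterTimestamp>_<sequenceNumber>.
        JobID jobID = JobID.forName("job_1234567890000_0001");
        // Convert to the YARN-side record and back.
        JobId yarnId = TypeConverter.toYarn(jobID);
        JobID back = TypeConverter.fromYarn(yarnId);
        // The conversion preserves the identifier.
        if (!jobID.equals(back))
            throw new AssertionError("Round trip should preserve the job ID");
        System.out.println(yarnId.getId() + " / " + back);
    }
}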

Example 27 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache.

The class HadoopClientProtocolSelfTest, method checkJobSubmit.

/**
 * Test job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
    igfs.mkdirs(new IgfsPath(PATH_INPUT));
    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }
    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
    final Job job = Job.getInstance(conf);
    try {
        job.setJobName(JOB_NAME);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);
        if (!noCombiners)
            job.setCombinerClass(TestCombiner.class);
        if (noReducers)
            job.setNumReduceTasks(0);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TestOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));
        job.submit();
        JobID jobId = job.getJobID();
        // Setup phase.
        JobStatus jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
        assert jobStatus.getMapProgress() == 0.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        U.sleep(2100);
        JobStatus recentJobStatus = job.getStatus();
        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();
        // Transferring to map phase: deleting the setup lock file lets the blocked setup task run to completion.
        setupLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getSetupProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        // Map phase.
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
        assert jobStatus.getReduceProgress() == 0.0f;
        U.sleep(2100);
        recentJobStatus = job.getStatus();
        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();
        // Transferring to reduce phase: deleting the map lock file lets the blocked map tasks run to completion.
        mapLockFile.delete();
        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {

            @Override
            public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getMapProgress());
                } catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);
        if (!noReducers) {
            // Reduce phase.
            jobStatus = job.getStatus();
            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
            assert jobStatus.getSetupProgress() == 1.0f;
            assert jobStatus.getMapProgress() == 1.0f;
            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;
            // Ensure that reduce progress increases.
            U.sleep(2100);
            recentJobStatus = job.getStatus();
            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();
            // Let the blocked reduce tasks run to completion.
            reduceLockFile.delete();
        }
        job.waitForCompletion(false);
        jobStatus = job.getStatus();
        checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() == 1.0f;
        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
    } finally {
        job.getCluster().close();
    }
}
Also used: IgfsPath(org.apache.ignite.igfs.IgfsPath) Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) JobStatus(org.apache.hadoop.mapreduce.JobStatus) OutputStreamWriter(java.io.OutputStreamWriter) Job(org.apache.hadoop.mapreduce.Job) JobID(org.apache.hadoop.mapreduce.JobID)
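checkJobStatus, setupLockFile, mapLockFile and reduceLockFile are helpers and fields defined elsewhere in HadoopClientProtocolSelfTest; the lock files hold each phase open until the test deletes them. For orientation, here is a minimal sketch of what the assertion helper could look like (the signature and messages are assumptions, not the Ignite source):

/** Hypothetical sketch of the status assertion helper used above. */
private static void checkJobStatus(JobStatus status, JobID expJobId, String expJobName,
    JobStatus.State expState, float expCleanupProgress) {
    assert expJobId.equals(status.getJobID()) : "Unexpected job ID: " + status.getJobID();
    assert expJobName.equals(status.getJobName()) : "Unexpected job name: " + status.getJobName();
    assert expState == status.getState() : "Unexpected state: " + status.getState();
    assert expCleanupProgress == status.getCleanupProgress() : "Unexpected cleanup progress: " + status.getCleanupProgress();
}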

Example 28 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache.

The class HadoopClientProtocolSelfTest, method tstNextJobId.

/**
 * Test next job ID generation.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
private void tstNextJobId() throws Exception {
    IgniteHadoopClientProtocolProvider provider = provider();
    ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));
    JobID jobId = proto.getNewJobID();
    assert jobId != null;
    assert jobId.getJtIdentifier() != null;
    JobID nextJobId = proto.getNewJobID();
    assert nextJobId != null;
    assert nextJobId.getJtIdentifier() != null;
    assert !F.eq(jobId, nextJobId);
}
Also used: IgniteHadoopClientProtocolProvider(org.apache.ignite.hadoop.mapreduce.IgniteHadoopClientProtocolProvider) ClientProtocol(org.apache.hadoop.mapreduce.protocol.ClientProtocol) JobID(org.apache.hadoop.mapreduce.JobID)
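The test only asserts that two consecutively issued IDs differ. JobID identity itself is defined by the (jtIdentifier, id) pair, as this standalone snippet illustrates (arbitrary values, assertions enabled with -ea):

// A JobID is identified by its jtIdentifier string plus a sequence number.
JobID a = new JobID("1234567890000", 1);
JobID b = new JobID("1234567890000", 1);
JobID c = new JobID("1234567890000", 2);
assert a.equals(b);    // same identifier, same sequence number
assert !a.equals(c);   // same identifier, different sequence number
System.out.println(a); // prints job_1234567890000_0001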

Example 29 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache.

The class HadoopClientProtocolSelfTest, method tstUnknownJobCounters.

/**
 * Tests job counters retrieval for unknown job id.
 *
 * @throws Exception If failed.
 */
private void tstUnknownJobCounters() throws Exception {
    IgniteHadoopClientProtocolProvider provider = provider();
    ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));
    try {
        proto.getJobCounters(new JobID(UUID.randomUUID().toString(), -1));
        fail("exception must be thrown");
    } catch (Exception e) {
        assert e instanceof IOException : "wrong error has been thrown";
    }
}
Also used: IgniteHadoopClientProtocolProvider(org.apache.ignite.hadoop.mapreduce.IgniteHadoopClientProtocolProvider) IOException(java.io.IOException) ClientProtocol(org.apache.hadoop.mapreduce.protocol.ClientProtocol) JobID(org.apache.hadoop.mapreduce.JobID)
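The same expectation can be phrased more compactly with Ignite's GridTestUtils.assertThrows helper. The variant below is a sketch, not the original test code; it assumes java.util.concurrent.Callable and org.apache.ignite.testframework.GridTestUtils are imported and a log field is in scope:

// Counters for a job ID that was never submitted must fail with IOException.
GridTestUtils.assertThrows(log, new Callable<Object>() {
    @Override public Object call() throws Exception {
        return proto.getJobCounters(new JobID(UUID.randomUUID().toString(), -1));
    }
}, IOException.class, null);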

Example 30 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache.

The class HadoopClientProtocol, method getNewJobID.

/**
 * {@inheritDoc}
 */
@Override
public JobID getNewJobID() throws IOException, InterruptedException {
    try {
        conf.setLong(HadoopCommonUtils.REQ_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());
        HadoopJobId jobID = execute(HadoopProtocolNextTaskIdTask.class);
        conf.setLong(HadoopCommonUtils.RESPONSE_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());
        return new JobID(jobID.globalId().toString(), jobID.localId());
    } catch (GridClientException e) {
        throw new IOException("Failed to get new job ID.", e);
    }
}
Also used: GridClientException(org.apache.ignite.internal.client.GridClientException) IOException(java.io.IOException) HadoopJobId(org.apache.ignite.internal.processors.hadoop.HadoopJobId) JobID(org.apache.hadoop.mapreduce.JobID)
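The two conf.setLong calls bracket the remote execution, presumably so the request/response timestamps can feed performance accounting later. A quick illustrative sketch of reading them back after the call returns (not part of the original class):

// Measure the client-observed latency of the new-job-ID request.
long reqTs = conf.getLong(HadoopCommonUtils.REQ_NEW_JOBID_TS_PROPERTY, -1L);
long resTs = conf.getLong(HadoopCommonUtils.RESPONSE_NEW_JOBID_TS_PROPERTY, -1L);
if (reqTs >= 0 && resTs >= 0)
    System.out.println("getNewJobID round trip: " + (resTs - reqTs) + " ms");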

Aggregations

JobID (org.apache.hadoop.mapreduce.JobID): 61
Test (org.junit.Test): 33
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 17
IOException (java.io.IOException): 16
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 16
TaskID (org.apache.hadoop.mapreduce.TaskID): 16
Configuration (org.apache.hadoop.conf.Configuration): 12
Job (org.apache.hadoop.mapreduce.Job): 8
ArrayList (java.util.ArrayList): 7
Path (org.apache.hadoop.fs.Path): 7
EventHandler (org.apache.hadoop.yarn.event.EventHandler): 7
HashMap (java.util.HashMap): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 6
JobConf (org.apache.hadoop.mapred.JobConf): 6
TaskAttemptInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo): 6
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 5
Event (org.apache.hadoop.mapreduce.jobhistory.Event): 5
EventType (org.apache.hadoop.mapreduce.jobhistory.EventType): 5
JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent): 5
JobHistoryEventHandler (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler): 5