Use of org.apache.hadoop.mapreduce.JobID in project hadoop by apache: class TestJobImpl, method testReportDiagnostics.
@Test
public void testReportDiagnostics() throws Exception {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  final String diagMsg = "some diagnostic message";
  final JobDiagnosticsUpdateEvent diagUpdateEvent =
      new JobDiagnosticsUpdateEvent(jobId, diagMsg);
  MRAppMetrics mrAppMetrics = MRAppMetrics.create();
  AppContext mockContext = mock(AppContext.class);
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);

  JobImpl job = new JobImpl(jobId, Records.newRecord(ApplicationAttemptId.class),
      new Configuration(), mock(EventHandler.class),
      null, mock(JobTokenSecretManager.class), null,
      SystemClock.getInstance(), null,
      mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(diagUpdateEvent);
  String diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));

  job = new JobImpl(jobId, Records.newRecord(ApplicationAttemptId.class),
      new Configuration(), mock(EventHandler.class),
      null, mock(JobTokenSecretManager.class), null,
      SystemClock.getInstance(), null,
      mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
  job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  job.handle(diagUpdateEvent);
  diagnostics = job.getReport().getDiagnostics();
  Assert.assertNotNull(diagnostics);
  Assert.assertTrue(diagnostics.contains(diagMsg));
}
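The test above leans on the textual form of a MapReduce job ID. As a quick illustration, here is a stand-alone sketch (the class name JobIdFormatDemo is ours, not part of the Hadoop test): JobID.forName parses the "job_<jtIdentifier>_<sequence>" format, and the two components are available through the public accessors.

import org.apache.hadoop.mapreduce.JobID;

public class JobIdFormatDemo {
  public static void main(String[] args) {
    // "job_" + jtIdentifier (typically the cluster start timestamp) + "_" + zero-padded sequence number.
    JobID jobId = JobID.forName("job_1234567890000_0001");

    System.out.println(jobId.getJtIdentifier()); // 1234567890000
    System.out.println(jobId.getId());           // 1
    System.out.println(jobId);                   // job_1234567890000_0001
  }
}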
Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache: class HadoopClientProtocolSelfTest, method checkJobSubmit.
/**
 * Test job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

    igfs.mkdirs(new IgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
        igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
        bw.write("word");
    }

    Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    try {
        job.setJobName(JOB_NAME);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(TestMapper.class);
        job.setReducerClass(TestReducer.class);

        if (!noCombiners)
            job.setCombinerClass(TestCombiner.class);

        if (noReducers)
            job.setNumReduceTasks(0);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TestOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
        FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

        job.submit();

        JobID jobId = job.getJobID();

        // Setup phase.
        JobStatus jobStatus = job.getStatus();

        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

        assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
        assert jobStatus.getMapProgress() == 0.0f;
        assert jobStatus.getReduceProgress() == 0.0f;

        U.sleep(2100);

        JobStatus recentJobStatus = job.getStatus();

        assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() :
            "Old=" + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();

        // Transferring to map phase.
        setupLockFile.delete();

        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getSetupProgress());
                }
                catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);

        // Map phase.
        jobStatus = job.getStatus();

        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
        assert jobStatus.getReduceProgress() == 0.0f;

        U.sleep(2100);

        recentJobStatus = job.getStatus();

        assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() :
            "Old=" + jobStatus.getMapProgress() + ", new=" + recentJobStatus.getMapProgress();

        // Transferring to reduce phase.
        mapLockFile.delete();

        assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                try {
                    return F.eq(1.0f, job.getStatus().getMapProgress());
                }
                catch (Exception e) {
                    throw new RuntimeException("Unexpected exception.", e);
                }
            }
        }, 5000L);

        if (!noReducers) {
            // Reduce phase.
            jobStatus = job.getStatus();

            checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);

            assert jobStatus.getSetupProgress() == 1.0f;
            assert jobStatus.getMapProgress() == 1.0f;
            assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;

            // Ensure that reduce progress increases.
            U.sleep(2100);

            recentJobStatus = job.getStatus();

            assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() :
                "Old=" + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();

            reduceLockFile.delete();
        }

        job.waitForCompletion(false);

        jobStatus = job.getStatus();

        checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);

        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() == 1.0f;

        dumpIgfs(igfs, new IgfsPath(PATH_OUTPUT));
    }
    finally {
        job.getCluster().close();
    }
}
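For readers who want the same progress-polling idea without the Ignite test helpers (GridTestUtils, GridAbsPredicate, U.sleep), here is a minimal hedged sketch using only the plain Hadoop client API; the class and method names below are ours and not part of either project.

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobStatus;

final class JobStatusPoller {
    /** Polls the job status until the job reaches SUCCEEDED or the timeout expires. */
    static boolean waitForSuccess(Job job, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;

        while (System.currentTimeMillis() < deadline) {
            JobStatus status = job.getStatus();

            if (status.getState() == JobStatus.State.SUCCEEDED)
                return true;

            Thread.sleep(500L);
        }

        return false;
    }
}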
Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache: class HadoopClientProtocolSelfTest, method tstNextJobId.
/**
 * Test next job ID generation.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("ConstantConditions")
private void tstNextJobId() throws Exception {
    IgniteHadoopClientProtocolProvider provider = provider();

    ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));

    JobID jobId = proto.getNewJobID();

    assert jobId != null;
    assert jobId.getJtIdentifier() != null;

    JobID nextJobId = proto.getNewJobID();

    assert nextJobId != null;
    assert nextJobId.getJtIdentifier() != null;

    assert !F.eq(jobId, nextJobId);
}
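The final check uses Ignite's null-safe F.eq, which here boils down to JobID.equals: two IDs are equal only if both the jtIdentifier and the sequence number match. A tiny hedged illustration with made-up values (the class name is ours):

import org.apache.hadoop.mapreduce.JobID;

public class JobIdEqualityDemo {
    public static void main(String[] args) {
        JobID first = new JobID("1234567890000", 1);
        JobID second = new JobID("1234567890000", 2);

        // Same jtIdentifier but different sequence numbers -> not equal.
        System.out.println(first.equals(second)); // false
    }
}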
Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache: class HadoopClientProtocolSelfTest, method tstUnknownJobCounters.
/**
 * Tests job counters retrieval for unknown job id.
 *
 * @throws Exception If failed.
 */
private void tstUnknownJobCounters() throws Exception {
    IgniteHadoopClientProtocolProvider provider = provider();

    ClientProtocol proto = provider.create(config(HadoopAbstractSelfTest.REST_PORT));

    try {
        proto.getJobCounters(new JobID(UUID.randomUUID().toString(), -1));

        fail("exception must be thrown");
    }
    catch (Exception e) {
        assert e instanceof IOException : "wrong error has been thrown";
    }
}
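If a caller prefers to treat an unknown job ID as "no counters" rather than an error, a hedged wrapper around the same ClientProtocol call might look like the sketch below (class and method names are ours):

import java.io.IOException;

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;

final class CountersLookup {
    /** Returns the job's counters, or null if the protocol reports the job as unknown. */
    static Counters countersOrNull(ClientProtocol proto, JobID jobId) throws InterruptedException {
        try {
            return proto.getJobCounters(jobId);
        }
        catch (IOException e) {
            // The test above expects exactly this failure mode for an unknown job ID.
            return null;
        }
    }
}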
Use of org.apache.hadoop.mapreduce.JobID in project ignite by apache: class HadoopClientProtocol, method getNewJobID.
/** {@inheritDoc} */
@Override public JobID getNewJobID() throws IOException, InterruptedException {
    try {
        conf.setLong(HadoopCommonUtils.REQ_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());

        HadoopJobId jobID = execute(HadoopProtocolNextTaskIdTask.class);

        conf.setLong(HadoopCommonUtils.RESPONSE_NEW_JOBID_TS_PROPERTY, U.currentTimeMillis());

        return new JobID(jobID.globalId().toString(), jobID.localId());
    }
    catch (GridClientException e) {
        throw new IOException("Failed to get new job ID.", e);
    }
}
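On the consuming side, callers only see the ClientProtocol interface. A minimal hedged usage sketch follows (the class and method names are ours; in the tests above the concrete implementation behind the interface would be HadoopClientProtocol):

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;

final class NewJobIdClient {
    /** Requests a fresh job ID from whatever ClientProtocol implementation is plugged in. */
    static JobID requestNewJobId(ClientProtocol proto) throws Exception {
        JobID jobId = proto.getNewJobID();

        System.out.println("jtIdentifier=" + jobId.getJtIdentifier() + ", sequence=" + jobId.getId());

        return jobId;
    }
}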