Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.
The class MRAppMaster, method shutDownJob.
@VisibleForTesting
public void shutDownJob() {
  try {
    // if isLastAMRetry comes as true, should never set it to false
    if (!isLastAMRetry) {
      if (((JobImpl) job).getInternalState() != JobStateInternal.REBOOT) {
        LOG.info("Job finished cleanly, recording last MRAppMaster retry");
        isLastAMRetry = true;
      }
    }
    notifyIsLastAMRetry(isLastAMRetry);
    // Stop all services
    // This will also send the final report to the ResourceManager
    LOG.info("Calling stop for all the services");
    MRAppMaster.this.stop();
    if (isLastAMRetry) {
      // Send the job-end notification when it is safe to report termination
      // to users and it is the last AM retry
      if (getConfig().get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL) != null) {
        try {
          LOG.info("Job end notification started for jobID : "
              + job.getReport().getJobId());
          JobEndNotifier notifier = new JobEndNotifier();
          notifier.setConf(getConfig());
          JobReport report = job.getReport();
          // If the AM did not successfully unregister from the RM, let users
          // know FAILED via the notifier as well
          if (!context.hasSuccessfullyUnregistered()) {
            report.setJobState(JobState.FAILED);
          }
          notifier.notify(report);
        } catch (InterruptedException ie) {
          LOG.warn("Job end notification interrupted for jobID : "
              + job.getReport().getJobId(), ie);
        }
      }
    }
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    clientService.stop();
  } catch (Throwable t) {
    LOG.warn("Graceful stop failed. Exiting.. ", t);
    exitMRAppMaster(1, t);
  }
  exitMRAppMaster(0, null);
}
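The job-end notification branch above only fires when MRJobConfig.MR_JOB_END_NOTIFICATION_URL is set on the job configuration; JobEndNotifier substitutes the $jobId and $jobStatus placeholders before issuing the HTTP callback. A minimal client-side sketch of enabling it (the callback host is a hypothetical example):

// Sketch only: client-side job configuration; the endpoint is a placeholder.
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,
    "http://callback.example.com/notify?jobId=$jobId&status=$jobStatus");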
Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.
The class MRAppMaster, method createJob.
/** Create and initialize (but don't start) a single job.
 * @param forcedState a state to force the job into or null for normal operation.
 * @param diagnostic a diagnostic message to include with the job.
 */
protected Job createJob(Configuration conf, JobStateInternal forcedState,
    String diagnostic) {
  // create single job
  Job newJob = new JobImpl(jobId, appAttemptID, conf, dispatcher.getEventHandler(),
      taskAttemptListener, jobTokenSecretManager, jobCredentials, clock,
      completedTasksFromPreviousRun, metrics, committer, newApiCommitter,
      currentUser.getUserName(), appSubmitTime, amInfos, context, forcedState,
      diagnostic);
  ((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
  dispatcher.register(JobFinishEvent.Type.class, createJobFinishEventHandler());
  return newJob;
}
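Because createJob both builds the JobImpl and registers it in the RunningAppContext, tests can intercept the job by overriding it in an MRApp subclass; the MRAppWithSpiedJob used in the counters test below presumably works this way. The following is a hypothetical sketch, not the actual Hadoop test class: the class name, the field, and the assumption that AppContext.getAllJobs() returns the live, mutable job map are all illustrative.

class SpiedJobMRApp extends MRApp {
  JobImpl spiedJob;

  SpiedJobMRApp(int maps, int reduces, boolean autoComplete,
      String testName, boolean cleanOnStart) {
    super(maps, reduces, autoComplete, testName, cleanOnStart);
  }

  @Override
  protected Job createJob(Configuration conf, JobStateInternal forcedState,
      String diagnostic) {
    // Wrap the real JobImpl in a Mockito spy so tests can verify calls such
    // as constructFinalFullcounters(), then re-register the spy so the rest
    // of the AM sees the spied instance (assumes getAllJobs() is mutable).
    spiedJob = Mockito.spy((JobImpl) super.createJob(conf, forcedState, diagnostic));
    getContext().getAllJobs().put(spiedJob.getID(), spiedJob);
    return spiedJob;
  }
}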
Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.
The class MRApp, method waitForInternalState.
public void waitForInternalState(JobImpl job, JobStateInternal finalState)
    throws Exception {
  int timeoutSecs = 0;
  JobStateInternal iState = job.getInternalState();
  while (!finalState.equals(iState) && timeoutSecs++ < 20) {
    System.out.println("Job Internal State is : " + iState
        + " Waiting for Internal state : " + finalState);
    Thread.sleep(500);
    iState = job.getInternalState();
  }
  System.out.println("Job Internal State is : " + iState);
  Assert.assertEquals("Job Internal state is not correct (timedout)",
      finalState, iState);
}
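A typical call site, assuming the five-argument MRApp constructor used in the counters test below; the map/reduce counts, test name, and target state are arbitrary choices for illustration:

MRApp app = new MRApp(1, 0, false, "TestInternalStateWait", true);
Job job = app.submit(new Configuration());
// Poll (up to roughly 10 seconds: 20 iterations of 500 ms) until the job's
// internal state machine reaches RUNNING, or fail the assertion.
app.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);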
Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.
The class TestJobImpl, method testKilledDuringSetup.
@Test(timeout = 20000)
public void testKilledDuringSetup() throws Exception {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  OutputCommitter committer = new StubbedOutputCommitter() {
    @Override
    public synchronized void setupJob(JobContext jobContext) throws IOException {
      while (!Thread.interrupted()) {
        try {
          wait();
        } catch (InterruptedException e) {
        }
      }
    }
  };
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();
  JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
  JobId jobId = job.getID();
  job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
  assertJobState(job, JobStateInternal.INITED);
  job.handle(new JobStartEvent(jobId));
  assertJobState(job, JobStateInternal.SETUP);
  job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
  assertJobState(job, JobStateInternal.KILLED);
  dispatcher.stop();
  commitHandler.stop();
}
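createStubbedJob and assertJobState are helpers defined elsewhere in TestJobImpl and are not shown here; assertJobState presumably polls the job's internal state much like waitForInternalState above. A rough sketch of such a polling assertion, with arbitrary timeout values, might look like this:

private static void assertJobState(JobImpl job, JobStateInternal expected)
    throws InterruptedException {
  int waitedMsec = 0;
  // Poll the state machine until it reaches the expected internal state or
  // the (arbitrary) 10 second budget runs out, then assert.
  while (job.getInternalState() != expected && waitedMsec < 10000) {
    Thread.sleep(10);
    waitedMsec += 10;
  }
  Assert.assertEquals(expected, job.getInternalState());
}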
Use of org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl in project hadoop by apache.
The class TestMRApp, method testCountersOnJobFinish.
@Test
public void testCountersOnJobFinish() throws Exception {
  MRAppWithSpiedJob app =
      new MRAppWithSpiedJob(1, 1, true, this.getClass().getName(), true);
  JobImpl job = (JobImpl) app.submit(new Configuration());
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();
  System.out.println(job.getAllCounters());
  // Just call getCounters
  job.getAllCounters();
  job.getAllCounters();
  // Should be called only once
  verify(job, times(1)).constructFinalFullcounters();
}
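The verify call only works because the job was created as a Mockito spy (see the createJob sketch above); it asserts that constructFinalFullcounters runs exactly once even though getAllCounters is called repeatedly after the job succeeded. The spy/verify idiom itself, reduced to a self-contained example with hypothetical names (using java.util.ArrayList and org.mockito.Mockito):

List<String> words = Mockito.spy(new ArrayList<String>());
words.add("counters");
words.size();
words.size();
// Passes: add("counters") happened exactly once, no matter how many other
// calls the spy received.
Mockito.verify(words, Mockito.times(1)).add("counters");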