
Example 1 with JobFinishEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent in project hadoop by apache.

From the class JobHistoryEventHandler, the method serviceStop, which shuts down the event-handling thread, drains any queued history events, and closes all open writers:

@Override
protected void serviceStop() throws Exception {
    LOG.info("Stopping JobHistoryEventHandler. " + "Size of the outstanding queue size is " + eventQueue.size());
    stopped = true;
    //do not interrupt while event handling is in progress
    synchronized (lock) {
        if (eventHandlingThread != null) {
            LOG.debug("Interrupting Event Handling thread");
            eventHandlingThread.interrupt();
        } else {
            LOG.debug("Null event handling thread");
        }
    }
    try {
        if (eventHandlingThread != null) {
            LOG.debug("Waiting for Event Handling thread to complete");
            eventHandlingThread.join();
        }
    } catch (InterruptedException ie) {
        LOG.info("Interrupted Exception while stopping", ie);
    }
    // Cancel all flush timers so that none fires during or after
    // the metaInfo object is wrapped up.
    for (MetaInfo mi : fileMap.values()) {
        try {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Shutting down timer for " + mi);
            }
            mi.shutDownTimer();
        } catch (IOException e) {
            LOG.info("Exception while canceling delayed flush timer. " + "Likely caused by a failed flush " + e.getMessage());
        }
    }
    //write all the events remaining in queue
    Iterator<JobHistoryEvent> it = eventQueue.iterator();
    while (it.hasNext()) {
        JobHistoryEvent ev = it.next();
        LOG.info("In stop, writing event " + ev.getType());
        handleEvent(ev);
    }
    // Write a final unsuccessful-completion event for any jobs that have not yet
    // closed their event writers
    if (forceJobCompletion) {
        for (Map.Entry<JobId, MetaInfo> jobIt : fileMap.entrySet()) {
            JobId toClose = jobIt.getKey();
            MetaInfo mi = jobIt.getValue();
            if (mi != null && mi.isWriterActive()) {
                LOG.warn("Found jobId " + toClose + " to have not been closed. Will close");
                //Create a JobFinishEvent so that it is written to the job history
                final Job job = context.getJob(toClose);
                JobUnsuccessfulCompletionEvent jucEvent = new JobUnsuccessfulCompletionEvent(
                        TypeConverter.fromYarn(toClose), System.currentTimeMillis(),
                        job.getCompletedMaps(), job.getCompletedReduces(),
                        createJobStateForJobUnsuccessfulCompletionEvent(mi.getForcedJobStateOnShutDown()),
                        job.getDiagnostics());
                JobHistoryEvent jfEvent = new JobHistoryEvent(toClose, jucEvent);
                //Bypass the queue mechanism which might wait. Call the method directly
                handleEvent(jfEvent);
            }
        }
    }
    //close all file handles
    for (MetaInfo mi : fileMap.values()) {
        try {
            mi.closeWriter();
        } catch (IOException e) {
            LOG.info("Exception while closing file " + e.getMessage());
        }
    }
    if (timelineClient != null) {
        timelineClient.stop();
    } else if (timelineV2Client != null) {
        timelineV2Client.stop();
    }
    LOG.info("Stopped JobHistoryEventHandler. super.stop()");
    super.serviceStop();
}
Also used: IOException (java.io.IOException), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Map (java.util.Map), HashMap (java.util.HashMap), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
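
The shutdown sequence above follows a common drain-on-stop pattern: set a stopped flag, interrupt and join the handler thread, then process whatever is still queued before closing resources. Below is a minimal, self-contained sketch of that pattern; the Event class, queue, and handleEvent method are simplified stand-ins for illustration, not the Hadoop classes.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainOnStopHandler {

    // Simplified stand-in for JobHistoryEvent.
    static class Event {
        final String type;
        Event(String type) { this.type = type; }
    }

    private final BlockingQueue<Event> eventQueue = new LinkedBlockingQueue<>();
    private volatile boolean stopped = false;
    private Thread eventHandlingThread;

    public void start() {
        eventHandlingThread = new Thread(() -> {
            while (!stopped) {
                try {
                    handleEvent(eventQueue.take());
                } catch (InterruptedException ie) {
                    // Interrupted by stop(); the loop re-checks the stopped flag.
                }
            }
        });
        eventHandlingThread.start();
    }

    public void submit(Event ev) {
        eventQueue.add(ev);
    }

    public void stop() throws InterruptedException {
        stopped = true;                  // 1. tell the loop to exit
        eventHandlingThread.interrupt(); // 2. unblock a take() on an empty queue
        eventHandlingThread.join();      // 3. wait for the thread to finish
        Event ev;
        while ((ev = eventQueue.poll()) != null) {
            handleEvent(ev);             // 4. drain leftover events inline
        }
    }

    private void handleEvent(Event ev) {
        System.out.println("handled " + ev.type);
    }
}

The interrupt before the join matters: without it, a thread blocked in take() on an empty queue would never observe the flag and join() would hang, which is exactly why serviceStop interrupts the event handling thread before waiting on it.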

Example 2 with JobFinishEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent in project hadoop by apache.

From the class MRApp, the method createJob, which builds a test job and registers a handler that stops the app as soon as a JobFinishEvent arrives:

@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState, String diagnostic) {
    UserGroupInformation currentUser = null;
    try {
        currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new YarnRuntimeException(e);
    }
    Job newJob = new TestJob(getJobId(), getAttemptID(), conf,
            getDispatcher().getEventHandler(), getTaskAttemptListener(),
            getContext().getClock(), getCommitter(), isNewApiCommitter(),
            currentUser.getUserName(), getContext(), forcedState, diagnostic);
    ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);
    getDispatcher().register(JobFinishEvent.Type.class, new EventHandler<JobFinishEvent>() {

        @Override
        public void handle(JobFinishEvent event) {
            stop();
        }
    });
    return newJob;
}
Also used: YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), JobFinishEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent), IOException (java.io.IOException), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
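
The key move in this example is registering an EventHandler for JobFinishEvent.Type.class so the test harness stops itself once the job completes. Below is a minimal sketch of that register-and-dispatch pattern; MiniDispatcher, JobFinishedEvent, and the handler are simplified stand-ins for illustration, not the YARN AsyncDispatcher API.

import java.util.HashMap;
import java.util.Map;

public class MiniDispatcherDemo {

    interface EventHandler<E> {
        void handle(E event);
    }

    // Simplified stand-in playing the role of JobFinishEvent.
    static class JobFinishedEvent {
        final String jobId;
        JobFinishedEvent(String jobId) { this.jobId = jobId; }
    }

    // Routes each event to the handler registered for its class, the way
    // MRApp registers a JobFinishEvent handler with the dispatcher above.
    static class MiniDispatcher {
        private final Map<Class<?>, EventHandler<Object>> handlers = new HashMap<>();

        @SuppressWarnings("unchecked")
        <E> void register(Class<E> eventClass, EventHandler<E> handler) {
            handlers.put(eventClass, (EventHandler<Object>) handler);
        }

        void dispatch(Object event) {
            EventHandler<Object> handler = handlers.get(event.getClass());
            if (handler != null) {
                handler.handle(event);
            }
        }
    }

    public static void main(String[] args) {
        MiniDispatcher dispatcher = new MiniDispatcher();
        // As in MRApp.createJob: react to job completion by stopping the app.
        dispatcher.register(JobFinishedEvent.class,
                event -> System.out.println("job " + event.jobId + " finished; stopping app"));
        dispatcher.dispatch(new JobFinishedEvent("job_1"));
    }
}

In the real code the handler's handle(JobFinishEvent) simply calls stop() on the MRApp service, so the test run terminates as soon as the job reaches a terminal state.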

Aggregations

IOException (java.io.IOException): 2
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 2
HashMap (java.util.HashMap): 1
Map (java.util.Map): 1
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 1
JobFinishEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent): 1
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 1
YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException): 1