Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
From the class JobHistoryEventHandler, method handleEvent.
@Private
public void handleEvent(JobHistoryEvent event) {
  synchronized (lock) {
    // If this is the AM_STARTED event, set up the writer
    if (event.getHistoryEvent().getEventType() == EventType.AM_STARTED) {
      try {
        AMStartedEvent amStartedEvent = (AMStartedEvent) event.getHistoryEvent();
        setupEventWriter(event.getJobID(), amStartedEvent);
      } catch (IOException ioe) {
        LOG.error("Error JobHistoryEventHandler in handleEvent: " + event, ioe);
        throw new YarnRuntimeException(ioe);
      }
    }
    // For all events:
    // (1) Write it out
    // (2) Process it for JobSummary
    // (3) Process it for ATS (if enabled)
    MetaInfo mi = fileMap.get(event.getJobID());
    try {
      HistoryEvent historyEvent = event.getHistoryEvent();
      if (!(historyEvent instanceof NormalizedResourceEvent)) {
        mi.writeEvent(historyEvent);
      }
      processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(),
          event.getJobID());
      if (timelineV2Client != null) {
        processEventForNewTimelineService(historyEvent, event.getJobID(),
            event.getTimestamp());
      } else if (timelineClient != null) {
        processEventForTimelineServer(historyEvent, event.getJobID(),
            event.getTimestamp());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("In HistoryEventHandler "
            + event.getHistoryEvent().getEventType());
      }
    } catch (IOException e) {
      LOG.error("Error writing History Event: " + event.getHistoryEvent(), e);
      throw new YarnRuntimeException(e);
    }
    if (event.getHistoryEvent().getEventType() == EventType.JOB_SUBMITTED) {
      JobSubmittedEvent jobSubmittedEvent =
          (JobSubmittedEvent) event.getHistoryEvent();
      mi.getJobIndexInfo().setSubmitTime(jobSubmittedEvent.getSubmitTime());
      mi.getJobIndexInfo().setQueueName(jobSubmittedEvent.getJobQueueName());
    }
    // Initialize the launchTime in the JobIndexInfo of MetaInfo
    if (event.getHistoryEvent().getEventType() == EventType.JOB_INITED) {
      JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
      mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime());
    }
    if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) {
      JobQueueChangeEvent jQueueEvent =
          (JobQueueChangeEvent) event.getHistoryEvent();
      mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName());
    }
    // If this is the JOB_FINISHED event, close the writer and set up the job index
    if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) {
      try {
        JobFinishedEvent jFinishedEvent =
            (JobFinishedEvent) event.getHistoryEvent();
        mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime());
        mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getFinishedMaps());
        mi.getJobIndexInfo().setNumReduces(jFinishedEvent.getFinishedReduces());
        mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString());
        closeEventWriter(event.getJobID());
        processDoneFiles(event.getJobID());
      } catch (IOException e) {
        throw new YarnRuntimeException(e);
      }
    }
    // In case of JOB_ERROR, only process the done files (e.g. job
    // summary, job history file etc.) if it is the last AM retry.
    if (event.getHistoryEvent().getEventType() == EventType.JOB_ERROR) {
      try {
        JobUnsuccessfulCompletionEvent jucEvent =
            (JobUnsuccessfulCompletionEvent) event.getHistoryEvent();
        mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
        mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps());
        mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces());
        mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
        closeEventWriter(event.getJobID());
        if (context.isLastAMRetry()) {
          processDoneFiles(event.getJobID());
        }
      } catch (IOException e) {
        throw new YarnRuntimeException(e);
      }
    }
    if (event.getHistoryEvent().getEventType() == EventType.JOB_FAILED
        || event.getHistoryEvent().getEventType() == EventType.JOB_KILLED) {
      try {
        JobUnsuccessfulCompletionEvent jucEvent =
            (JobUnsuccessfulCompletionEvent) event.getHistoryEvent();
        mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
        mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps());
        mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces());
        mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
        closeEventWriter(event.getJobID());
        processDoneFiles(event.getJobID());
      } catch (IOException e) {
        throw new YarnRuntimeException(e);
      }
    }
  }
}
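The common thread in this and the following call sites is that a checked IOException is rewrapped in the unchecked YarnRuntimeException, because the surrounding contract (an event handler or a service lifecycle method) declares no checked exceptions. Below is a minimal, self-contained sketch of that pattern; the names are invented for illustration, and a plain RuntimeException stands in for YarnRuntimeException so it compiles without Hadoop on the classpath.

import java.io.IOException;

public class UncheckedWrapSketch {
  // Mirrors the shape of Hadoop's event-handler interfaces,
  // which declare no checked exceptions.
  interface EventHandler<T> {
    void handle(T event); // no "throws IOException" allowed here
  }

  static EventHandler<String> writerBacked() {
    return event -> {
      try {
        write(event);
      } catch (IOException ioe) {
        // In Hadoop this would be: throw new YarnRuntimeException(ioe);
        throw new RuntimeException(ioe);
      }
    };
  }

  static void write(String event) throws IOException {
    // stand-in for mi.writeEvent(historyEvent)
  }

  public static void main(String[] args) {
    writerBacked().handle("JOB_SUBMITTED");
  }
}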
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
From the class MRAppMaster, method serviceInit.
@Override
protected void serviceInit(final Configuration conf) throws Exception {
  // Create the job classloader if enabled
  createJobClassLoader(conf);
  conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
  initJobCredentialsAndUGI(conf);
  dispatcher = createDispatcher();
  addIfService(dispatcher);
  taskAttemptFinishingMonitor =
      createTaskAttemptFinishingMonitor(dispatcher.getEventHandler());
  addIfService(taskAttemptFinishingMonitor);
  context = new RunningAppContext(conf, taskAttemptFinishingMonitor);
  // Job name is the same as the app name until we support a DAG of jobs
  // for an app later
  appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>");
  conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, appAttemptID.getAttemptId());
  newApiCommitter = false;
  jobId = MRBuilderUtils.newJobId(appAttemptID.getApplicationId(),
      appAttemptID.getApplicationId().getId());
  int numReduceTasks = conf.getInt(MRJobConfig.NUM_REDUCES, 0);
  if ((numReduceTasks > 0
      && conf.getBoolean("mapred.reducer.new-api", false))
      || (numReduceTasks == 0
          && conf.getBoolean("mapred.mapper.new-api", false))) {
    newApiCommitter = true;
    LOG.info("Using mapred newApiCommitter.");
  }
  boolean copyHistory = false;
  committer = createOutputCommitter(conf);
  try {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    Path stagingDir = MRApps.getStagingAreaDir(conf, user);
    FileSystem fs = getFileSystem(conf);
    boolean stagingExists = fs.exists(stagingDir);
    Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    boolean commitStarted = fs.exists(startCommitFile);
    Path endCommitSuccessFile =
        MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
    boolean commitSuccess = fs.exists(endCommitSuccessFile);
    Path endCommitFailureFile =
        MRApps.getEndJobCommitFailureFile(conf, user, jobId);
    boolean commitFailure = fs.exists(endCommitFailureFile);
    if (!stagingExists) {
      isLastAMRetry = true;
      LOG.info("Attempt num: " + appAttemptID.getAttemptId()
          + " is last retry: " + isLastAMRetry
          + " because the staging dir doesn't exist.");
      errorHappenedShutDown = true;
      forcedState = JobStateInternal.ERROR;
      shutDownMessage = "Staging dir does not exist " + stagingDir;
      LOG.fatal(shutDownMessage);
    } else if (commitStarted) {
      // A commit was already started, so this is the last attempt; we just
      // need to know which result to use for notification and how to
      // unregister.
      errorHappenedShutDown = true;
      isLastAMRetry = true;
      LOG.info("Attempt num: " + appAttemptID.getAttemptId()
          + " is last retry: " + isLastAMRetry
          + " because a commit was started.");
      copyHistory = true;
      if (commitSuccess) {
        shutDownMessage = "Job commit succeeded in a prior MRAppMaster attempt "
            + "before it crashed. Recovering.";
        forcedState = JobStateInternal.SUCCEEDED;
      } else if (commitFailure) {
        shutDownMessage = "Job commit failed in a prior MRAppMaster attempt "
            + "before it crashed. Not retrying.";
        forcedState = JobStateInternal.FAILED;
      } else {
        if (isCommitJobRepeatable()) {
          // Clean up previous half-done commits if the committer supports
          // repeatable job commit.
          errorHappenedShutDown = false;
          cleanupInterruptedCommit(conf, fs, startCommitFile);
        } else {
          // The commit is still pending; treat it as a commit error.
          shutDownMessage = "Job commit from a prior MRAppMaster attempt is "
              + "potentially in progress. Preventing multiple commit executions";
          forcedState = JobStateInternal.ERROR;
        }
      }
    }
  } catch (IOException e) {
    throw new YarnRuntimeException("Error while initializing", e);
  }
  if (errorHappenedShutDown) {
    NoopEventHandler eater = new NoopEventHandler();
    // We do not have a JobEventDispatcher in this path
    dispatcher.register(JobEventType.class, eater);
    EventHandler<JobHistoryEvent> historyService = null;
    if (copyHistory) {
      historyService = createJobHistoryHandler(context);
      dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
          historyService);
    } else {
      dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
          eater);
    }
    if (copyHistory) {
      // Now that there's a FINISHING state for applications on the RM, giving
      // AMs plenty of time to clean up after unregistering, it's safe to clean
      // the staging directory after unregistering with the RM. So we start the
      // staging-dir cleaner BEFORE the ContainerAllocator so that on shutdown
      // the ContainerAllocator unregisters first and then the staging-dir
      // cleaner deletes the staging directory.
      addService(createStagingDirCleaningService());
    }
    // Service to allocate containers from the RM (if non-uber) or to fake it (uber)
    containerAllocator = createContainerAllocator(null, context);
    addIfService(containerAllocator);
    dispatcher.register(ContainerAllocator.EventType.class, containerAllocator);
    if (copyHistory) {
      // Add the JobHistoryEventHandler last so that it is properly stopped first.
      // This guarantees that all history events are flushed before the AM goes
      // ahead with shutdown.
      // Note: even though the JobHistoryEventHandler is started last, if any
      // component creates a JobHistoryEvent in the meanwhile, it will just be
      // queued inside the JobHistoryEventHandler.
      addIfService(historyService);
      JobHistoryCopyService cpHist =
          new JobHistoryCopyService(appAttemptID, dispatcher.getEventHandler());
      addIfService(cpHist);
    }
  } else {
    // Service to handle requests from the JobClient
    clientService = createClientService(context);
    // Init ClientService separately so that we stop it separately, since this
    // service needs to wait some time before it stops so clients can learn the
    // final states
    clientService.init(conf);
    containerAllocator = createContainerAllocator(clientService, context);
    // Service to handle the output committer
    committerEventHandler = createCommitterEventHandler(context, committer);
    addIfService(committerEventHandler);
    // Policy handling preemption requests from the RM
    callWithJobClassLoader(conf, new Action<Void>() {
      public Void call(Configuration conf) {
        preemptionPolicy = createPreemptionPolicy(conf);
        preemptionPolicy.init(context);
        return null;
      }
    });
    // Service to handle requests to TaskUmbilicalProtocol
    taskAttemptListener = createTaskAttemptListener(context, preemptionPolicy);
    addIfService(taskAttemptListener);
    // Service to log job history events
    EventHandler<JobHistoryEvent> historyService =
        createJobHistoryHandler(context);
    dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
        historyService);
    this.jobEventDispatcher = new JobEventDispatcher();
    // Register the event dispatchers
    dispatcher.register(JobEventType.class, jobEventDispatcher);
    dispatcher.register(TaskEventType.class, new TaskEventDispatcher());
    dispatcher.register(TaskAttemptEventType.class,
        new TaskAttemptEventDispatcher());
    dispatcher.register(CommitterEventType.class, committerEventHandler);
    if (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false)
        || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false)) {
      // Optional service to speculate on task attempts' progress
      speculator = createSpeculator(conf, context);
      addIfService(speculator);
    }
    speculatorEventDispatcher = new SpeculatorEventDispatcher(conf);
    dispatcher.register(Speculator.EventType.class, speculatorEventDispatcher);
    // Now that there's a FINISHING state for applications on the RM, giving
    // AMs plenty of time to clean up after unregistering, it's safe to clean
    // the staging directory after unregistering with the RM. So we start the
    // staging-dir cleaner BEFORE the ContainerAllocator so that on shutdown
    // the ContainerAllocator unregisters first and then the staging-dir
    // cleaner deletes the staging directory.
    addService(createStagingDirCleaningService());
    // Service to allocate containers from the RM (if non-uber) or to fake it (uber)
    addIfService(containerAllocator);
    dispatcher.register(ContainerAllocator.EventType.class, containerAllocator);
    // Corresponding service to launch allocated containers via the NodeManager
    containerLauncher = createContainerLauncher(context);
    addIfService(containerLauncher);
    dispatcher.register(ContainerLauncher.EventType.class, containerLauncher);
    // Add the JobHistoryEventHandler last so that it is properly stopped first.
    // This guarantees that all history events are flushed before the AM goes
    // ahead with shutdown.
    // Note: even though the JobHistoryEventHandler is started last, if any
    // component creates a JobHistoryEvent in the meanwhile, it will just be
    // queued inside the JobHistoryEventHandler.
    addIfService(historyService);
  }
  super.serviceInit(conf);
}
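The recovery branch above keys off three marker files probed with fs.exists(). As a rough sketch of that decision logic (hypothetical names, not Hadoop API), it reduces to the following classification:

public class CommitRecoverySketch {
  enum Outcome { FRESH_START, RECOVER_SUCCESS, FAIL_NO_RETRY, AMBIGUOUS }

  static Outcome classify(boolean commitStarted, boolean commitSuccess,
      boolean commitFailure) {
    if (!commitStarted) {
      return Outcome.FRESH_START; // no prior commit; run the job normally
    }
    if (commitSuccess) {
      return Outcome.RECOVER_SUCCESS; // prior attempt committed before crashing
    }
    if (commitFailure) {
      return Outcome.FAIL_NO_RETRY; // prior commit failed; don't retry
    }
    // Commit started but neither end-marker exists: it may still be in
    // progress, so a second commit must not be attempted (unless the
    // committer supports repeatable commits, as in the code above).
    return Outcome.AMBIGUOUS;
  }
}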
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
From the class MRAppMaster, method initJobCredentialsAndUGI.
/**
 * Obtain the tokens needed by the job and put them in the UGI.
 * @param conf the job configuration
 */
protected void initJobCredentialsAndUGI(Configuration conf) {
  try {
    this.currentUser = UserGroupInformation.getCurrentUser();
    this.jobCredentials = ((JobConf) conf).getCredentials();
    if (CryptoUtils.isEncryptedSpillEnabled(conf)) {
      int keyLen = conf.getInt(
          MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS,
          MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS);
      KeyGenerator keyGen =
          KeyGenerator.getInstance(INTERMEDIATE_DATA_ENCRYPTION_ALGO);
      keyGen.init(keyLen);
      encryptedSpillKey = keyGen.generateKey().getEncoded();
    } else {
      encryptedSpillKey = new byte[] { 0 };
    }
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  } catch (NoSuchAlgorithmException e) {
    throw new YarnRuntimeException(e);
  }
}
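For reference, the key-generation step uses the standard javax.crypto API. A self-contained sketch follows; the "HmacSHA1" algorithm name and the 128-bit length are illustrative stand-ins for INTERMEDIATE_DATA_ENCRYPTION_ALGO and the configured key size.

import java.security.NoSuchAlgorithmException;
import javax.crypto.KeyGenerator;

public class SpillKeySketch {
  public static byte[] newSpillKey() {
    try {
      // Illustrative algorithm/length; the real values come from
      // INTERMEDIATE_DATA_ENCRYPTION_ALGO and the job configuration.
      KeyGenerator keyGen = KeyGenerator.getInstance("HmacSHA1");
      keyGen.init(128); // key length in bits
      return keyGen.generateKey().getEncoded();
    } catch (NoSuchAlgorithmException e) {
      // Same wrap-unchecked pattern as the Hadoop code above.
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args) {
    System.out.println("generated " + newSpillKey().length * 8 + "-bit key");
  }
}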
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
From the class TaskAttemptListenerImpl, method startRpcServer.
protected void startRpcServer() {
  Configuration conf = getConfig();
  try {
    server = new RPC.Builder(conf)
        .setProtocol(TaskUmbilicalProtocol.class)
        .setInstance(this)
        .setBindAddress("0.0.0.0")
        .setPortRangeConfig(MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE)
        .setNumHandlers(conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT,
            MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT))
        .setVerbose(false)
        .setSecretManager(jobTokenSecretManager)
        .build();
    // Enable service authorization?
    if (conf.getBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
      refreshServiceAcls(conf, new MRAMPolicyProvider());
    }
    server.start();
    this.address = NetUtils.createSocketAddrForHost(
        context.getNMHostname(), server.getListenerAddress().getPort());
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
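Note the bind pattern here: the server binds to the wildcard address (possibly on an OS- or range-assigned port), and the advertised address is then rebuilt from the NodeManager hostname plus the actual bound port. A minimal sketch of the same idea using only java.net (the hostname "node.example.com" is a made-up stand-in for context.getNMHostname()):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class BindSketch {
  public static void main(String[] args) throws IOException {
    try (ServerSocket server = new ServerSocket()) {
      server.bind(new InetSocketAddress("0.0.0.0", 0)); // 0 = ephemeral port
      // Read back the port actually bound, then advertise it with the
      // externally reachable hostname (NetUtils.createSocketAddrForHost
      // plays this role in the Hadoop code above).
      InetSocketAddress advertised =
          new InetSocketAddress("node.example.com", server.getLocalPort());
      System.out.println("advertising " + advertised);
    }
  }
}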
Use of org.apache.hadoop.yarn.exceptions.YarnRuntimeException in project hadoop by apache.
From the class ContainerManagerImpl, method handle.
@SuppressWarnings("unchecked")
@Override
public void handle(ContainerManagerEvent event) {
  switch (event.getType()) {
    case FINISH_APPS:
      CMgrCompletedAppsEvent appsFinishedEvent = (CMgrCompletedAppsEvent) event;
      for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
        Application app = this.context.getApplications().get(appID);
        if (app == null) {
          LOG.warn("couldn't find application " + appID
              + " while processing FINISH_APPS event");
          continue;
        }
        boolean shouldDropEvent = false;
        for (Container container : app.getContainers().values()) {
          if (container.isRecovering()) {
            LOG.info("drop FINISH_APPS event to " + appID
                + " because container " + container.getContainerId()
                + " is recovering");
            shouldDropEvent = true;
            break;
          }
        }
        if (shouldDropEvent) {
          continue;
        }
        String diagnostic = "";
        if (appsFinishedEvent.getReason()
            == CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN) {
          diagnostic = "Application killed on shutdown";
        } else if (appsFinishedEvent.getReason()
            == CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER) {
          diagnostic = "Application killed by ResourceManager";
        }
        this.dispatcher.getEventHandler().handle(
            new ApplicationFinishEvent(appID, diagnostic));
      }
      break;
    case FINISH_CONTAINERS:
      CMgrCompletedContainersEvent containersFinishedEvent =
          (CMgrCompletedContainersEvent) event;
      for (ContainerId containerId :
          containersFinishedEvent.getContainersToCleanup()) {
        ApplicationId appId =
            containerId.getApplicationAttemptId().getApplicationId();
        Application app = this.context.getApplications().get(appId);
        if (app == null) {
          LOG.warn("couldn't find app " + appId
              + " while processing FINISH_CONTAINERS event");
          continue;
        }
        Container container = app.getContainers().get(containerId);
        if (container == null) {
          LOG.warn("couldn't find container " + containerId
              + " while processing FINISH_CONTAINERS event");
          continue;
        }
        if (container.isRecovering()) {
          LOG.info("drop FINISH_CONTAINERS event to " + containerId
              + " because container is recovering");
          continue;
        }
        this.dispatcher.getEventHandler().handle(
            new ContainerKillEvent(containerId,
                ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
                "Container Killed by ResourceManager"));
      }
      break;
    case DECREASE_CONTAINERS_RESOURCE:
      CMgrDecreaseContainersResourceEvent containersDecreasedEvent =
          (CMgrDecreaseContainersResourceEvent) event;
      for (org.apache.hadoop.yarn.api.records.Container container :
          containersDecreasedEvent.getContainersToDecrease()) {
        try {
          changeContainerResourceInternal(container.getId(),
              container.getVersion(), container.getResource(), false);
        } catch (YarnException e) {
          LOG.error("Unable to decrease container resource", e);
        } catch (IOException e) {
          LOG.error("Unable to update container resource in store", e);
        }
      }
      break;
    case SIGNAL_CONTAINERS:
      CMgrSignalContainersEvent containersSignalEvent =
          (CMgrSignalContainersEvent) event;
      for (SignalContainerRequest request :
          containersSignalEvent.getContainersToSignal()) {
        internalSignalToContainer(request, "ResourceManager");
      }
      break;
    default:
      throw new YarnRuntimeException("Got an unknown ContainerManagerEvent type: "
          + event.getType());
  }
}
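This final usage differs from the earlier ones: nothing is being wrapped; YarnRuntimeException serves as a fail-fast guard in the switch's default branch so an unrecognized event type is never silently dropped. A minimal sketch of the same guard (the enum, handler, and message are invented for illustration, with RuntimeException standing in for YarnRuntimeException):

public class UnknownEventSketch {
  enum EventType { FINISH_APPS, FINISH_CONTAINERS, SIGNAL_CONTAINERS }

  static void handle(EventType type) {
    switch (type) {
      case FINISH_APPS:
      case FINISH_CONTAINERS:
      case SIGNAL_CONTAINERS:
        // ... real handling elided ...
        break;
      default:
        // Unreachable today, but if the enum later grows a constant this
        // handler doesn't know about, it fails loudly rather than silently.
        throw new RuntimeException("Got an unknown event type: " + type);
    }
  }

  public static void main(String[] args) {
    handle(EventType.SIGNAL_CONTAINERS);
  }
}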