
Example 1 with TaskAttemptFinishingMonitor

Use of org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor in the Apache Hadoop project.

From the class MRAppMaster, the method serviceInit:

@Override
protected void serviceInit(final Configuration conf) throws Exception {
    // create the job classloader if enabled
    createJobClassLoader(conf);
    conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
    initJobCredentialsAndUGI(conf);
    dispatcher = createDispatcher();
    addIfService(dispatcher);
    taskAttemptFinishingMonitor = createTaskAttemptFinishingMonitor(dispatcher.getEventHandler());
    addIfService(taskAttemptFinishingMonitor);
    context = new RunningAppContext(conf, taskAttemptFinishingMonitor);
    // Job name is the same as the app name until we support a DAG of jobs
    // for an app later
    appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>");
    conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, appAttemptID.getAttemptId());
    newApiCommitter = false;
    jobId = MRBuilderUtils.newJobId(appAttemptID.getApplicationId(), appAttemptID.getApplicationId().getId());
    int numReduceTasks = conf.getInt(MRJobConfig.NUM_REDUCES, 0);
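    // Detect the new MapReduce API: jobs with reduces are judged by the
    // reducer's new-api flag, map-only jobs by the mapper's.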
    if ((numReduceTasks > 0 && conf.getBoolean("mapred.reducer.new-api", false)) || (numReduceTasks == 0 && conf.getBoolean("mapred.mapper.new-api", false))) {
        newApiCommitter = true;
        LOG.info("Using mapred newApiCommitter.");
    }
    boolean copyHistory = false;
    committer = createOutputCommitter(conf);
    try {
        String user = UserGroupInformation.getCurrentUser().getShortUserName();
        Path stagingDir = MRApps.getStagingAreaDir(conf, user);
        FileSystem fs = getFileSystem(conf);
        boolean stagingExists = fs.exists(stagingDir);
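        // Marker files left by a prior AM attempt record how far job commit
        // got: started, succeeded, or failed.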
        Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
        boolean commitStarted = fs.exists(startCommitFile);
        Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
        boolean commitSuccess = fs.exists(endCommitSuccessFile);
        Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
        boolean commitFailure = fs.exists(endCommitFailureFile);
        if (!stagingExists) {
            isLastAMRetry = true;
            LOG.info("Attempt num: " + appAttemptID.getAttemptId() + " is last retry: " + isLastAMRetry + " because the staging dir doesn't exist.");
            errorHappenedShutDown = true;
            forcedState = JobStateInternal.ERROR;
            shutDownMessage = "Staging dir does not exist " + stagingDir;
            LOG.fatal(shutDownMessage);
        } else if (commitStarted) {
            // A commit was started, so this is the last retry; we just need to
            // know what result we will use to notify, and how we will unregister.
            errorHappenedShutDown = true;
            isLastAMRetry = true;
            LOG.info("Attempt num: " + appAttemptID.getAttemptId() + " is last retry: " + isLastAMRetry + " because a commit was started.");
            copyHistory = true;
            if (commitSuccess) {
                shutDownMessage = "Job commit succeeded in a prior MRAppMaster attempt " + "before it crashed. Recovering.";
                forcedState = JobStateInternal.SUCCEEDED;
            } else if (commitFailure) {
                shutDownMessage = "Job commit failed in a prior MRAppMaster attempt " + "before it crashed. Not retrying.";
                forcedState = JobStateInternal.FAILED;
            } else {
                if (isCommitJobRepeatable()) {
                    // cleanup previous half done commits if committer supports
                    // repeatable job commit.
                    errorHappenedShutDown = false;
                    cleanupInterruptedCommit(conf, fs, startCommitFile);
                } else {
                    // The commit is still pending; treat this as a commit error.
                    shutDownMessage = "Job commit from a prior MRAppMaster attempt is " + "potentially in progress. Preventing multiple commit executions";
                    forcedState = JobStateInternal.ERROR;
                }
            }
        }
    } catch (IOException e) {
        throw new YarnRuntimeException("Error while initializing", e);
    }
    if (errorHappenedShutDown) {
        NoopEventHandler eater = new NoopEventHandler();
        //We do not have a JobEventDispatcher in this path
        dispatcher.register(JobEventType.class, eater);
        EventHandler<JobHistoryEvent> historyService = null;
        if (copyHistory) {
            historyService = createJobHistoryHandler(context);
            dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class, historyService);
        } else {
            dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class, eater);
        }
        if (copyHistory) {
            // Now that there's a FINISHING state for application on RM to give AMs
            // plenty of time to clean up after unregister it's safe to clean staging
            // directory after unregistering with RM. So, we start the staging-dir
            // cleaner BEFORE the ContainerAllocator so that on shut-down,
            // ContainerAllocator unregisters first and then the staging-dir cleaner
            // deletes staging directory.
            addService(createStagingDirCleaningService());
        }
        // service to allocate containers from RM (if non-uber) or to fake it (uber)
        containerAllocator = createContainerAllocator(null, context);
        addIfService(containerAllocator);
        dispatcher.register(ContainerAllocator.EventType.class, containerAllocator);
        if (copyHistory) {
            // Add the JobHistoryEventHandler last so that it is properly stopped first.
            // This will guarantee that all history-events are flushed before AM goes
            // ahead with shutdown.
            // Note: Even though JobHistoryEventHandler is started last, if any
            // component creates a JobHistoryEvent in the meanwhile, it will just
            // be queued inside the JobHistoryEventHandler.
            addIfService(historyService);
            JobHistoryCopyService cpHist = new JobHistoryCopyService(appAttemptID, dispatcher.getEventHandler());
            addIfService(cpHist);
        }
    } else {
        //service to handle requests from JobClient
        clientService = createClientService(context);
        // Init ClientService separately so that we stop it separately, since this
        // service needs to wait some time before it stops so clients can know the
        // final states
        clientService.init(conf);
        containerAllocator = createContainerAllocator(clientService, context);
        //service to handle the output committer
        committerEventHandler = createCommitterEventHandler(context, committer);
        addIfService(committerEventHandler);
        //policy handling preemption requests from RM
        callWithJobClassLoader(conf, new Action<Void>() {

            public Void call(Configuration conf) {
                preemptionPolicy = createPreemptionPolicy(conf);
                preemptionPolicy.init(context);
                return null;
            }
        });
        //service to handle requests to TaskUmbilicalProtocol
        taskAttemptListener = createTaskAttemptListener(context, preemptionPolicy);
        addIfService(taskAttemptListener);
        //service to log job history events
        EventHandler<JobHistoryEvent> historyService = createJobHistoryHandler(context);
        dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class, historyService);
        this.jobEventDispatcher = new JobEventDispatcher();
        //register the event dispatchers
        dispatcher.register(JobEventType.class, jobEventDispatcher);
        dispatcher.register(TaskEventType.class, new TaskEventDispatcher());
        dispatcher.register(TaskAttemptEventType.class, new TaskAttemptEventDispatcher());
        dispatcher.register(CommitterEventType.class, committerEventHandler);
        if (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false) || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false)) {
            //optional service to speculate on task attempts' progress
            speculator = createSpeculator(conf, context);
            addIfService(speculator);
        }
        speculatorEventDispatcher = new SpeculatorEventDispatcher(conf);
        dispatcher.register(Speculator.EventType.class, speculatorEventDispatcher);
        // Now that there's a FINISHING state for application on RM to give AMs
        // plenty of time to clean up after unregister it's safe to clean staging
        // directory after unregistering with RM. So, we start the staging-dir
        // cleaner BEFORE the ContainerAllocator so that on shut-down,
        // ContainerAllocator unregisters first and then the staging-dir cleaner
        // deletes staging directory.
        addService(createStagingDirCleaningService());
        // service to allocate containers from RM (if non-uber) or to fake it (uber)
        addIfService(containerAllocator);
        dispatcher.register(ContainerAllocator.EventType.class, containerAllocator);
        // corresponding service to launch allocated containers via NodeManager
        containerLauncher = createContainerLauncher(context);
        addIfService(containerLauncher);
        dispatcher.register(ContainerLauncher.EventType.class, containerLauncher);
        // Add the JobHistoryEventHandler last so that it is properly stopped first.
        // This will guarantee that all history-events are flushed before AM goes
        // ahead with shutdown.
        // Note: Even though JobHistoryEventHandler is started last, if any
        // component creates a JobHistoryEvent in the meanwhile, it will just
        // be queued inside the JobHistoryEventHandler.
        addIfService(historyService);
    }
    super.serviceInit(conf);
}
Also used:
JobHistoryCopyService (org.apache.hadoop.mapreduce.jobhistory.JobHistoryCopyService)
Configuration (org.apache.hadoop.conf.Configuration)
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)
JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent)
ContainerLauncher (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher)
LocalContainerLauncher (org.apache.hadoop.mapred.LocalContainerLauncher)
FileSystem (org.apache.hadoop.fs.FileSystem)
Path (org.apache.hadoop.fs.Path)
IOException (java.io.IOException)
ContainerAllocator (org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator)
RMContainerAllocator (org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator)
LocalContainerAllocator (org.apache.hadoop.mapreduce.v2.app.local.LocalContainerAllocator)
DefaultSpeculator (org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator)
Speculator (org.apache.hadoop.mapreduce.v2.app.speculate.Speculator)
YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException)
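
The snippet above follows a simple pattern: create the dispatcher first, build the monitor around the dispatcher's event handler, and register both as child services so they share the AM's lifecycle. A minimal sketch of that pattern, assuming a CompositeService subclass (the class name MiniAppMaster and everything outside the Hadoop types is a hypothetical reduction, not MRAppMaster itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.event.AsyncDispatcher;

// Hypothetical, condensed illustration of the wiring in MRAppMaster#serviceInit.
public class MiniAppMaster extends CompositeService {

    private AsyncDispatcher dispatcher;
    private TaskAttemptFinishingMonitor taskAttemptFinishingMonitor;

    public MiniAppMaster() {
        super(MiniAppMaster.class.getName());
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        // The dispatcher must exist first: the monitor reports timed-out
        // attempts through its event handler.
        dispatcher = new AsyncDispatcher();
        addService(dispatcher);
        taskAttemptFinishingMonitor =
            new TaskAttemptFinishingMonitor(dispatcher.getEventHandler());
        // Registering both as child services gives them init/start/stop for free.
        addService(taskAttemptFinishingMonitor);
        super.serviceInit(conf);
    }
}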

Example 2 with TaskAttemptFinishingMonitor

Use of org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor in the Apache Hadoop project.

From the class TestTaskAttemptFinishingMonitor, the method testFinshingAttemptTimeout:

@Test
public void testFinshingAttemptTimeout() throws IOException, InterruptedException {
    SystemClock clock = SystemClock.getInstance();
    Configuration conf = new Configuration();
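    // Shrink the exit timeout and its check interval so the monitor expires
    // the finishing attempt well within the test's polling loop below.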
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 100);
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10);
    AppContext appCtx = mock(AppContext.class);
    JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
    RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptFinishingMonitor taskAttemptFinishingMonitor = new TaskAttemptFinishingMonitor(eventHandler);
    taskAttemptFinishingMonitor.init(conf);
    taskAttemptFinishingMonitor.start();
    when(appCtx.getEventHandler()).thenReturn(eventHandler);
    when(appCtx.getNMHostname()).thenReturn("0.0.0.0");
    when(appCtx.getTaskAttemptFinishingMonitor()).thenReturn(taskAttemptFinishingMonitor);
    when(appCtx.getClock()).thenReturn(clock);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(appCtx);
    TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy);
    listener.init(conf);
    listener.start();
    JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
    TaskId tid = MRBuilderUtils.newTaskId(jid, 0, org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
    appCtx.getTaskAttemptFinishingMonitor().register(attemptId);
    int check = 0;
    while (!eventHandler.timedOut && check++ < 10) {
        Thread.sleep(100);
    }
    taskAttemptFinishingMonitor.stop();
    assertTrue("Finishing attempt didn't time out.", eventHandler.timedOut);
}
Also used:
RMHeartbeatHandler (org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler)
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId)
SystemClock (org.apache.hadoop.yarn.util.SystemClock)
Configuration (org.apache.hadoop.conf.Configuration)
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId)
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext)
CheckpointAMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy)
TaskAttemptFinishingMonitor (org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor)
JobTokenSecretManager (org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager)
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
Test (org.junit.Test)
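
The test polls a MockEventHandler.timedOut flag, but the handler's body is not shown in the snippet. A minimal sketch of such a handler, assuming it only needs to observe the TA_TIMED_OUT event the monitor emits on expiry (the real class is an inner class of TestTaskAttemptFinishingMonitor; this body is an assumption):

import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;

// Sketch of the handler the test polls; the body is an assumption based on
// the test's use of the timedOut flag.
public static class MockEventHandler implements EventHandler<Event> {

    public volatile boolean timedOut = false;

    @Override
    public void handle(Event event) {
        // The monitor emits a TaskAttemptEvent of type TA_TIMED_OUT for
        // attempts that never unregister; flip the flag the test polls on.
        if (event instanceof TaskAttemptEvent
            && ((TaskAttemptEvent) event).getType()
                == TaskAttemptEventType.TA_TIMED_OUT) {
            timedOut = true;
        }
    }
}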

Example 3 with TaskAttemptFinishingMonitor

Use of org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor in the Apache Hadoop project.

From the class TestTaskAttempt, the method setupTaskAttemptFinishingMonitor:

private void setupTaskAttemptFinishingMonitor(EventHandler eventHandler, JobConf jobConf, AppContext appCtx) {
    TaskAttemptFinishingMonitor taskAttemptFinishingMonitor = new TaskAttemptFinishingMonitor(eventHandler);
    taskAttemptFinishingMonitor.init(jobConf);
    when(appCtx.getTaskAttemptFinishingMonitor()).thenReturn(taskAttemptFinishingMonitor);
}
Also used:
TaskAttemptFinishingMonitor (org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor)
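
For context, a hypothetical call site for this helper; the test name, mocks, and JobConf here are illustrative assumptions, not code from TestTaskAttempt:

import static org.mockito.Mockito.mock;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Test;

// Hypothetical usage of the helper above.
@Test
public void testWithFinishingMonitor() {
    EventHandler eventHandler = mock(EventHandler.class);
    JobConf jobConf = new JobConf();
    AppContext appCtx = mock(AppContext.class);
    // After this call, appCtx.getTaskAttemptFinishingMonitor() returns an
    // initialized (but not started) monitor, which is enough for attempt
    // state-machine tests that only register and unregister attempts.
    setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
}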

Aggregations

Configuration (org.apache.hadoop.conf.Configuration): 2
TaskAttemptFinishingMonitor (org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor): 2
IOException (java.io.IOException): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
Path (org.apache.hadoop.fs.Path): 1
LocalContainerLauncher (org.apache.hadoop.mapred.LocalContainerLauncher): 1
JobHistoryCopyService (org.apache.hadoop.mapreduce.jobhistory.JobHistoryCopyService): 1
JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent): 1
JobTokenSecretManager (org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager): 1
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 1
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 1
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 1
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 1
ContainerLauncher (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher): 1
LocalContainerAllocator (org.apache.hadoop.mapreduce.v2.app.local.LocalContainerAllocator): 1
ContainerAllocator (org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator): 1
RMContainerAllocator (org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator): 1
RMHeartbeatHandler (org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler): 1
CheckpointAMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy): 1
DefaultSpeculator (org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator): 1