
Example 36 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hive by apache.

the class PartialScanTask method execute.

/**
 * start a new map-reduce job to do a partial scan to calculate Stats,
 * almost the same as BlockMergeTask or ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
    HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
    success = true;
    HiveFileFormatUtils.prepareJobOutput(job);
    job.setOutputFormat(HiveOutputFormatImpl.class);
    job.setMapperClass(work.getMapperClass());
    Context ctx = driverContext.getCtx();
    boolean ctxCreated = false;
    try {
        if (ctx == null) {
            ctx = new Context(job);
            ctxCreated = true;
        }
    } catch (IOException e) {
        e.printStackTrace();
        console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return 5;
    }
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    if (work.getNumMapTasks() != null) {
        job.setNumMapTasks(work.getNumMapTasks());
    }
    // zero reducers
    job.setNumReduceTasks(0);
    if (work.getMinSplitSize() != null) {
        HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
    }
    if (work.getInputformat() != null) {
        HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
    }
    String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
    LOG.info("Using " + inpFormat);
    try {
        job.setInputFormat(JavaUtils.loadClass(inpFormat));
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    int returnVal = 0;
    RunningJob rj = null;
    boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
    String jobName = null;
    if (noName && this.getQueryPlan() != null) {
        int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
        jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
    }
    if (noName) {
        // This is for a special case to ensure unit tests pass
        job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
    }
    // pass aggregation key to mapper
    HiveConf.setVar(job, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX, work.getAggKey());
    job.set(StatsSetupConst.STATS_TMP_LOC, work.getStatsTmpDir());
    try {
        addInputPaths(job, work);
        MapredWork mrWork = new MapredWork();
        mrWork.setMapWork(work);
        Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
        // remove the pwd from the conf file so that the job tracker doesn't show it in its logs
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        JobClient jc = new JobClient(job);
        String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
        if (!addedJars.isEmpty()) {
            job.set("tmpjars", addedJars);
        }
        // make this client wait if the job tracker is not behaving well.
        Throttle.checkJobTracker(job, LOG);
        if (work.isGatheringStats()) {
            // initialize stats publishing table
            StatsPublisher statsPublisher;
            StatsFactory factory = StatsFactory.newFactory(job);
            if (factory != null) {
                statsPublisher = factory.getStatsPublisher();
                StatsCollectionContext sc = new StatsCollectionContext(job);
                sc.setStatsTmpDir(work.getStatsTmpDir());
                if (!statsPublisher.init(sc)) {
                    // init() creates the stats table if it does not exist; bail out if that failed and stats must be reliable
                    if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
                        throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
                    }
                }
            }
        }
        // Finally SUBMIT the JOB!
        rj = jc.submitJob(job);
        this.jobID = rj.getJobID();
        returnVal = jobExecHelper.progress(rj, jc, ctx);
        success = (returnVal == 0);
    } catch (Exception e) {
        e.printStackTrace();
        setException(e);
        String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
        if (rj != null) {
            mesg = "Ended Job = " + rj.getJobID() + mesg;
        } else {
            mesg = "Job Submission failed" + mesg;
        }
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        success = false;
        returnVal = 1;
    } finally {
        try {
            if (ctxCreated) {
                ctx.clear();
            }
            if (rj != null) {
                if (returnVal != 0) {
                    rj.killJob();
                }
            }
        } catch (Exception e) {
            LOG.warn("Failed in cleaning up ", e);
        } finally {
            HadoopJobExecHelper.runningJobs.remove(rj);
        }
    }
    return (returnVal);
}
Also used : Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) StatsCollectionContext(org.apache.hadoop.hive.ql.stats.StatsCollectionContext) CombineHiveInputFormat(org.apache.hadoop.hive.ql.io.CombineHiveInputFormat) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException) JobClient(org.apache.hadoop.mapred.JobClient) StatsPublisher(org.apache.hadoop.hive.ql.stats.StatsPublisher) StatsFactory(org.apache.hadoop.hive.ql.stats.StatsFactory) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) RunningJob(org.apache.hadoop.mapred.RunningJob)
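
The task above follows the usual map-only pattern: zero reducers, NullWritable keys and values, and asynchronous submission through JobClient. Below is a stripped-down sketch of that skeleton, assuming a hypothetical NoOpMapper in place of Hive's mapper class and plain FileInputFormat/FileOutputFormat instead of the Hive input and output formats; it is an illustration of the pattern, not Hive code.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.RunningJob;

public class MapOnlyJobSketch {

    // A mapper that emits nothing; as in the Hive scan/merge mappers, any useful
    // work would happen as a side effect of reading the input splits.
    public static class NoOpMapper extends MapReduceBase
            implements Mapper<LongWritable, Text, NullWritable, NullWritable> {
        @Override
        public void map(LongWritable key, Text value,
                        OutputCollector<NullWritable, NullWritable> out, Reporter reporter)
                throws IOException {
            // side-effect-only mapper: nothing is collected
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf job = new JobConf(MapOnlyJobSketch.class);
        job.setJobName("map-only-scan");
        job.setMapperClass(NoOpMapper.class);
        job.setNumReduceTasks(0);                      // map-only: no shuffle, no reducers
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(NullWritable.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        JobClient jc = new JobClient(job);
        RunningJob rj = jc.submitJob(job);             // asynchronous submission
        System.out.println("Submitted " + rj.getID());
        rj.waitForCompletion();                        // the Hive task instead hands rj to a progress helper
        System.exit(rj.isSuccessful() ? 0 : 1);
    }
}

The Hive task differs mainly in that it passes rj to jobExecHelper.progress(rj, jc, ctx) for incremental reporting instead of blocking on waitForCompletion().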

Example 37 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hive by apache.

the class HadoopJobExecHelper method progress.

private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockException {
    JobClient jc = th.getJobClient();
    RunningJob rj = th.getRunningJob();
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
    //DecimalFormat longFormatter = new DecimalFormat("###,###");
    long reportTime = System.currentTimeMillis();
    long maxReportInterval = HiveConf.getTimeVar(job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS);
    boolean fatal = false;
    StringBuilder errMsg = new StringBuilder();
    long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
    boolean initializing = true;
    boolean initOutputPrinted = false;
    long cpuMsec = -1;
    int numMap = -1;
    int numReduce = -1;
    List<ClientStatsPublisher> clientStatPublishers = getClientStatPublishers();
    final boolean localMode = ShimLoader.getHadoopShims().isLocalMode(job);
    while (!rj.isComplete()) {
        if (th.getContext() != null) {
            th.getContext().checkHeartbeaterLockException();
        }
        try {
            Thread.sleep(pullInterval);
        } catch (InterruptedException e) {
            // ignore; the loop re-checks rj.isComplete() on the next iteration
        }
        if (initializing && rj.getJobState() == JobStatus.PREP) {
            // No reason to poll until the job is initialized
            continue;
        } else {
            // By now the job is initialized so no reason to do
            // rj.getJobState() again and we do not want to do an extra RPC call
            initializing = false;
        }
        if (!localMode) {
            if (!initOutputPrinted) {
                SessionState ss = SessionState.get();
                String logMapper;
                String logReducer;
                TaskReport[] mappers = jc.getMapTaskReports(rj.getID());
                if (mappers == null) {
                    logMapper = "no information for number of mappers; ";
                } else {
                    numMap = mappers.length;
                    if (ss != null) {
                        ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
                    }
                    logMapper = "number of mappers: " + numMap + "; ";
                }
                TaskReport[] reducers = jc.getReduceTaskReports(rj.getID());
                if (reducers == null) {
                    logReducer = "no information for number of reducers. ";
                } else {
                    numReduce = reducers.length;
                    if (ss != null) {
                        ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
                    }
                    logReducer = "number of reducers: " + numReduce;
                }
                console.printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
                initOutputPrinted = true;
            }
            RunningJob newRj = jc.getJob(rj.getID());
            if (newRj == null) {
                // the job's status can no longer be looked up, so raise a meaningful exception
                throw new IOException("Could not find status of job:" + rj.getID());
            } else {
                th.setRunningJob(newRj);
                rj = newRj;
            }
        }
        // once a fatal error has killed the job, just wait for it to complete rather than let it retry several times, which eventually leads to failure.
        if (fatal) {
            // wait until rj.isComplete
            continue;
        }
        Counters ctrs = th.getCounters();
        if (fatal = checkFatalErrors(ctrs, errMsg)) {
            console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
            rj.killJob();
            continue;
        }
        errMsg.setLength(0);
        updateCounters(ctrs, rj);
        // Prepare data for Client Stat Publishers (if any present) and execute them
        if (clientStatPublishers.size() > 0 && ctrs != null) {
            Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
            for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
                try {
                    clientStatPublisher.run(extractedCounters, rj.getID().toString());
                } catch (RuntimeException runtimeException) {
                    LOG.error("Exception " + runtimeException.getClass().getCanonicalName() + " thrown when running clientStatsPublishers. The stack trace is: ", runtimeException);
                }
            }
        }
        if (mapProgress == lastMapProgress && reduceProgress == lastReduceProgress && System.currentTimeMillis() < reportTime + maxReportInterval) {
            continue;
        }
        StringBuilder report = new StringBuilder();
        report.append(dateFormat.format(Calendar.getInstance().getTime()));
        report.append(' ').append(getId());
        report.append(" map = ").append(mapProgress).append("%, ");
        report.append(" reduce = ").append(reduceProgress).append('%');
        // find out the cumulative CPU msecs; if the counter is unavailable, skip printing it out.
        if (ctrs != null) {
            Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
            if (counterCpuMsec != null) {
                long newCpuMSec = counterCpuMsec.getValue();
                if (newCpuMSec > 0) {
                    cpuMsec = newCpuMSec;
                    report.append(", Cumulative CPU ").append((cpuMsec / 1000D)).append(" sec");
                }
            }
        }
        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = report.toString();
        SessionState ss = SessionState.get();
        if (ss != null) {
            ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
            ss.getHiveHistory().setTaskProperty(queryId, getId(), Keys.TASK_HADOOP_PROGRESS, output);
            if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
                ss.getHiveHistory().progressTask(queryId, this.task);
                this.callBackObj.logPlanProgress(ss);
            }
        }
        console.printInfo(output);
        task.setStatusMessage(output);
        reportTime = System.currentTimeMillis();
    }
    Counters ctrs = th.getCounters();
    if (ctrs != null) {
        Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter", "CPU_MILLISECONDS");
        if (counterCpuMsec != null) {
            long newCpuMSec = counterCpuMsec.getValue();
            if (newCpuMSec > cpuMsec) {
                cpuMsec = newCpuMSec;
            }
        }
    }
    if (cpuMsec > 0) {
        String status = "MapReduce Total cumulative CPU time: " + Utilities.formatMsecToStr(cpuMsec);
        console.printInfo(status);
        task.setStatusMessage(status);
    }
    boolean success;
    if (fatal) {
        success = false;
    } else {
        // the last check before the job is completed
        if (checkFatalErrors(ctrs, errMsg)) {
            console.printError("[Fatal Error] " + errMsg.toString());
            success = false;
        } else {
            SessionState ss = SessionState.get();
            if (ss != null) {
                ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
            }
            success = rj.isSuccessful();
        }
    }
    MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());
    mapRedStats.setCounters(ctrs);
    // update based on the final value of the counters
    updateCounters(ctrs, rj);
    SessionState ss = SessionState.get();
    if (ss != null) {
        this.callBackObj.logPlanProgress(ss);
    }
    // LOG.info(queryPlan);
    return mapRedStats;
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) TaskReport(org.apache.hadoop.mapred.TaskReport) IOException(java.io.IOException) JobClient(org.apache.hadoop.mapred.JobClient) Counter(org.apache.hadoop.mapred.Counters.Counter) ClientStatsPublisher(org.apache.hadoop.hive.ql.stats.ClientStatsPublisher) RunningJob(org.apache.hadoop.mapred.RunningJob) Counters(org.apache.hadoop.mapred.Counters) MapRedStats(org.apache.hadoop.hive.ql.MapRedStats) SimpleDateFormat(java.text.SimpleDateFormat)
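
Stripped of Hive's history, counter, and logging plumbing, the core of progress() is a polling loop: sleep, skip polling while the job is still in PREP, refresh the RunningJob handle through the JobClient, and report map/reduce progress until isComplete() returns true. The following is a minimal sketch of that loop; the pull interval is a plain parameter here rather than the HIVECOUNTERSPULLINTERVAL setting.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;

public final class JobProgressPoller {

    /** Polls a submitted job until it finishes; returns true if it succeeded. */
    public static boolean poll(JobClient jc, RunningJob rj, long pullIntervalMs)
            throws IOException, InterruptedException {
        boolean initializing = true;
        while (!rj.isComplete()) {
            Thread.sleep(pullIntervalMs);
            if (initializing && rj.getJobState() == JobStatus.PREP) {
                continue;                       // no point polling before the job is initialized
            }
            initializing = false;
            // Refresh the handle so the progress figures are current.
            RunningJob refreshed = jc.getJob(rj.getID());
            if (refreshed == null) {
                throw new IOException("Could not find status of job: " + rj.getID());
            }
            rj = refreshed;
            System.out.printf("map = %.0f%%, reduce = %.0f%%%n",
                    rj.mapProgress() * 100, rj.reduceProgress() * 100);
        }
        return rj.isSuccessful();
    }
}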

Example 38 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hive by apache.

the class MergeFileTask method execute.

/**
   * start a new map-reduce job to do the merge, almost the same as ExecDriver.
   */
@Override
public int execute(DriverContext driverContext) {
    Context ctx = driverContext.getCtx();
    boolean ctxCreated = false;
    RunningJob rj = null;
    int returnVal = 0;
    try {
        if (ctx == null) {
            ctx = new Context(job);
            ctxCreated = true;
        }
        HiveFileFormatUtils.prepareJobOutput(job);
        job.setInputFormat(work.getInputformatClass());
        job.setOutputFormat(HiveOutputFormatImpl.class);
        job.setMapperClass(MergeFileMapper.class);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(NullWritable.class);
        job.setNumReduceTasks(0);
        // create the temp directories
        Path outputPath = work.getOutputDir();
        Path tempOutPath = Utilities.toTempPath(outputPath);
        FileSystem fs = tempOutPath.getFileSystem(job);
        if (!fs.exists(tempOutPath)) {
            fs.mkdirs(tempOutPath);
        }
        ExecDriver.propagateSplitSettings(job, work);
        // set job name
        boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
        String jobName = null;
        if (noName && this.getQueryPlan() != null) {
            int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
            jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
        }
        if (noName) {
            // This is for a special case to ensure unit tests pass
            job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
        }
        // add input path
        addInputPaths(job, work);
        // serialize work
        Utilities.setMapWork(job, work, ctx.getMRTmpPath(), true);
        // remove the pwd from the conf file so that the job tracker doesn't show it in its logs
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        // submit the job
        JobClient jc = new JobClient(job);
        String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
        if (!addedJars.isEmpty()) {
            job.set("tmpjars", addedJars);
        }
        // make this client wait if the job tracker is not behaving well.
        Throttle.checkJobTracker(job, LOG);
        // Finally SUBMIT the JOB!
        rj = jc.submitJob(job);
        this.jobID = rj.getJobID();
        returnVal = jobExecHelper.progress(rj, jc, ctx);
        success = (returnVal == 0);
    } catch (Exception e) {
        setException(e);
        String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
        if (rj != null) {
            mesg = "Ended Job = " + rj.getJobID() + mesg;
        } else {
            mesg = "Job Submission failed" + mesg;
        }
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        success = false;
        returnVal = 1;
    } finally {
        try {
            if (ctxCreated) {
                ctx.clear();
            }
            if (rj != null) {
                if (returnVal != 0) {
                    rj.killJob();
                }
            }
            // get the list of Dynamic partition paths
            if (rj != null) {
                if (work.getAliasToWork() != null) {
                    for (Operator<? extends OperatorDesc> op : work.getAliasToWork().values()) {
                        op.jobClose(job, success);
                    }
                }
            }
        } catch (Exception e) {
            // jobClose needs to execute successfully, otherwise fail the task
            LOG.warn("Job close failed ", e);
            if (success) {
                setException(e);
                success = false;
                returnVal = 3;
                String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'";
                console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
            }
        } finally {
            HadoopJobExecHelper.runningJobs.remove(rj);
        }
    }
    return returnVal;
}
Also used : Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) RunningJob(org.apache.hadoop.mapred.RunningJob) JobClient(org.apache.hadoop.mapred.JobClient) IOException(java.io.IOException)
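
A detail shared by these tasks is the finally block: if the method is returning non-zero and a RunningJob handle exists, the job is killed so it does not keep running after the client has given up on it. Here is a condensed sketch of that kill-on-failure idiom; the return codes are simplified assumptions and the progress helper is replaced by a blocking wait.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public final class KillOnFailure {

    /** Submits the job and guarantees it is killed if this method cannot report success. */
    public static int submitWithCleanup(JobConf job) {
        RunningJob rj = null;
        int returnVal = 0;
        try {
            JobClient jc = new JobClient(job);
            rj = jc.submitJob(job);
            rj.waitForCompletion();
            returnVal = rj.isSuccessful() ? 0 : 2;
        } catch (Exception e) {
            returnVal = 1;
        } finally {
            try {
                if (rj != null && returnVal != 0) {
                    rj.killJob();               // don't leave a half-finished job running
                }
            } catch (IOException e) {
                // cleanup failures are logged and swallowed in the Hive tasks as well
            }
        }
        return returnVal;
    }
}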

Example 39 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hive by apache.

the class ColumnTruncateTask method execute.

/**
 * start a new map-reduce job to do the truncation, almost the same as ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
    HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, BucketizedHiveInputFormat.class.getName());
    success = true;
    HiveFileFormatUtils.prepareJobOutput(job);
    job.setOutputFormat(HiveOutputFormatImpl.class);
    job.setMapperClass(work.getMapperClass());
    Context ctx = driverContext.getCtx();
    boolean ctxCreated = false;
    try {
        if (ctx == null) {
            ctx = new Context(job);
            ctxCreated = true;
        }
    } catch (IOException e) {
        e.printStackTrace();
        console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return 5;
    }
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    if (work.getNumMapTasks() != null) {
        job.setNumMapTasks(work.getNumMapTasks());
    }
    // zero reducers
    job.setNumReduceTasks(0);
    if (work.getMinSplitSize() != null) {
        HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
    }
    if (work.getInputformat() != null) {
        HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
    }
    String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
    LOG.info("Using " + inpFormat);
    try {
        job.setInputFormat(JavaUtils.loadClass(inpFormat));
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
    Path outputPath = this.work.getOutputDir();
    Path tempOutPath = Utilities.toTempPath(outputPath);
    try {
        FileSystem fs = tempOutPath.getFileSystem(job);
        if (!fs.exists(tempOutPath)) {
            fs.mkdirs(tempOutPath);
        }
    } catch (IOException e) {
        console.printError("Can't make path " + outputPath + " : " + e.getMessage());
        return 6;
    }
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    int returnVal = 0;
    RunningJob rj = null;
    boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
    String jobName = null;
    if (noName && this.getQueryPlan() != null) {
        int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
        jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
    }
    if (noName) {
        // This is for a special case to ensure unit tests pass
        job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
    }
    try {
        addInputPaths(job, work);
        MapredWork mrWork = new MapredWork();
        mrWork.setMapWork(work);
        Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
        // remove the pwd from the conf file so that the job tracker doesn't show it in its logs
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        JobClient jc = new JobClient(job);
        String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
        if (!addedJars.isEmpty()) {
            job.set("tmpjars", addedJars);
        }
        // make this client wait if the job tracker is not behaving well.
        Throttle.checkJobTracker(job, LOG);
        // Finally SUBMIT the JOB!
        rj = jc.submitJob(job);
        this.jobID = rj.getJobID();
        returnVal = jobExecHelper.progress(rj, jc, ctx);
        success = (returnVal == 0);
    } catch (Exception e) {
        e.printStackTrace();
        setException(e);
        String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
        if (rj != null) {
            mesg = "Ended Job = " + rj.getJobID() + mesg;
        } else {
            mesg = "Job Submission failed" + mesg;
        }
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        success = false;
        returnVal = 1;
    } finally {
        try {
            if (ctxCreated) {
                ctx.clear();
            }
            if (rj != null) {
                if (returnVal != 0) {
                    rj.killJob();
                }
            }
            ColumnTruncateMapper.jobClose(outputPath, success, job, console, work.getDynPartCtx(), null);
        } catch (Exception e) {
            LOG.warn("Failed while cleaning up ", e);
        } finally {
            HadoopJobExecHelper.runningJobs.remove(rj);
        }
    }
    return (returnVal);
}
Also used : Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) Path(org.apache.hadoop.fs.Path) BucketizedHiveInputFormat(org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat) IOException(java.io.IOException) JobClient(org.apache.hadoop.mapred.JobClient) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) FileSystem(org.apache.hadoop.fs.FileSystem) RunningJob(org.apache.hadoop.mapred.RunningJob)
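
Like MergeFileTask, this task makes sure a temporary output directory exists under the final output path before submitting. The sketch below shows only that check; toTempPath is a local stand-in for Hive's Utilities.toTempPath, whose exact naming scheme may differ.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public final class TempOutputDir {

    /** Derives a sibling temp path for the output directory (placeholder for Utilities.toTempPath). */
    static Path toTempPath(Path outputPath) {
        return new Path(outputPath.getParent(), "_tmp." + outputPath.getName());
    }

    /** Ensures the temp output directory exists on the output path's filesystem. */
    static Path ensureTempOutput(JobConf job, Path outputPath) throws IOException {
        Path tempOutPath = toTempPath(outputPath);
        FileSystem fs = tempOutPath.getFileSystem(job);
        if (!fs.exists(tempOutPath)) {
            fs.mkdirs(tempOutPath);            // created up front so the mappers can write into it
        }
        return tempOutPath;
    }
}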

Example 40 with RunningJob

use of org.apache.hadoop.mapred.RunningJob in project hive by apache.

the class CompactorMR method launchCompactionJob.

private void launchCompactionJob(JobConf job, Path baseDir, CompactionType compactionType, StringableList dirsToSearch, List<AcidUtils.ParsedDelta> parsedDeltas, int curDirNumber, int obsoleteDirNumber, HiveConf hiveConf, TxnStore txnHandler, long id) throws IOException {
    job.setBoolean(IS_MAJOR, compactionType == CompactionType.MAJOR);
    if (dirsToSearch == null) {
        dirsToSearch = new StringableList();
    }
    StringableList deltaDirs = new StringableList();
    long minTxn = Long.MAX_VALUE;
    long maxTxn = Long.MIN_VALUE;
    for (AcidUtils.ParsedDelta delta : parsedDeltas) {
        LOG.debug("Adding delta " + delta.getPath() + " to directories to search");
        dirsToSearch.add(delta.getPath());
        deltaDirs.add(delta.getPath());
        minTxn = Math.min(minTxn, delta.getMinTransaction());
        maxTxn = Math.max(maxTxn, delta.getMaxTransaction());
    }
    if (baseDir != null)
        job.set(BASE_DIR, baseDir.toString());
    job.set(DELTA_DIRS, deltaDirs.toString());
    job.set(DIRS_TO_SEARCH, dirsToSearch.toString());
    job.setLong(MIN_TXN, minTxn);
    job.setLong(MAX_TXN, maxTxn);
    if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)) {
        mrJob = job;
    }
    LOG.info("Submitting " + compactionType + " compaction job '" + job.getJobName() + "' to " + job.getQueueName() + " queue.  " + "(current delta dirs count=" + curDirNumber + ", obsolete delta dirs count=" + obsoleteDirNumber + ". TxnIdRange[" + minTxn + "," + maxTxn + "]");
    RunningJob rj = new JobClient(job).submitJob(job);
    LOG.info("Submitted compaction job '" + job.getJobName() + "' with jobID=" + rj.getID() + " compaction ID=" + id);
    txnHandler.setHadoopJobId(rj.getID().toString(), id);
    rj.waitForCompletion();
}
Also used : RunningJob(org.apache.hadoop.mapred.RunningJob) JobClient(org.apache.hadoop.mapred.JobClient) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils)
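
Unlike the execution tasks above, the compactor does not drive its own polling loop: it submits the job and simply blocks on waitForCompletion(). Reduced to the submission pattern alone, with the compaction-specific configuration and the transaction-handler call left out, the sketch looks like this:

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public final class BlockingSubmit {

    /** Submits the job, records its id for external tracking, and blocks until it finishes. */
    public static String submitAndWait(JobConf job) throws IOException {
        RunningJob rj = new JobClient(job).submitJob(job);
        String hadoopJobId = rj.getID().toString();    // e.g. handed to a transaction handler
        rj.waitForCompletion();                        // blocks; no intermediate progress reporting
        return hadoopJobId;
    }
}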

Aggregations

RunningJob (org.apache.hadoop.mapred.RunningJob): 61 uses
JobConf (org.apache.hadoop.mapred.JobConf): 45 uses
Path (org.apache.hadoop.fs.Path): 35 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 24 uses
JobClient (org.apache.hadoop.mapred.JobClient): 20 uses
IOException (java.io.IOException): 15 uses
Counters (org.apache.hadoop.mapred.Counters): 14 uses
Group (org.apache.hadoop.mapred.Counters.Group): 13 uses
DMLConfig (org.apache.sysml.conf.DMLConfig): 13 uses
Configuration (org.apache.hadoop.conf.Configuration): 7 uses
MatrixChar_N_ReducerGroups (org.apache.sysml.runtime.matrix.mapred.MRJobConfiguration.MatrixChar_N_ReducerGroups): 7 uses
DataOutputStream (java.io.DataOutputStream): 6 uses
File (java.io.File): 5 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 5 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 5 uses
Text (org.apache.hadoop.io.Text): 5 uses
DMLRuntimeException (org.apache.sysml.runtime.DMLRuntimeException): 5 uses
InputInfo (org.apache.sysml.runtime.matrix.data.InputInfo): 5 uses
Test (org.junit.Test): 5 uses
URI (java.net.URI): 4 uses