
Example 11 with DriverContext

Use of org.apache.hadoop.hive.ql.DriverContext in project hive by apache.

The class SparkProcessAnalyzeTable, method handlePartialScanCommand.

/**
   * Handle the partial scan command.
   *
   * It is composed of a PartialScanTask followed by a StatsTask.
   */
private void handlePartialScanCommand(TableScanOperator tableScan, ParseContext parseContext, StatsWork statsWork, GenSparkProcContext context, Task<StatsWork> statsTask) throws SemanticException {
    String aggregationKey = tableScan.getConf().getStatsAggPrefix();
    StringBuilder aggregationKeyBuffer = new StringBuilder(aggregationKey);
    List<Path> inputPaths = GenMapRedUtils.getInputPathsForPartialScan(tableScan, aggregationKeyBuffer);
    aggregationKey = aggregationKeyBuffer.toString();
    // scan work
    PartialScanWork scanWork = new PartialScanWork(inputPaths);
    scanWork.setMapperCannotSpanPartns(true);
    scanWork.setAggKey(aggregationKey);
    scanWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir(), parseContext.getConf());
    // stats work
    statsWork.setPartialScanAnalyzeCommand(true);
    // partial scan task
    DriverContext driverCxt = new DriverContext();
    @SuppressWarnings("unchecked") Task<PartialScanWork> partialScanTask = TaskFactory.get(scanWork, parseContext.getConf());
    partialScanTask.initialize(parseContext.getQueryState(), null, driverCxt, tableScan.getCompilationOpContext());
    partialScanTask.setWork(scanWork);
    statsWork.setSourceTask(partialScanTask);
    // task dependency
    context.rootTasks.remove(context.currentTask);
    context.rootTasks.add(partialScanTask);
    partialScanTask.addDependentTask(statsTask);
}
Also used : Path(org.apache.hadoop.fs.Path) DriverContext(org.apache.hadoop.hive.ql.DriverContext) PartialScanWork(org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanWork)
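
In the method above, the DriverContext is created only so the freshly built task can be initialized; nothing is executed yet. A minimal sketch of that call sequence, pulled out into a hypothetical helper (the helper class and its parameters are illustrative; the Hive calls and types are the ones used in the example):

import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.io.rcfile.stats.PartialScanWork;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.plan.StatsWork;

/** Hypothetical helper showing the DriverContext/TaskFactory call sequence from the example above. */
final class PartialScanTaskSketch {

    @SuppressWarnings("unchecked")
    static Task<PartialScanWork> buildPartialScanTask(PartialScanWork scanWork, ParseContext parseContext,
            TableScanOperator tableScan, Task<StatsWork> statsTask) {
        // A fresh DriverContext is enough at this point; the task is not executed here.
        DriverContext driverCxt = new DriverContext();
        // TaskFactory picks the Task implementation that matches the work object.
        Task<PartialScanWork> partialScanTask = TaskFactory.get(scanWork, parseContext.getConf());
        // Bind the task to the query state, the driver context and the compilation context.
        partialScanTask.initialize(parseContext.getQueryState(), null, driverCxt, tableScan.getCompilationOpContext());
        partialScanTask.setWork(scanWork);
        // Dependencies are declared on the task graph, not on the work objects.
        partialScanTask.addDependentTask(statsTask);
        return partialScanTask;
    }
}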

Example 12 with DriverContext

Use of org.apache.hadoop.hive.ql.DriverContext in project hive by apache.

The class ColumnTruncateTask, method execute.

/**
 * Start a new map-reduce job to do the truncation, almost the same as ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
    HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, BucketizedHiveInputFormat.class.getName());
    success = true;
    HiveFileFormatUtils.prepareJobOutput(job);
    job.setOutputFormat(HiveOutputFormatImpl.class);
    job.setMapperClass(work.getMapperClass());
    Context ctx = driverContext.getCtx();
    boolean ctxCreated = false;
    try {
        if (ctx == null) {
            ctx = new Context(job);
            ctxCreated = true;
        }
    } catch (IOException e) {
        e.printStackTrace();
        console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return 5;
    }
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    if (work.getNumMapTasks() != null) {
        job.setNumMapTasks(work.getNumMapTasks());
    }
    // zero reducers
    job.setNumReduceTasks(0);
    if (work.getMinSplitSize() != null) {
        HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
    }
    if (work.getInputformat() != null) {
        HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
    }
    String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
    LOG.info("Using " + inpFormat);
    try {
        job.setInputFormat(JavaUtils.loadClass(inpFormat));
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
    Path outputPath = this.work.getOutputDir();
    Path tempOutPath = Utilities.toTempPath(outputPath);
    try {
        FileSystem fs = tempOutPath.getFileSystem(job);
        if (!fs.exists(tempOutPath)) {
            fs.mkdirs(tempOutPath);
        }
    } catch (IOException e) {
        console.printError("Can't make path " + outputPath + " : " + e.getMessage());
        return 6;
    }
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    int returnVal = 0;
    RunningJob rj = null;
    boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
    String jobName = null;
    if (noName && this.getQueryPlan() != null) {
        int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
        jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
    }
    if (noName) {
        // This is for a special case to ensure unit tests pass
        job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
    }
    try {
        addInputPaths(job, work);
        MapredWork mrWork = new MapredWork();
        mrWork.setMapWork(work);
        Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
        // remove the pwd from the conf file so that the job tracker doesn't show it in its logs
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        JobClient jc = new JobClient(job);
        String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
        if (!addedJars.isEmpty()) {
            job.set("tmpjars", addedJars);
        }
        // make this client wait if the job tracker is not behaving well.
        Throttle.checkJobTracker(job, LOG);
        // Finally SUBMIT the JOB!
        rj = jc.submitJob(job);
        this.jobID = rj.getJobID();
        returnVal = jobExecHelper.progress(rj, jc, ctx);
        success = (returnVal == 0);
    } catch (Exception e) {
        e.printStackTrace();
        setException(e);
        String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
        if (rj != null) {
            mesg = "Ended Job = " + rj.getJobID() + mesg;
        } else {
            mesg = "Job Submission failed" + mesg;
        }
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        success = false;
        returnVal = 1;
    } finally {
        try {
            if (ctxCreated) {
                ctx.clear();
            }
            if (rj != null) {
                if (returnVal != 0) {
                    rj.killJob();
                }
            }
            ColumnTruncateMapper.jobClose(outputPath, success, job, console, work.getDynPartCtx(), null);
        } catch (Exception e) {
            LOG.warn("Failed while cleaning up ", e);
        } finally {
            HadoopJobExecHelper.runningJobs.remove(rj);
        }
    }
    return (returnVal);
}
Also used : Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) Path(org.apache.hadoop.fs.Path) BucketizedHiveInputFormat(org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat) IOException(java.io.IOException) JobClient(org.apache.hadoop.mapred.JobClient) IOException(java.io.IOException) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) FileSystem(org.apache.hadoop.fs.FileSystem) RunningJob(org.apache.hadoop.mapred.RunningJob)
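
This example and the next one follow the same pattern around DriverContext.getCtx(): reuse the Context handed down by the Driver if there is one, otherwise create a fresh one and remember to clear it afterwards. A minimal sketch of just that lifecycle, under the assumption that the task's JobConf is passed in (the helper class name is made up; the Hive calls are the ones used above):

import java.io.IOException;

import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.mapred.JobConf;

/** Sketch of the reuse-or-create Context lifecycle used by the MR-based tasks above. */
final class ContextLifecycleSketch {

    static int runWithContext(DriverContext driverContext, JobConf job) {
        Context ctx = driverContext.getCtx();
        boolean ctxCreated = false;
        try {
            if (ctx == null) {
                // No Context was handed down by the Driver, so this task owns one.
                ctx = new Context(job);
                ctxCreated = true;
            }
            // ... configure and submit the map-reduce job here, as in the examples ...
            return 0;
        } catch (IOException e) {
            return 5;
        } finally {
            if (ctxCreated) {
                try {
                    // Only clear what we created; a Context owned by the Driver outlives this task.
                    ctx.clear();
                } catch (Exception e) {
                    // best-effort cleanup, mirroring the examples
                }
            }
        }
    }
}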

Example 13 with DriverContext

Use of org.apache.hadoop.hive.ql.DriverContext in project hive by apache.

The class MergeFileTask, method execute.

/**
 * Start a new map-reduce job to do the merge, almost the same as ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
    Context ctx = driverContext.getCtx();
    boolean ctxCreated = false;
    RunningJob rj = null;
    int returnVal = 0;
    try {
        if (ctx == null) {
            ctx = new Context(job);
            ctxCreated = true;
        }
        HiveFileFormatUtils.prepareJobOutput(job);
        job.setInputFormat(work.getInputformatClass());
        job.setOutputFormat(HiveOutputFormatImpl.class);
        job.setMapperClass(MergeFileMapper.class);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(NullWritable.class);
        job.setNumReduceTasks(0);
        // create the temp directories
        Path outputPath = work.getOutputDir();
        Path tempOutPath = Utilities.toTempPath(outputPath);
        FileSystem fs = tempOutPath.getFileSystem(job);
        if (!fs.exists(tempOutPath)) {
            fs.mkdirs(tempOutPath);
        }
        ExecDriver.propagateSplitSettings(job, work);
        // set job name
        boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
        String jobName = null;
        if (noName && this.getQueryPlan() != null) {
            int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
            jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
        }
        if (noName) {
            // This is for a special case to ensure unit tests pass
            job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
        }
        // add input path
        addInputPaths(job, work);
        // serialize work
        Utilities.setMapWork(job, work, ctx.getMRTmpPath(), true);
        // remove the pwd from the conf file so that the job tracker doesn't show it in its logs
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        // submit the job
        JobClient jc = new JobClient(job);
        String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
        if (!addedJars.isEmpty()) {
            job.set("tmpjars", addedJars);
        }
        // make this client wait if the job tracker is not behaving well.
        Throttle.checkJobTracker(job, LOG);
        // Finally SUBMIT the JOB!
        rj = jc.submitJob(job);
        this.jobID = rj.getJobID();
        returnVal = jobExecHelper.progress(rj, jc, ctx);
        success = (returnVal == 0);
    } catch (Exception e) {
        setException(e);
        String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
        if (rj != null) {
            mesg = "Ended Job = " + rj.getJobID() + mesg;
        } else {
            mesg = "Job Submission failed" + mesg;
        }
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        success = false;
        returnVal = 1;
    } finally {
        try {
            if (ctxCreated) {
                ctx.clear();
            }
            if (rj != null) {
                if (returnVal != 0) {
                    rj.killJob();
                }
            }
            // get the list of Dynamic partition paths
            if (rj != null) {
                if (work.getAliasToWork() != null) {
                    for (Operator<? extends OperatorDesc> op : work.getAliasToWork().values()) {
                        op.jobClose(job, success);
                    }
                }
            }
        } catch (Exception e) {
            // jobClose needs to execute successfully otherwise fail task
            LOG.warn("Job close failed ", e);
            if (success) {
                setException(e);
                success = false;
                returnVal = 3;
                String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'";
                console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
            }
        } finally {
            HadoopJobExecHelper.runningJobs.remove(rj);
        }
    }
    return returnVal;
}
Also used : Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) RunningJob(org.apache.hadoop.mapred.RunningJob) JobClient(org.apache.hadoop.mapred.JobClient) IOException(java.io.IOException)
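
Examples 12 and 13 also share the submit-and-monitor skeleton that runs once the JobConf is prepared: submit through a JobClient, let HadoopJobExecHelper.progress block until the job finishes, kill the job on a non-zero result, and deregister it from the helper's running-job list. A reduced sketch of that flow, assuming the JobConf, HadoopJobExecHelper and Context are set up as in those tasks (the wrapper class name is hypothetical):

import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.exec.mr.HadoopJobExecHelper;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

/** Sketch of the submit / monitor / cleanup skeleton shared by the two MR tasks above. */
final class SubmitAndMonitorSketch {

    static int submitAndMonitor(JobConf job, HadoopJobExecHelper jobExecHelper, Context ctx) throws Exception {
        RunningJob rj = null;
        int returnVal = 1;
        try {
            JobClient jc = new JobClient(job);
            rj = jc.submitJob(job);
            // progress() prints progress until the job finishes and returns its exit status.
            returnVal = jobExecHelper.progress(rj, jc, ctx);
        } finally {
            try {
                if (rj != null && returnVal != 0) {
                    // A failed task kills its job so it does not keep running unattended.
                    rj.killJob();
                }
            } finally {
                // Both examples deregister the job from the helper's static tracking list.
                HadoopJobExecHelper.runningJobs.remove(rj);
            }
        }
        return returnVal;
    }
}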

Example 14 with DriverContext

Use of org.apache.hadoop.hive.ql.DriverContext in project hive by apache.

The class TezTask, method execute.

@Override
public int execute(DriverContext driverContext) {
    int rc = 1;
    boolean cleanContext = false;
    Context ctx = null;
    Ref<TezSessionState> sessionRef = Ref.from(null);
    try {
        // Get or create Context object. If we create it we have to clean it later as well.
        ctx = driverContext.getCtx();
        if (ctx == null) {
            ctx = new Context(conf);
            cleanContext = true;
            // some DDL tasks that directly execute a TezTask do not set up a Context, and hence no TriggerContext.
            // Setting the queryId is also messed up: some DDL tasks carry an executionId instead of a proper queryId.
            String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID);
            WmContext wmContext = new WmContext(System.currentTimeMillis(), queryId);
            ctx.setWmContext(wmContext);
        }
        // Need to remove this static hack. But this is currently the way to get a session.
        SessionState ss = SessionState.get();
        // Note: given that we return pool sessions to the pool in the finally block below, and that
        // we need to set the global to null to do that, this "reuse" may be pointless.
        TezSessionState session = sessionRef.value = ss.getTezSession();
        if (session != null && !session.isOpen()) {
            LOG.warn("The session: " + session + " has not been opened");
        }
        // We only need a username for UGI to use for groups; getGroups will fetch the groups
        // based on Hadoop configuration, as documented at
        // https://hadoop.apache.org/docs/r2.8.0/hadoop-project-dist/hadoop-common/GroupsMapping.html
        String userName = ss.getUserName();
        List<String> groups = null;
        if (userName == null) {
            userName = "anonymous";
        } else {
            groups = UserGroupInformation.createRemoteUser(ss.getUserName()).getGroups();
        }
        MappingInput mi = new MappingInput(userName, groups, ss.getHiveVariables().get("wmpool"), ss.getHiveVariables().get("wmapp"));
        WmContext wmContext = ctx.getWmContext();
        // jobConf will hold all the configuration for hadoop, tez, and hive
        JobConf jobConf = utils.createConfiguration(conf);
        // Get all user jars from work (e.g. input format stuff).
        String[] allNonConfFiles = work.configureJobConfAndExtractJars(jobConf);
        // DAG scratch dir. We get a session from the pool so it may be different from Tez one.
        // TODO: we could perhaps reuse the same directory for HiveResources?
        Path scratchDir = utils.createTezDir(ctx.getMRScratchDir(), conf);
        CallerContext callerContext = CallerContext.create("HIVE", queryPlan.getQueryId(), "HIVE_QUERY_ID", queryPlan.getQueryStr());
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
        session = sessionRef.value = WorkloadManagerFederation.getSession(sessionRef.value, conf, mi, getWork().getLlapMode(), wmContext);
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_GET_SESSION);
        try {
            ss.setTezSession(session);
            LOG.info("Subscribed to counters: {} for queryId: {}", wmContext.getSubscribedCounters(), wmContext.getQueryId());
            // Ensure the session is open and has the necessary local resources.
            // This would refresh any conf resources and also local resources.
            ensureSessionHasResources(session, allNonConfFiles);
            // This combines the jar stuff that came from the conf with the jars that did not.
            List<LocalResource> allNonAppResources = session.getLocalizedResources();
            logResources(allNonAppResources);
            Map<String, LocalResource> allResources = DagUtils.createTezLrMap(session.getAppJarLr(), allNonAppResources);
            // next we translate the TezWork to a Tez DAG
            DAG dag = build(jobConf, work, scratchDir, ctx, allResources);
            dag.setCallerContext(callerContext);
            // Check isShutdown opportunistically; it's never unset.
            if (this.isShutdown) {
                throw new HiveException("Operation cancelled");
            }
            DAGClient dagClient = submit(jobConf, dag, sessionRef);
            session = sessionRef.value;
            boolean wasShutdown = false;
            synchronized (dagClientLock) {
                assert this.dagClient == null;
                wasShutdown = this.isShutdown;
                if (!wasShutdown) {
                    this.dagClient = dagClient;
                }
            }
            if (wasShutdown) {
                closeDagClientOnCancellation(dagClient);
                throw new HiveException("Operation cancelled");
            }
            // finally monitor will print progress until the job is done
            TezJobMonitor monitor = new TezJobMonitor(work.getAllWork(), dagClient, conf, dag, ctx);
            rc = monitor.monitorExecution();
            if (rc != 0) {
                this.setException(new HiveException(monitor.getDiagnostics()));
            }
            // fetch the counters
            try {
                Set<StatusGetOpts> statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS);
                counters = dagClient.getDAGStatus(statusGetOpts).getDAGCounters();
            } catch (Exception err) {
                // Don't fail execution due to counters - just don't print summary info
                LOG.warn("Failed to get counters. Ignoring, summary info will be incomplete. " + err, err);
                counters = null;
            }
        } finally {
            // Note: due to TEZ-3846, the session may actually be invalid in case of some errors.
            // Currently, reopen on an attempted reuse will take care of that; we cannot tell
            // if the session is usable until we try.
            // We return this to the pool even if it's unusable; reopen is supposed to handle this.
            wmContext = ctx.getWmContext();
            try {
                if (sessionRef.value != null) {
                    sessionRef.value.returnToSessionManager();
                }
            } catch (Exception e) {
                LOG.error("Failed to return session: {} to pool", session, e);
                throw e;
            }
            if (!conf.getVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY).equalsIgnoreCase("none") && wmContext != null) {
                if (conf.getVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY).equalsIgnoreCase("json")) {
                    wmContext.printJson(console);
                } else if (conf.getVar(HiveConf.ConfVars.TEZ_SESSION_EVENTS_SUMMARY).equalsIgnoreCase("text")) {
                    wmContext.print(console);
                }
            }
        }
        if (LOG.isInfoEnabled() && counters != null && (HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) || Utilities.isPerfOrAboveLogging(conf))) {
            for (CounterGroup group : counters) {
                LOG.info(group.getDisplayName() + ":");
                for (TezCounter counter : group) {
                    LOG.info("   " + counter.getDisplayName() + ": " + counter.getValue());
                }
            }
        }
    } catch (Exception e) {
        LOG.error("Failed to execute tez graph.", e);
    // rc will be 1 at this point indicating failure.
    } finally {
        Utilities.clearWork(conf);
        // Clear gWorkMap
        for (BaseWork w : work.getAllWork()) {
            JobConf workCfg = workToConf.get(w);
            if (workCfg != null) {
                Utilities.clearWorkMapForConf(workCfg);
            }
        }
        if (cleanContext) {
            try {
                ctx.clear();
            } catch (Exception e) {
                /*best effort*/
                LOG.warn("Failed to clean up after tez job", e);
            }
        }
        // need to either move tmp files or remove them
        DAGClient dagClient = null;
        synchronized (dagClientLock) {
            dagClient = this.dagClient;
            this.dagClient = null;
        }
        // DagClient as such should have no bearing on jobClose.
        if (dagClient != null) {
            // rc will only be overwritten if close errors out
            rc = close(work, rc, dagClient);
        }
    }
    return rc;
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) CallerContext(org.apache.tez.client.CallerContext) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) WmContext(org.apache.hadoop.hive.ql.wm.WmContext) TezCounter(org.apache.tez.common.counters.TezCounter) MappingInput(org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput) TezJobMonitor(org.apache.hadoop.hive.ql.exec.tez.monitoring.TezJobMonitor) JobConf(org.apache.hadoop.mapred.JobConf) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) CallerContext(org.apache.tez.client.CallerContext) Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) WmContext(org.apache.hadoop.hive.ql.wm.WmContext) Path(org.apache.hadoop.fs.Path) CounterGroup(org.apache.tez.common.counters.CounterGroup) DAG(org.apache.tez.dag.api.DAG) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException) TezException(org.apache.tez.dag.api.TezException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) StatusGetOpts(org.apache.tez.dag.api.client.StatusGetOpts) DAGClient(org.apache.tez.dag.api.client.DAGClient)
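
The Tez variant differs from the MR tasks mainly in how it builds a missing Context: besides creating one from the HiveConf, it attaches a WmContext keyed by the query id so workload-management counters can be tracked. A sketch of just that setup, assuming the caller remembers whether it created the Context and clears it in its own finally block, exactly as the example does (the helper class name is made up; the Hive calls are the ones used above):

import java.io.IOException;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.wm.WmContext;

/** Sketch of the Context/WmContext setup used when a DDL task runs a TezTask directly. */
final class TezContextSetupSketch {

    static Context obtainContext(DriverContext driverContext, HiveConf conf) throws IOException {
        Context ctx = driverContext.getCtx();
        if (ctx == null) {
            // No Context from the Driver: create one and attach a WmContext for this query.
            ctx = new Context(conf);
            String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID);
            ctx.setWmContext(new WmContext(System.currentTimeMillis(), queryId));
        }
        return ctx;
    }
}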

Example 15 with DriverContext

Use of org.apache.hadoop.hive.ql.DriverContext in project hive by apache.

The class ExecDriver, method main.

@SuppressWarnings("unchecked")
public static void main(String[] args) throws IOException, HiveException {
    String planFileName = null;
    String jobConfFileName = null;
    boolean noLog = false;
    String files = null;
    String libjars = null;
    boolean localtask = false;
    try {
        for (int i = 0; i < args.length; i++) {
            if (args[i].equals("-plan")) {
                planFileName = args[++i];
            } else if (args[i].equals("-jobconffile")) {
                jobConfFileName = args[++i];
            } else if (args[i].equals("-nolog")) {
                noLog = true;
            } else if (args[i].equals("-files")) {
                files = args[++i];
            } else if (args[i].equals("-libjars")) {
                libjars = args[++i];
            } else if (args[i].equals("-localtask")) {
                localtask = true;
            }
        }
    } catch (IndexOutOfBoundsException e) {
        System.err.println("Missing argument to option");
        printUsage();
    }
    JobConf conf;
    if (localtask) {
        conf = new JobConf(MapredLocalTask.class);
    } else {
        conf = new JobConf(ExecDriver.class);
    }
    if (jobConfFileName != null) {
        conf.addResource(new Path(jobConfFileName));
    }
    // Initialize the resources from command line
    if (files != null) {
        conf.set("tmpfiles", files);
    }
    if (libjars != null) {
        conf.set("tmpjars", libjars);
    }
    if (UserGroupInformation.isSecurityEnabled()) {
        String hadoopAuthToken = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (hadoopAuthToken != null) {
            conf.set("mapreduce.job.credentials.binary", hadoopAuthToken);
        }
    }
    boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);
    String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID, "").trim();
    if (queryId.isEmpty()) {
        queryId = "unknown-" + System.currentTimeMillis();
        HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, queryId);
    }
    System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);
    LogUtils.registerLoggingContext(conf);
    if (noLog) {
        // If started from main(), and noLog is on, we should not output
        // any logs. To turn the log on, please set -Dtest.silent=false
        org.apache.logging.log4j.Logger logger = org.apache.logging.log4j.LogManager.getRootLogger();
        NullAppender appender = NullAppender.createNullAppender();
        appender.addToLogger(logger.getName(), Level.ERROR);
        appender.start();
    } else {
        setupChildLog4j(conf);
    }
    Logger LOG = LoggerFactory.getLogger(ExecDriver.class.getName());
    LogHelper console = new LogHelper(LOG, isSilent);
    if (planFileName == null) {
        console.printError("Must specify Plan File Name");
        printUsage();
    }
    // print the location of the execution log so that it's easy to find the reason for local mode execution failures
    for (Appender appender : ((org.apache.logging.log4j.core.Logger) LogManager.getRootLogger()).getAppenders().values()) {
        if (appender instanceof FileAppender) {
            console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
        } else if (appender instanceof RollingFileAppender) {
            console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
        }
    }
    // the plan file should always be in a local directory
    Path p = new Path(planFileName);
    FileSystem fs = FileSystem.getLocal(conf);
    InputStream pathData = fs.open(p);
    // libjars are not added to the classpath of the child process, so we add them here explicitly
    try {
        // see also - code in CliDriver.java
        ClassLoader loader = conf.getClassLoader();
        if (StringUtils.isNotBlank(libjars)) {
            loader = Utilities.addToClassPath(loader, StringUtils.split(libjars, ","));
        }
        conf.setClassLoader(loader);
        // Also set this to the Thread ContextClassLoader, so new threads will inherit
        // this class loader, and propagate it into newly created Configurations by
        // those new threads.
        Thread.currentThread().setContextClassLoader(loader);
    } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
    }
    int ret;
    if (localtask) {
        memoryMXBean = ManagementFactory.getMemoryMXBean();
        MapredLocalWork plan = SerializationUtilities.deserializePlan(pathData, MapredLocalWork.class);
        MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
        ret = ed.executeInProcess(new DriverContext());
    } else {
        MapredWork plan = SerializationUtilities.deserializePlan(pathData, MapredWork.class);
        ExecDriver ed = new ExecDriver(plan, conf, isSilent);
        ret = ed.execute(new DriverContext());
    }
    if (ret != 0) {
        System.exit(ret);
    }
}
Also used : DriverContext(org.apache.hadoop.hive.ql.DriverContext) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) NullAppender(org.apache.hadoop.hive.ql.log.NullAppender) LogHelper(org.apache.hadoop.hive.ql.session.SessionState.LogHelper) RollingFileAppender(org.apache.logging.log4j.core.appender.RollingFileAppender) Logger(org.slf4j.Logger) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) FileSystem(org.apache.hadoop.fs.FileSystem) JobConf(org.apache.hadoop.mapred.JobConf) Path(org.apache.hadoop.fs.Path) NullAppender(org.apache.hadoop.hive.ql.log.NullAppender) RollingFileAppender(org.apache.logging.log4j.core.appender.RollingFileAppender) Appender(org.apache.logging.log4j.core.Appender) LogDivertAppender(org.apache.hadoop.hive.ql.log.LogDivertAppender) FileAppender(org.apache.logging.log4j.core.appender.FileAppender) RollingFileAppender(org.apache.logging.log4j.core.appender.RollingFileAppender) FileAppender(org.apache.logging.log4j.core.appender.FileAppender) InputStream(java.io.InputStream) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) LogInitializationException(org.apache.hadoop.hive.common.LogUtils.LogInitializationException) IOException(java.io.IOException) MapredLocalWork(org.apache.hadoop.hive.ql.plan.MapredLocalWork)
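
Only the tail of this example actually touches DriverContext: after the option parsing and log setup, the standalone driver deserializes the plan and hands a fresh DriverContext to the task it runs. A condensed sketch of that step, assuming the plan input stream, JobConf and silent flag have been prepared as in main() (the wrapper class is hypothetical; the constructors and execute calls are the ones used above):

import java.io.InputStream;

import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.mapred.JobConf;

/** Sketch of how a standalone driver hands a fresh DriverContext to the deserialized task. */
final class StandaloneExecSketch {

    static int run(InputStream pathData, JobConf conf, boolean isSilent, boolean localtask) throws Exception {
        if (localtask) {
            MapredLocalWork plan = SerializationUtilities.deserializePlan(pathData, MapredLocalWork.class);
            // Local tasks run in-process but still receive a DriverContext.
            return new MapredLocalTask(plan, conf, isSilent).executeInProcess(new DriverContext());
        }
        MapredWork plan = SerializationUtilities.deserializePlan(pathData, MapredWork.class);
        // The cluster path goes through the normal execute(DriverContext) entry point.
        return new ExecDriver(plan, conf, isSilent).execute(new DriverContext());
    }
}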

Aggregations

DriverContext (org.apache.hadoop.hive.ql.DriverContext): 19
Path (org.apache.hadoop.fs.Path): 14
IOException (java.io.IOException): 10
Context (org.apache.hadoop.hive.ql.Context): 9
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 7
CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext): 7
JobConf (org.apache.hadoop.mapred.JobConf): 5
ArrayList (java.util.ArrayList): 4
MapredWork (org.apache.hadoop.hive.ql.plan.MapredWork): 4
JobClient (org.apache.hadoop.mapred.JobClient): 4
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 3
MapredLocalWork (org.apache.hadoop.hive.ql.plan.MapredLocalWork): 3
RunningJob (org.apache.hadoop.mapred.RunningJob): 3
File (java.io.File): 2
OutputStream (java.io.OutputStream): 2
Serializable (java.io.Serializable): 2
HashMap (java.util.HashMap): 2
Map (java.util.Map): 2
Properties (java.util.Properties): 2