
Example 26 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class TezTask method execute.

@Override
public int execute(DriverContext driverContext) {
    int rc = 1;
    boolean cleanContext = false;
    Context ctx = null;
    TezSessionState session = null;
    try {
        // Get or create the Context object. If we create it, we have to clean it up later as well.
        ctx = driverContext.getCtx();
        if (ctx == null) {
            ctx = new Context(conf);
            cleanContext = true;
        }
        // Need to remove this static hack. But this is currently the way to get a session.
        SessionState ss = SessionState.get();
        session = ss.getTezSession();
        if (session != null && !session.isOpen()) {
            LOG.warn("The session: " + session + " has not been opened");
        }
        session = TezSessionPoolManager.getInstance().getSession(session, conf, false, getWork().getLlapMode());
        ss.setTezSession(session);
        try {
            // jobConf will hold all the configuration for hadoop, tez, and hive
            JobConf jobConf = utils.createConfiguration(conf);
            // Get all user jars from work (e.g. input format stuff).
            String[] inputOutputJars = work.configureJobConfAndExtractJars(jobConf);
            // We will localize all the files (jars, plans, hashtables) to the
            // scratch dir. Let's create this and the tmp dir first.
            Path scratchDir = ctx.getMRScratchDir();
            // create the tez tmp dir
            scratchDir = utils.createTezDir(scratchDir, conf);
            Map<String, LocalResource> inputOutputLocalResources = getExtraLocalResources(jobConf, scratchDir, inputOutputJars);
            // Ensure the session is open and has the necessary local resources
            updateSession(session, jobConf, scratchDir, inputOutputJars, inputOutputLocalResources);
            List<LocalResource> additionalLr = session.getLocalizedResources();
            logResources(additionalLr);
            // unless already installed on all the cluster nodes, we'll have to
            // localize hive-exec.jar as well.
            LocalResource appJarLr = session.getAppJarLr();
            // next we translate the TezWork to a Tez DAG
            DAG dag = build(jobConf, work, scratchDir, appJarLr, additionalLr, ctx);
            CallerContext callerContext = CallerContext.create("HIVE", queryPlan.getQueryId(), "HIVE_QUERY_ID", queryPlan.getQueryStr());
            dag.setCallerContext(callerContext);
            // Add the extra resources to the dag
            addExtraResourcesToDag(session, dag, inputOutputJars, inputOutputLocalResources);
            // submit will send the job to the cluster and start executing
            dagClient = submit(jobConf, dag, scratchDir, appJarLr, session, additionalLr, inputOutputJars, inputOutputLocalResources);
            // finally monitor will print progress until the job is done
            TezJobMonitor monitor = new TezJobMonitor(work.getWorkMap(), dagClient, conf, dag, ctx);
            rc = monitor.monitorExecution();
            if (rc != 0) {
                this.setException(new HiveException(monitor.getDiagnostics()));
            }
            // fetch the counters
            try {
                Set<StatusGetOpts> statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS);
                counters = dagClient.getDAGStatus(statusGetOpts).getDAGCounters();
            } catch (Exception err) {
                // Don't fail execution due to counters - just don't print summary info
                LOG.warn("Failed to get counters. Ignoring, summary info will be incomplete. " + err, err);
                counters = null;
            }
        } finally {
            // We return this to the pool even if it's unusable; reopen is supposed to handle this.
            try {
                TezSessionPoolManager.getInstance().returnSession(session, getWork().getLlapMode());
            } catch (Exception e) {
                LOG.error("Failed to return session: {} to pool", session, e);
                throw e;
            }
        }
        if (LOG.isInfoEnabled() && counters != null && (HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) || Utilities.isPerfOrAboveLogging(conf))) {
            for (CounterGroup group : counters) {
                LOG.info(group.getDisplayName() + ":");
                for (TezCounter counter : group) {
                    LOG.info("   " + counter.getDisplayName() + ": " + counter.getValue());
                }
            }
        }
    } catch (Exception e) {
        LOG.error("Failed to execute tez graph.", e);
        // rc will be 1 at this point, indicating failure.
    } finally {
        Utilities.clearWork(conf);
        // Clear gWorkMap
        for (BaseWork w : work.getAllWork()) {
            JobConf workCfg = workToConf.get(w);
            if (workCfg != null) {
                Utilities.clearWorkMapForConf(workCfg);
            }
        }
        if (cleanContext) {
            try {
                ctx.clear();
            } catch (Exception e) {
                /*best effort*/
                LOG.warn("Failed to clean up after tez job", e);
            }
        }
        // need to either move tmp files or remove them
        if (dagClient != null) {
            // rc will only be overwritten if close errors out
            rc = close(work, rc);
        }
    }
    return rc;
}
Also used : CallerContext(org.apache.tez.client.CallerContext) Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) Path(org.apache.hadoop.fs.Path) SessionState(org.apache.hadoop.hive.ql.session.SessionState) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) CounterGroup(org.apache.tez.common.counters.CounterGroup) DAG(org.apache.tez.dag.api.DAG) TezCounter(org.apache.tez.common.counters.TezCounter) IOException(java.io.IOException) TezException(org.apache.tez.dag.api.TezException) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) TezJobMonitor(org.apache.hadoop.hive.ql.exec.tez.monitoring.TezJobMonitor) StatusGetOpts(org.apache.tez.dag.api.client.StatusGetOpts) JobConf(org.apache.hadoop.mapred.JobConf) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork)
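
The get-or-create pattern above (and again in Example 28 below) boils down to: take the Context from the DriverContext if one already exists, otherwise create a fresh one and remember that you now own its cleanup. The following condensed sketch is illustrative only, reusing just the calls shown in the example (driverContext.getCtx(), new Context(conf), ctx.clear()); it is not additional TezTask code.

Context ctx = null;
boolean cleanContext = false;
try {
    // Reuse the driver's Context if present; otherwise create one and own its cleanup.
    ctx = driverContext.getCtx();
    if (ctx == null) {
        ctx = new Context(conf);
        cleanContext = true;
    }
    // ... use ctx for scratch dirs, tmp paths, and job monitoring ...
} finally {
    if (cleanContext) {
        try {
            ctx.clear();
        } catch (Exception e) {
            // Best effort, mirroring the example above.
            LOG.warn("Failed to clean up context", e);
        }
    }
}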

Example 27 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class GenMRUnion1 method processSubQueryUnionCreateIntermediate.

/**
   * Process the union when the parent is a map-reduce job. Create a temporary
   * output, and let the union task read from the temporary output.
   *
   * The files created for all the inputs are kept in the union context and are
   * later used to initialize the union plan.
   *
   * @param parent the parent operator feeding the union
   * @param child the child operator of the union
   * @param uTask the union task
   * @param ctx the GenMR processing context
   * @param uCtxTask the union context holding the intermediate files
   */
private void processSubQueryUnionCreateIntermediate(Operator<? extends OperatorDesc> parent, Operator<? extends OperatorDesc> child, Task<? extends Serializable> uTask, GenMRProcContext ctx, GenMRUnionCtx uCtxTask) {
    ParseContext parseCtx = ctx.getParseCtx();
    TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));
    // generate the temporary file
    Context baseCtx = parseCtx.getContext();
    Path taskTmpDir = baseCtx.getMRTmpPath();
    // Create the temporary file, its corresponding FileSinkOperator, and
    // its corresponding TableScanOperator.
    TableScanOperator tableScanOp = GenMapRedUtils.createTemporaryFile(parent, child, taskTmpDir, tt_desc, parseCtx);
    // Add the path to alias mapping
    uCtxTask.addTaskTmpDir(taskTmpDir.toUri().toString());
    uCtxTask.addTTDesc(tt_desc);
    uCtxTask.addListTopOperators(tableScanOp);
    // The union task is empty. The files created for all the inputs are
    // assembled in the union context and later used to initialize the union
    // plan.
    Task<? extends Serializable> currTask = ctx.getCurrTask();
    currTask.addDependentTask(uTask);
    if (ctx.getRootTasks().contains(uTask)) {
        ctx.getRootTasks().remove(uTask);
        if (!ctx.getRootTasks().contains(currTask) && shouldBeRootTask(currTask)) {
            ctx.getRootTasks().add(currTask);
        }
    }
}
Also used : ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) Context(org.apache.hadoop.hive.ql.Context) UnionProcContext(org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext) UnionParseContext(org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext.UnionParseContext) Path(org.apache.hadoop.fs.Path) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc)

Example 28 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class PartialScanTask method execute.

/**
   * Start a new map-reduce job to do a partial scan to calculate stats,
   * almost the same as BlockMergeTask or ExecDriver.
   */
@Override
public int execute(DriverContext driverContext) {
    HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
    success = true;
    HiveFileFormatUtils.prepareJobOutput(job);
    job.setOutputFormat(HiveOutputFormatImpl.class);
    job.setMapperClass(work.getMapperClass());
    Context ctx = driverContext.getCtx();
    boolean ctxCreated = false;
    try {
        if (ctx == null) {
            ctx = new Context(job);
            ctxCreated = true;
        }
    } catch (IOException e) {
        e.printStackTrace();
        console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return 5;
    }
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    if (work.getNumMapTasks() != null) {
        job.setNumMapTasks(work.getNumMapTasks());
    }
    // zero reducers
    job.setNumReduceTasks(0);
    if (work.getMinSplitSize() != null) {
        HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
    }
    if (work.getInputformat() != null) {
        HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
    }
    String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
    LOG.info("Using " + inpFormat);
    try {
        job.setInputFormat(JavaUtils.loadClass(inpFormat));
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    int returnVal = 0;
    RunningJob rj = null;
    boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
    String jobName = null;
    if (noName && this.getQueryPlan() != null) {
        int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
        jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
    }
    if (noName) {
        // This is for a special case to ensure unit tests pass
        job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
    }
    // pass aggregation key to mapper
    HiveConf.setVar(job, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX, work.getAggKey());
    job.set(StatsSetupConst.STATS_TMP_LOC, work.getStatsTmpDir());
    try {
        addInputPaths(job, work);
        MapredWork mrWork = new MapredWork();
        mrWork.setMapWork(work);
        Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
        // Remove the pwd from the conf file so that the job tracker doesn't
        // show it in the logs.
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
        JobClient jc = new JobClient(job);
        String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
        if (!addedJars.isEmpty()) {
            job.set("tmpjars", addedJars);
        }
        // Make this client wait if the job tracker is not behaving well.
        Throttle.checkJobTracker(job, LOG);
        if (work.isGatheringStats()) {
            // initialize stats publishing table
            StatsPublisher statsPublisher;
            StatsFactory factory = StatsFactory.newFactory(job);
            if (factory != null) {
                statsPublisher = factory.getStatsPublisher();
                StatsCollectionContext sc = new StatsCollectionContext(job);
                sc.setStatsTmpDir(work.getStatsTmpDir());
                if (!statsPublisher.init(sc)) {
                    // create the stats table if it does not exist
                    if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
                        throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
                    }
                }
            }
        }
        // Finally SUBMIT the JOB!
        rj = jc.submitJob(job);
        this.jobID = rj.getJobID();
        returnVal = jobExecHelper.progress(rj, jc, ctx);
        success = (returnVal == 0);
    } catch (Exception e) {
        e.printStackTrace();
        setException(e);
        String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
        if (rj != null) {
            mesg = "Ended Job = " + rj.getJobID() + mesg;
        } else {
            mesg = "Job Submission failed" + mesg;
        }
        // Has to use full name to make sure it does not conflict with
        // org.apache.commons.lang.StringUtils
        console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        success = false;
        returnVal = 1;
    } finally {
        try {
            if (ctxCreated) {
                ctx.clear();
            }
            if (rj != null) {
                if (returnVal != 0) {
                    rj.killJob();
                }
            }
        } catch (Exception e) {
            LOG.warn("Failed in cleaning up ", e);
        } finally {
            HadoopJobExecHelper.runningJobs.remove(rj);
        }
    }
    return (returnVal);
}
Also used : StatsCollectionContext(org.apache.hadoop.hive.ql.stats.StatsCollectionContext) Context(org.apache.hadoop.hive.ql.Context) DriverContext(org.apache.hadoop.hive.ql.DriverContext) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) CombineHiveInputFormat(org.apache.hadoop.hive.ql.io.CombineHiveInputFormat) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) IOException(java.io.IOException) JobClient(org.apache.hadoop.mapred.JobClient) StatsPublisher(org.apache.hadoop.hive.ql.stats.StatsPublisher) StatsFactory(org.apache.hadoop.hive.ql.stats.StatsFactory) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) RunningJob(org.apache.hadoop.mapred.RunningJob)

Example 29 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class ColumnStatsAutoGatherContext method genSelOpForAnalyze.

@SuppressWarnings("rawtypes")
private Operator genSelOpForAnalyze(String analyzeCommand, Context origCtx) throws IOException, ParseException, SemanticException {
    //0. initialization
    Context ctx = new Context(conf);
    ctx.setExplainConfig(origCtx.getExplainConfig());
    ASTNode tree = ParseUtils.parse(analyzeCommand, ctx);
    //1. get the ColumnStatsSemanticAnalyzer
    BaseSemanticAnalyzer baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), tree);
    ColumnStatsSemanticAnalyzer colSem = (ColumnStatsSemanticAnalyzer) baseSem;
    //2. get the rewritten AST
    ASTNode ast = colSem.rewriteAST(tree, this);
    baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), ast);
    SemanticAnalyzer sem = (SemanticAnalyzer) baseSem;
    QB qb = new QB(null, null, false);
    ASTNode child = ast;
    ParseContext subPCtx = ((SemanticAnalyzer) sem).getParseContext();
    subPCtx.setContext(ctx);
    ((SemanticAnalyzer) sem).initParseCtx(subPCtx);
    sem.doPhase1(child, qb, sem.initPhase1Ctx(), null);
    // This will trigger new calls to metastore to collect metadata
    // TODO: cache the information from the metastore
    sem.getMetaData(qb);
    Operator<?> operator = sem.genPlan(qb);
    //3. populate the load file work so that ColumnStatsTask can work
    loadFileWork.addAll(sem.getLoadFileWork());
    //4. Because there is only one TS for the analyze statement, we can get it directly.
    if (sem.topOps.values().size() != 1) {
        throw new SemanticException("ColumnStatsAutoGatherContext is expecting exactly one TS, but finds " + sem.topOps.values().size());
    }
    operator = sem.topOps.values().iterator().next();
    //5. get the first SEL after TS
    while (!(operator instanceof SelectOperator)) {
        operator = operator.getChildOperators().get(0);
    }
    return operator;
}
Also used : VectorizationContext(org.apache.hadoop.hive.ql.exec.vector.VectorizationContext) Context(org.apache.hadoop.hive.ql.Context) AnalyzeRewriteContext(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) QueryState(org.apache.hadoop.hive.ql.QueryState)

Example 30 with Context

use of org.apache.hadoop.hive.ql.Context in project hive by apache.

the class TestHiveAuthorizationTaskFactory method setup.

@Before
public void setup() throws Exception {
    queryState = new QueryState(null);
    HiveConf conf = queryState.getConf();
    conf.setVar(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY, TestHiveAuthorizationTaskFactory.DummyHiveAuthorizationTaskFactoryImpl.class.getName());
    conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    db = Mockito.mock(Hive.class);
    table = new Table(DB, TABLE);
    partition = new Partition(table);
    SessionState.start(conf);
    context = new Context(conf);
    parseDriver = new ParseDriver();
    analyzer = new DDLSemanticAnalyzer(queryState, db);
    Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
    Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
    Mockito.when(db.getPartition(table, new HashMap<String, String>(), false)).thenReturn(partition);
    HadoopDefaultAuthenticator auth = new HadoopDefaultAuthenticator();
    auth.setConf(conf);
    currentUser = auth.getUserName();
    DummyHiveAuthorizationTaskFactoryImpl.reset();
}
Also used : Context(org.apache.hadoop.hive.ql.Context) Partition(org.apache.hadoop.hive.ql.metadata.Partition) Hive(org.apache.hadoop.hive.ql.metadata.Hive) Table(org.apache.hadoop.hive.ql.metadata.Table) ParseDriver(org.apache.hadoop.hive.ql.parse.ParseDriver) HiveConf(org.apache.hadoop.hive.conf.HiveConf) QueryState(org.apache.hadoop.hive.ql.QueryState) HadoopDefaultAuthenticator(org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator) DDLSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer) Before(org.junit.Before)
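
In the tests that use this fixture, the Context created in setup() is handed to the analyzer together with a parsed statement. The sketch below is a hypothetical illustration of that flow, not code from TestHiveAuthorizationTaskFactory itself; it assumes the usual ParseDriver.parse(String) and BaseSemanticAnalyzer.analyze(ASTNode, Context) entry points, and the exact AST normalization may differ between Hive versions.

// Hypothetical helper showing how the fixture above is typically exercised.
private List<Task<? extends Serializable>> analyze(String command) throws Exception {
    // Parse the statement with the ParseDriver created in setup().
    ASTNode tree = parseDriver.parse(command);
    // Run semantic analysis against the mocked Hive, using the Context from setup().
    analyzer.analyze(tree, context);
    // The DDL analyzer exposes the generated tasks for assertions.
    return analyzer.getRootTasks();
}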

Aggregations

Context (org.apache.hadoop.hive.ql.Context)47 Path (org.apache.hadoop.fs.Path)19 IOException (java.io.IOException)15 DriverContext (org.apache.hadoop.hive.ql.DriverContext)15 CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext)13 FileSystem (org.apache.hadoop.fs.FileSystem)9 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)9 Serializable (java.io.Serializable)8 Task (org.apache.hadoop.hive.ql.exec.Task)7 HiveConf (org.apache.hadoop.hive.conf.HiveConf)5 Table (org.apache.hadoop.hive.ql.metadata.Table)5 ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext)5 TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc)5 ArrayList (java.util.ArrayList)4 EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext)4 TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator)4 HiveTxnManager (org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager)4 MapWork (org.apache.hadoop.hive.ql.plan.MapWork)4 JobClient (org.apache.hadoop.mapred.JobClient)4 JobConf (org.apache.hadoop.mapred.JobConf)4