
Example 1 with ParseException

Use of org.apache.hadoop.hive.ql.parse.ParseException in project hive by apache.

From the class RewriteParseContextGenerator, the method generateOperatorTree:

/**
   * Parse the input {@link String} command and generate an operator tree.
   * @param queryState the query state carrying the configuration to use
   * @param command the command to parse
   * @return the root {@link Operator} of the generated operator tree
   * @throws SemanticException if parsing or semantic analysis fails
   */
public static Operator<? extends OperatorDesc> generateOperatorTree(QueryState queryState, String command) throws SemanticException {
    Operator<? extends OperatorDesc> operatorTree;
    try {
        Context ctx = new Context(queryState.getConf());
        ASTNode tree = ParseUtils.parse(command, ctx);
        BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree);
        assert (sem instanceof SemanticAnalyzer);
        operatorTree = doSemanticAnalysis((SemanticAnalyzer) sem, tree, ctx);
        LOG.info("Sub-query Semantic Analysis Completed");
    } catch (IOException e) {
        LOG.error("IOException in generating the operator tree for input command - " + command, e);
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
    } catch (ParseException e) {
        LOG.error("ParseException in generating the operator tree for input command - " + command, e);
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
    } catch (SemanticException e) {
        LOG.error("SemanticException in generating the operator tree for input command - " + command, e);
        LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw new SemanticException(e.getMessage(), e);
    }
    return operatorTree;
}
Also used: ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext), Context (org.apache.hadoop.hive.ql.Context), BaseSemanticAnalyzer (org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), SemanticAnalyzer (org.apache.hadoop.hive.ql.parse.SemanticAnalyzer), IOException (java.io.IOException), ParseException (org.apache.hadoop.hive.ql.parse.ParseException), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
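
For orientation, here is a minimal sketch of how this helper might be invoked. The QueryState.Builder construction mirrors Example 5 below; the default HiveConf and the query string are assumptions for illustration, not part of the original:

// Minimal usage sketch (assumption: a default HiveConf is sufficient here and
// the QueryState.Builder construction from Example 5 applies as well).
QueryState queryState = new QueryState.Builder().withHiveConf(new HiveConf()).build();
try {
    Operator<? extends OperatorDesc> root =
            RewriteParseContextGenerator.generateOperatorTree(queryState, "SELECT key, count(*) FROM src GROUP BY key");
    // Both parse and analysis failures surface uniformly as SemanticException.
    System.out.println("Generated operator tree rooted at " + root.getClass().getSimpleName());
} catch (SemanticException e) {
    System.err.println("Could not generate an operator tree: " + e.getMessage());
}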

Example 2 with ParseException

Use of org.apache.hadoop.hive.ql.parse.ParseException in project hive by apache.

From the class Driver, the method compile:

// deferClose indicates whether the close/destroy should be deferred when the process has been
// interrupted. It should be set to true if compile is called from another method, such as
// runInternal, which defers the close to its own caller.
private void compile(String command, boolean resetTaskIds, boolean deferClose) throws CommandProcessorResponse {
    PerfLogger perfLogger = SessionState.getPerfLogger(true);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
    lDrvState.stateLock.lock();
    try {
        lDrvState.driverState = DriverState.COMPILING;
    } finally {
        lDrvState.stateLock.unlock();
    }
    command = new VariableSubstitution(new HiveVariableSource() {

        @Override
        public Map<String, String> getHiveVariable() {
            return SessionState.get().getHiveVariables();
        }
    }).substitute(conf, command);
    String queryStr = command;
    try {
        // the command should be redacted to avoid logging sensitive data
        queryStr = HookUtils.redactLogString(conf, command);
    } catch (Exception e) {
        LOG.warn("WARNING! Query command could not be redacted.", e);
    }
    checkInterrupted("at beginning of compilation.", null, null);
    if (ctx != null && ctx.getExplainAnalyze() != AnalyzeState.RUNNING) {
        // close the existing ctx etc. before compiling a new query, but do not destroy the driver
        closeInProcess(false);
    }
    if (resetTaskIds) {
        TaskFactory.resetId();
    }
    LockedDriverState.setLockedDriverState(lDrvState);
    String queryId = queryState.getQueryId();
    if (ctx != null) {
        setTriggerContext(queryId);
    }
    // save some info for webUI for use after plan is freed
    this.queryDisplay.setQueryStr(queryStr);
    this.queryDisplay.setQueryId(queryId);
    LOG.info("Compiling command(queryId=" + queryId + "): " + queryStr);
    conf.setQueryString(queryStr);
    // FIXME: side effect will leave the last query set at the session level
    SessionState.get().getConf().setQueryString(queryStr);
    SessionState.get().setupQueryCurrentTimestamp();
    // Whether any error occurred during query compilation. Used for query lifetime hook.
    boolean compileError = false;
    boolean parseError = false;
    try {
        // Initialize the transaction manager.  This must be done before analyze is called.
        if (initTxnMgr != null) {
            queryTxnMgr = initTxnMgr;
        } else {
            queryTxnMgr = SessionState.get().initTxnMgr(conf);
        }
        if (queryTxnMgr instanceof Configurable) {
            ((Configurable) queryTxnMgr).setConf(conf);
        }
        queryState.setTxnManager(queryTxnMgr);
        // In case the user hits Ctrl-C twice to kill the Hive CLI JVM, we want to release locks.
        // If compile is being called multiple times, clear the old shutdown hook first.
        ShutdownHookManager.removeShutdownHook(shutdownRunner);
        final HiveTxnManager txnMgr = queryTxnMgr;
        shutdownRunner = new Runnable() {

            @Override
            public void run() {
                try {
                    releaseLocksAndCommitOrRollback(false, txnMgr);
                } catch (LockException e) {
                    LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " + e.getMessage());
                }
            }
        };
        ShutdownHookManager.addShutdownHook(shutdownRunner, SHUTDOWN_HOOK_PRIORITY);
        checkInterrupted("before parsing and analysing the query", null, null);
        if (ctx == null) {
            ctx = new Context(conf);
            setTriggerContext(queryId);
        }
        ctx.setRuntimeStatsSource(runtimeStatsSource);
        ctx.setCmd(command);
        ctx.setHDFSCleanup(true);
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
        // Trigger query hook before compilation
        hookRunner.runBeforeParseHook(command);
        ASTNode tree;
        try {
            tree = ParseUtils.parse(command, ctx);
        } catch (ParseException e) {
            parseError = true;
            throw e;
        } finally {
            hookRunner.runAfterParseHook(command, parseError);
        }
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
        hookRunner.runBeforeCompileHook(command);
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
        // Flush the metastore cache.  This assures that we don't pick up objects from a previous
        // query running in this same thread.  This has to be done after we get our semantic
        // analyzer (this is when the connection to the metastore is made) but before we analyze,
        // because at that point we need access to the objects.
        Hive.get().getMSC().flushCache();
        BaseSemanticAnalyzer sem;
        // Do semantic analysis and plan generation
        if (hookRunner.hasPreAnalyzeHooks()) {
            HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
            hookCtx.setConf(conf);
            hookCtx.setUserName(userName);
            hookCtx.setIpAddress(SessionState.get().getUserIpAddress());
            hookCtx.setCommand(command);
            hookCtx.setHiveOperation(queryState.getHiveOperation());
            tree = hookRunner.runPreAnalyzeHooks(hookCtx, tree);
            sem = SemanticAnalyzerFactory.get(queryState, tree);
            openTransaction();
            sem.analyze(tree, ctx);
            hookCtx.update(sem);
            hookRunner.runPostAnalyzeHooks(hookCtx, sem.getAllRootTasks());
        } else {
            sem = SemanticAnalyzerFactory.get(queryState, tree);
            openTransaction();
            sem.analyze(tree, ctx);
        }
        LOG.info("Semantic Analysis Completed");
        // Retrieve information about cache usage for the query.
        if (conf.getBoolVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED)) {
            cacheUsage = sem.getCacheUsage();
        }
        // validate the plan
        sem.validate();
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
        checkInterrupted("after analyzing query.", null, null);
        // get the output schema
        schema = getSchema(sem, conf);
        plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId, queryState.getHiveOperation(), schema);
        conf.set("mapreduce.workflow.id", "hive_" + queryId);
        conf.set("mapreduce.workflow.name", queryStr);
        // initialize FetchTask right here
        if (plan.getFetchTask() != null) {
            plan.getFetchTask().initialize(queryState, plan, null, ctx.getOpContext());
        }
        // do the authorization check
        if (!sem.skipAuthorization() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
            try {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
                doAuthorization(queryState.getHiveOperation(), sem, command);
            } catch (AuthorizationException authExp) {
                console.printError("Authorization failed:" + authExp.getMessage() + ". Use SHOW GRANT to get more details.");
                errorMessage = authExp.getMessage();
                SQLState = "42000";
                throw createProcessorResponse(403);
            } finally {
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
            }
        }
        if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) {
            String explainOutput = getExplainOutput(sem, plan, tree);
            if (explainOutput != null) {
                LOG.info("EXPLAIN output for queryid " + queryId + " : " + explainOutput);
                if (conf.isWebUiQueryInfoCacheEnabled()) {
                    queryDisplay.setExplainPlan(explainOutput);
                }
            }
        }
    } catch (CommandProcessorResponse cpr) {
        throw cpr;
    } catch (Exception e) {
        checkInterrupted("during query compilation: " + e.getMessage(), null, null);
        compileError = true;
        ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
        errorMessage = "FAILED: " + e.getClass().getSimpleName();
        if (error != ErrorMsg.GENERIC_ERROR) {
            errorMessage += " [Error " + error.getErrorCode() + "]:";
        }
        // HIVE-4889
        if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) {
            errorMessage += " " + e.getCause().getMessage();
        } else {
            errorMessage += " " + e.getMessage();
        }
        if (error == ErrorMsg.TXNMGR_NOT_ACID) {
            errorMessage += ". Failed command: " + queryStr;
        }
        SQLState = error.getSQLState();
        downstreamError = e;
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw createProcessorResponse(error.getErrorCode());
    } finally {
        // Trigger the post-compilation hook. Note that if the compilation fails here, the
        // before/after execution hooks will never be executed.
        if (!parseError) {
            try {
                hookRunner.runAfterCompilationHook(command, compileError);
            } catch (Exception e) {
                LOG.warn("Failed when invoking query after-compilation hook.", e);
            }
        }
        double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE) / 1000.00;
        ImmutableMap<String, Long> compileHMSTimings = dumpMetaCallTimingWithoutEx("compilation");
        queryDisplay.setHmsTimings(QueryDisplay.Phase.COMPILATION, compileHMSTimings);
        boolean isInterrupted = lDrvState.isAborted();
        if (isInterrupted && !deferClose) {
            closeInProcess(true);
        }
        lDrvState.stateLock.lock();
        try {
            if (isInterrupted) {
                lDrvState.driverState = deferClose ? DriverState.EXECUTING : DriverState.ERROR;
            } else {
                lDrvState.driverState = compileError ? DriverState.ERROR : DriverState.COMPILED;
            }
        } finally {
            lDrvState.stateLock.unlock();
        }
        if (isInterrupted) {
            LOG.info("Compiling command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
        } else {
            LOG.info("Completed compiling command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
        }
    }
}
Also used: HiveSemanticAnalyzerHookContext (org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext), BaseSemanticAnalyzer (org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer), AuthorizationException (org.apache.hadoop.hive.ql.metadata.AuthorizationException), HiveVariableSource (org.apache.hadoop.hive.conf.HiveVariableSource), CommandProcessorResponse (org.apache.hadoop.hive.ql.processors.CommandProcessorResponse), PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger), Configurable (org.apache.hadoop.conf.Configurable), LockException (org.apache.hadoop.hive.ql.lockmgr.LockException), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), PrivateHookContext (org.apache.hadoop.hive.ql.hooks.PrivateHookContext), ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext), HiveAuthzContext (org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext), WmContext (org.apache.hadoop.hive.ql.wm.WmContext), HookContext (org.apache.hadoop.hive.ql.hooks.HookContext), VariableSubstitution (org.apache.hadoop.hive.conf.VariableSubstitution), IOException (java.io.IOException), ParseException (org.apache.hadoop.hive.ql.parse.ParseException), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), HiveSemanticAnalyzerHookContextImpl (org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl), HiveTxnManager (org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), ImmutableMap (com.google.common.collect.ImmutableMap), HashMap (java.util.HashMap)
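
Seen from the outside, a ParseException thrown during compilation never escapes directly; it is logged, wrapped, and rethrown as an error response. A hedged sketch of driving a compilation and observing that, using the Driver construction and compileAndRespond call from Example 5 (in the Hive version of this example the thrown type is CommandProcessorResponse instead of CommandProcessorException; the malformed query is deliberate):

HiveConf conf = new HiveConf();
HiveTxnManager txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
Driver driver = new Driver(new QueryState.Builder().withHiveConf(conf).nonIsolated().build(), null, txnManager);
try {
    // Deliberately malformed SQL: parsing fails and compile() converts the
    // ParseException into an error response for the caller.
    driver.compileAndRespond("SELECT FROM WHERE", false);
} catch (CommandProcessorException e) {
    System.err.println("Compilation failed: " + e.getMessage());
} finally {
    driver.close();
}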

Example 3 with ParseException

Use of org.apache.hadoop.hive.ql.parse.ParseException in project hive by apache.

From the class QTestUtil, the method checkNegativeResults:

public QTestProcessExecResult checkNegativeResults(String tname, Exception e) throws Exception {
    String outFileExtension = getOutFileExtension();
    File qf = new File(outDir, tname);
    String expf = outPath(outDir.toString(), tname.concat(outFileExtension));
    File outf = new File(new File(logDir), qf.getName().concat(outFileExtension));
    FileWriter outfd = new FileWriter(outf);
    if (e instanceof ParseException) {
        outfd.write("Parse Error: ");
    } else if (e instanceof SemanticException) {
        outfd.write("Semantic Exception: \n");
    } else {
        outfd.close();
        throw e;
    }
    outfd.write(e.getMessage());
    outfd.close();
    QTestProcessExecResult result = qTestResultProcessor.executeDiffCommand(outf.getPath(), expf, false);
    if (QTestSystemProperties.shouldOverwriteResults()) {
        qTestResultProcessor.overwriteResults(outf.getPath(), expf);
        return QTestProcessExecResult.createWithoutOutput(0);
    }
    return result;
}
Also used: FileWriter (java.io.FileWriter), ParseException (org.apache.hadoop.hive.ql.parse.ParseException), File (java.io.File), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
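
One design note: the FileWriter above must be closed by hand on every path, which is easy to get wrong. A sketch of the same writing logic with try-with-resources, reusing the method's locals (JDK-only, behavior preserved):

try (FileWriter outfd = new FileWriter(outf)) {
    if (e instanceof ParseException) {
        outfd.write("Parse Error: ");
    } else if (e instanceof SemanticException) {
        outfd.write("Semantic Exception: \n");
    } else {
        // Unexpected exception type: rethrow; the writer is still closed automatically.
        throw e;
    }
    outfd.write(e.getMessage());
}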

Example 4 with ParseException

Use of org.apache.hadoop.hive.ql.parse.ParseException in project hive by apache.

From the class Action, the method fromMetastoreExpression:

public static Action fromMetastoreExpression(String metastoreActionExpression) {
    ParseDriver driver = new ParseDriver();
    ASTNode node = null;
    try {
        node = driver.parseTriggerActionExpression(metastoreActionExpression);
    } catch (ParseException e) {
        throw new IllegalArgumentException("Invalid action expression: " + metastoreActionExpression, e);
    }
    if (node == null || node.getChildCount() != 2 || node.getChild(1).getType() != HiveParser.EOF) {
        throw new IllegalArgumentException("Invalid action expression: " + metastoreActionExpression);
    }
    node = (ASTNode) node.getChild(0);
    switch(node.getType()) {
        case HiveParser.KW_KILL:
            if (node.getChildCount() != 0) {
                throw new IllegalArgumentException("Invalid KILL action");
            }
            return new Action(Type.KILL_QUERY);
        case HiveParser.KW_MOVE:
            {
                if (node.getChildCount() != 1) {
                    throw new IllegalArgumentException("Invalid move to action, expected poolPath");
                }
                Tree poolNode = node.getChild(0);
                StringBuilder poolPath = new StringBuilder(poolNode.getText());
                for (int i = 0; i < poolNode.getChildCount(); ++i) {
                    poolPath.append(poolNode.getChild(i).getText());
                }
                return new Action(Type.MOVE_TO_POOL, poolPath.toString());
            }
        default:
            throw new IllegalArgumentException("Unhandled action expression, type: " + node.getType() + ": " + metastoreActionExpression);
    }
}
Also used: ParseDriver (org.apache.hadoop.hive.ql.parse.ParseDriver), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), Tree (org.antlr.runtime.tree.Tree), ParseException (org.apache.hadoop.hive.ql.parse.ParseException)
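
A short usage sketch. The expression strings follow the workload-management trigger-action grammar this method parses (KILL and MOVE TO a pool); the pool path "etl.high" is a hypothetical example:

// A KILL action takes no arguments.
Action kill = Action.fromMetastoreExpression("KILL");

// A MOVE action carries the target pool path, reassembled from the parse tree
// (dotted paths are concatenated from the pool node's children).
Action move = Action.fromMetastoreExpression("MOVE TO etl.high");

// Anything the trigger grammar rejects surfaces as IllegalArgumentException,
// with the underlying ParseException attached as its cause.
try {
    Action.fromMetastoreExpression("MOVE");
} catch (IllegalArgumentException expected) {
    System.err.println(expected.getMessage());
}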

Example 5 with ParseException

Use of org.apache.hadoop.hive.ql.parse.ParseException in project hive by apache.

From the class GenericUDTFGetSplits, the method createPlanFragment:

private PlanFragment createPlanFragment(String query, ApplicationId splitsAppId) throws HiveException {
    HiveConf conf = new HiveConf(SessionState.get().getConf());
    HiveConf.setVar(conf, ConfVars.HIVEFETCHTASKCONVERSION, "none");
    HiveConf.setVar(conf, ConfVars.HIVEQUERYRESULTFILEFORMAT, PlanUtils.LLAP_OUTPUT_FORMAT_KEY);
    String originalMode = HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_MODE);
    HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, "llap");
    HiveConf.setBoolVar(conf, ConfVars.HIVE_TEZ_GENERATE_CONSISTENT_SPLITS, true);
    HiveConf.setBoolVar(conf, ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS, true);
    conf.setBoolean(TezSplitGrouper.TEZ_GROUPING_NODE_LOCAL_ONLY, true);
    // Tez/LLAP requires RPC query plan
    HiveConf.setBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN, true);
    HiveConf.setBoolVar(conf, ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED, false);
    if (schemaSplitOnly) {
        // Schema only
        try {
            List<FieldSchema> fieldSchemas = ParseUtils.parseQueryAndGetSchema(conf, query);
            Schema schema = new Schema(convertSchema(fieldSchemas));
            return new PlanFragment(null, schema, null);
        } catch (ParseException e) {
            throw new HiveException(e);
        }
    }
    try {
        jc = DagUtils.getInstance().createConfiguration(conf);
    } catch (IOException e) {
        throw new HiveException(e);
    }
    // Instantiate Driver to compile the query passed in.
    // This UDF is running as part of an existing query, which may already be using the
    // SessionState TxnManager. If this new Driver also tries to use the same TxnManager
    // then this may mess up the existing state of the TxnManager.
    // So initialize the new Driver with a new TxnManager so that it does not use the
    // Session TxnManager that is already in use.
    HiveTxnManager txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    Driver driver = new Driver(new QueryState.Builder().withHiveConf(conf).nonIsolated().build(), null, txnManager);
    DriverCleanup driverCleanup = new DriverCleanup(driver, txnManager, splitsAppId.toString());
    boolean needsCleanup = true;
    try {
        try {
            driver.compileAndRespond(query, false);
        } catch (CommandProcessorException e) {
            throw new HiveException("Failed to compile query", e);
        }
        QueryPlan plan = driver.getPlan();
        limitQuery = plan.getQueryProperties().getOuterQueryLimit() != -1;
        List<Task<?>> roots = plan.getRootTasks();
        Schema schema = convertSchema(plan.getResultSchema());
        boolean fetchTask = plan.getFetchTask() != null;
        TezWork tezWork;
        if (roots == null || roots.size() != 1 || !(roots.get(0) instanceof TezTask)) {
            // fetch task query
            if (fetchTask) {
                tezWork = null;
            } else {
                throw new HiveException("Was expecting a single TezTask or FetchTask.");
            }
        } else {
            tezWork = ((TezTask) roots.get(0)).getWork();
        }
        // A query with a limit clause may return more than "n" rows if its splits are
        // read directly. Therefore, a limit query needs to be materialized.
        if (tezWork == null || tezWork.getAllWork().size() != 1 || limitQuery) {
            String tableName = "table_" + UUID.randomUUID().toString().replaceAll("-", "");
            String storageFormatString = getTempTableStorageFormatString(conf);
            String ctas = "create temporary table " + tableName + " " + storageFormatString + " as " + query;
            LOG.info("Materializing the query for LLAPIF; CTAS: " + ctas);
            driver.releaseLocksAndCommitOrRollback(false);
            driver.releaseResources();
            HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, originalMode);
            try {
                driver.run(ctas);
            } catch (CommandProcessorException e) {
                throw new HiveException("Failed to create temp table [" + tableName + "]", e);
            }
            HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, "llap");
            query = "select * from " + tableName;
            try {
                driver.compileAndRespond(query, true);
            } catch (CommandProcessorException e) {
                throw new HiveException("Failed to select from table [" + tableName + "]", e);
            }
            plan = driver.getPlan();
            roots = plan.getRootTasks();
            schema = convertSchema(plan.getResultSchema());
            if (roots == null || roots.size() != 1 || !(roots.get(0) instanceof TezTask)) {
                throw new HiveException("Was expecting a single TezTask.");
            }
            tezWork = ((TezTask) roots.get(0)).getWork();
        } else {
            // The read will have READ_COMMITTED level semantics.
            try {
                driver.lockAndRespond();
            } catch (CommandProcessorException cpr1) {
                throw new HiveException("Failed to acquire locks", cpr1);
            }
            // Attach the resources to the session cleanup.
            SessionState.get().addCleanupItem(driverCleanup);
            needsCleanup = false;
        }
        // Pass the ValidTxnList and ValidTxnWriteIdList snapshot configurations corresponding to the input query
        HiveConf driverConf = driver.getConf();
        String validTxnString = driverConf.get(ValidTxnList.VALID_TXNS_KEY);
        if (validTxnString != null) {
            jc.set(ValidTxnList.VALID_TXNS_KEY, validTxnString);
        }
        String validWriteIdString = driverConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
        if (validWriteIdString != null) {
            assert validTxnString != null;
            jc.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, validWriteIdString);
        }
        return new PlanFragment(tezWork, schema, jc);
    } finally {
        if (needsCleanup) {
            if (driverCleanup != null) {
                try {
                    driverCleanup.close();
                } catch (IOException err) {
                    throw new HiveException(err);
                }
            } else if (driver != null) {
                driver.close();
                driver.destroy();
            }
        }
    }
}
Also used: TezTask (org.apache.hadoop.hive.ql.exec.tez.TezTask), Task (org.apache.hadoop.hive.ql.exec.Task), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), CommandProcessorException (org.apache.hadoop.hive.ql.processors.CommandProcessorException), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), Schema (org.apache.hadoop.hive.llap.Schema), TaskSpecBuilder (org.apache.tez.dag.api.TaskSpecBuilder), Driver (org.apache.hadoop.hive.ql.Driver), IOException (java.io.IOException), QueryPlan (org.apache.hadoop.hive.ql.QueryPlan), HiveTxnManager (org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager), HiveConf (org.apache.hadoop.hive.conf.HiveConf), ParseException (org.apache.hadoop.hive.ql.parse.ParseException), TezWork (org.apache.hadoop.hive.ql.plan.TezWork)
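
The schema-only branch at the top of the method is self-contained enough to isolate. A minimal sketch of just that path, assuming convertSchema is the surrounding class's helper used above:

private Schema parseSchemaOnly(HiveConf conf, String query) throws HiveException {
    try {
        // Parse and analyze only as far as needed to derive the result schema;
        // no plan is compiled and no cluster work is launched.
        List<FieldSchema> fieldSchemas = ParseUtils.parseQueryAndGetSchema(conf, query);
        return new Schema(convertSchema(fieldSchemas));
    } catch (ParseException e) {
        // Wrap so callers only need to handle HiveException.
        throw new HiveException(e);
    }
}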

Aggregations

ParseException (org.apache.hadoop.hive.ql.parse.ParseException) - 8 usages
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode) - 5 usages
IOException (java.io.IOException) - 3 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) - 3 usages
ParseDriver (org.apache.hadoop.hive.ql.parse.ParseDriver) - 3 usages
HiveConf (org.apache.hadoop.hive.conf.HiveConf) - 2 usages
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema) - 2 usages
HiveTxnManager (org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) - 2 usages
BaseSemanticAnalyzer (org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer) - 2 usages
ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext) - 2 usages
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) - 2 usages
ImmutableMap (com.google.common.collect.ImmutableMap) - 1 usage
File (java.io.File) - 1 usage
FileWriter (java.io.FileWriter) - 1 usage
HashMap (java.util.HashMap) - 1 usage
LinkedHashMap (java.util.LinkedHashMap) - 1 usage
Map (java.util.Map) - 1 usage
Tree (org.antlr.runtime.tree.Tree) - 1 usage
Configurable (org.apache.hadoop.conf.Configurable) - 1 usage
HiveVariableSource (org.apache.hadoop.hive.conf.HiveVariableSource) - 1 usage