Example 61 with CommandProcessorException

Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.

The class DriverTxnHandler, method acquireLocks.

/**
 * Acquire read and write locks needed by the statement. The list of objects to be locked are obtained from the inputs
 * and outputs populated by the compiler. Locking strategy depends on HiveTxnManager and HiveLockManager configured.
 *
 * This method also records the list of valid transactions. This must be done after any transactions have been opened.
 */
private void acquireLocks() throws CommandProcessorException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
    if (!driverContext.getTxnManager().isTxnOpen() && driverContext.getTxnManager().supportsAcid()) {
        /* Non-ACID txn managers don't support txns but forward lock requests to lock managers.
           An ACID txn manager requires all locks to be associated with a txn, so if we end up here
           without an open txn, it's because we are processing something like "use <database>",
           which by definition needs no locks. */
        return;
    }
    try {
        // Ensure we answer any metadata calls with fresh responses
        driverContext.getQueryState().disableHMSCache();
        setWriteIdForAcidFileSinks();
        allocateWriteIdForAcidAnalyzeTable();
        boolean hasAcidDdl = setWriteIdForAcidDdl();
        acquireLocksInternal();
        if (driverContext.getPlan().hasAcidResourcesInQuery() || hasAcidDdl) {
            recordValidWriteIds();
        }
    } catch (Exception e) {
        String errorMessage;
        if (driverState.isDestroyed() || driverState.isAborted() || driverState.isClosed()) {
            errorMessage = String.format("Ignore lock acquisition related exception in terminal state (%s): %s", driverState.toString(), e.getMessage());
            CONSOLE.printInfo(errorMessage);
        } else {
            errorMessage = String.format("FAILED: Error in acquiring locks: %s", e.getMessage());
            CONSOLE.printError(errorMessage, "\n" + StringUtils.stringifyException(e));
        }
        throw DriverUtils.createProcessorException(driverContext, 10, errorMessage, ErrorMsg.findSQLState(e.getMessage()), e);
    } finally {
        driverContext.getQueryState().enableHMSCache();
        perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
    }
}
Also used : PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) CommandProcessorException(org.apache.hadoop.hive.ql.processors.CommandProcessorException)
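
On the caller side, a failure in acquireLocks() surfaces as a CommandProcessorException thrown out of Driver.run(). A minimal caller-side sketch, with the Driver constructed the same way Example 63 below constructs it; the conf, ctx and lineageState parameters and the query text are placeholders, and the whole class is an illustration rather than canonical API usage:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hadoop.hive.ql.session.LineageState;

public class LockAwareCaller {

    // Sketch only: run a statement and classify lock-acquisition failures.
    static void runWithLockHandling(HiveConf conf, Context ctx, LineageState lineageState, String query) throws Exception {
        try (Driver driver = new Driver(conf, ctx, lineageState)) {
            driver.run(query);
        } catch (CommandProcessorException e) {
            // acquireLocks() wraps the underlying failure via DriverUtils.createProcessorException,
            // so the original cause stays reachable through getCause(), just as Example 63
            // relies on for ReCompileException.
            if (e.getCause() instanceof LockException) {
                // A retry with backoff, or a user-facing lock-conflict message, would go here.
            }
            throw e;
        }
    }
}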

Example 62 with CommandProcessorException

Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.

The class Executor, method execute.

public void execute() throws CommandProcessorException {
    SessionState.getPerfLogger().perfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
    boolean noName = Strings.isNullOrEmpty(driverContext.getConf().get(MRJobConfig.JOB_NAME));
    checkState();
    // Whether any error occurred during query execution; used by the query lifetime hooks.
    boolean executionError = false;
    try {
        LOG.info("Executing command(queryId=" + driverContext.getQueryId() + "): " + driverContext.getQueryString());
        // TODO: should this use getUserFromAuthenticator?
        hookContext = new PrivateHookContext(driverContext, context);
        preExecutionActions();
        preExecutionCacheActions();
        // Disable HMS cache so any metadata calls during execution get fresh responses.
        driverContext.getQueryState().disableHMSCache();
        runTasks(noName);
        driverContext.getQueryState().enableHMSCache();
        postExecutionCacheActions();
        postExecutionActions();
    } catch (CommandProcessorException cpe) {
        executionError = true;
        throw cpe;
    } catch (Throwable e) {
        executionError = true;
        DriverUtils.checkInterrupted(driverState, driverContext, "during query execution: \n" + e.getMessage(), hookContext, SessionState.getPerfLogger());
        handleException(hookContext, e);
    } finally {
        cleanUp(noName, hookContext, executionError);
        driverContext.getQueryState().enableHMSCache();
    }
}
Also used : CommandProcessorException(org.apache.hadoop.hive.ql.processors.CommandProcessorException) PrivateHookContext(org.apache.hadoop.hive.ql.hooks.PrivateHookContext)
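
Note the two-tier catch above: a CommandProcessorException is rethrown untouched because it is already in the shape callers expect, while any other Throwable is flagged and funneled through handleException. Distilled to its control flow, with hypothetical names standing in for the Hive plumbing, the pattern looks like this:

import java.util.function.Consumer;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;

public final class TwoTierCatch {

    interface Work {
        void run() throws Exception;
    }

    // Sketch of the execute() shape: flag the error for the lifetime hooks, pass
    // CommandProcessorException through untouched, wrap everything else, always clean up.
    static void execute(Work work, Consumer<Boolean> cleanup) throws CommandProcessorException {
        boolean executionError = false;
        try {
            work.run();
        } catch (CommandProcessorException cpe) {
            executionError = true;
            throw cpe;
        } catch (Throwable t) {
            executionError = true;
            // Stand-in for handleException(); the String constructor is the one Example 64 uses.
            throw new CommandProcessorException(t.getMessage());
        } finally {
            // Mirrors cleanUp(noName, hookContext, executionError) followed by enableHMSCache().
            cleanup.accept(executionError);
        }
    }
}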

Example 63 with CommandProcessorException

Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.

The class ExplainSemanticAnalyzer, method analyzeInternal.

@Override
public void analyzeInternal(ASTNode ast) throws SemanticException {
    final int childCount = ast.getChildCount();
    // Child 0 is the query itself (TOK_QUERY); the explain options start at child 1.
    int i = 1;
    while (i < childCount) {
        int explainOptions = ast.getChild(i).getType();
        if (explainOptions == HiveParser.KW_FORMATTED) {
            config.setFormatted(true);
        } else if (explainOptions == HiveParser.KW_EXTENDED) {
            config.setExtended(true);
        } else if (explainOptions == HiveParser.KW_DEPENDENCY) {
            config.setDependency(true);
        } else if (explainOptions == HiveParser.KW_CBO) {
            config.setCbo(true);
        } else if (explainOptions == HiveParser.KW_COST) {
            config.setCboCost(true);
        } else if (explainOptions == HiveParser.KW_JOINCOST) {
            config.setCboJoinCost(true);
        } else if (explainOptions == HiveParser.KW_LOGICAL) {
            config.setLogical(true);
        } else if (explainOptions == HiveParser.KW_AUTHORIZATION) {
            config.setAuthorize(true);
        } else if (explainOptions == HiveParser.KW_ANALYZE) {
            config.setAnalyze(AnalyzeState.RUNNING);
            config.setExplainRootPath(ctx.getMRTmpPath());
        } else if (explainOptions == HiveParser.KW_VECTORIZATION) {
            config.setVectorization(true);
            if (i + 1 < childCount) {
                int vectorizationOption = ast.getChild(i + 1).getType();
                // [ONLY]
                if (vectorizationOption == HiveParser.TOK_ONLY) {
                    config.setVectorizationOnly(true);
                    i++;
                    if (i + 1 >= childCount) {
                        break;
                    }
                    vectorizationOption = ast.getChild(i + 1).getType();
                }
                // [SUMMARY|OPERATOR|EXPRESSION|DETAIL]
                if (vectorizationOption == HiveParser.TOK_SUMMARY) {
                    config.setVectorizationDetailLevel(VectorizationDetailLevel.SUMMARY);
                    i++;
                } else if (vectorizationOption == HiveParser.TOK_OPERATOR) {
                    config.setVectorizationDetailLevel(VectorizationDetailLevel.OPERATOR);
                    i++;
                } else if (vectorizationOption == HiveParser.TOK_EXPRESSION) {
                    config.setVectorizationDetailLevel(VectorizationDetailLevel.EXPRESSION);
                    i++;
                } else if (vectorizationOption == HiveParser.TOK_DETAIL) {
                    config.setVectorizationDetailLevel(VectorizationDetailLevel.DETAIL);
                    i++;
                }
            }
        } else if (explainOptions == HiveParser.KW_LOCKS) {
            config.setLocks(true);
        } else if (explainOptions == HiveParser.KW_AST) {
            config.setAst(true);
        } else if (explainOptions == HiveParser.KW_DEBUG) {
            config.setDebug(true);
        } else if (explainOptions == HiveParser.KW_DDL) {
            config.setDDL(true);
            config.setCbo(true);
            config.setVectorization(true);
        } else {
        // UNDONE: UNKNOWN OPTION?
        }
        i++;
    }
    ctx.setExplainConfig(config);
    ctx.setExplainPlan(true);
    ASTNode input = (ASTNode) ast.getChild(0);
    // step 2 (ANALYZE_STATE.ANALYZING), explain the query and provide the runtime #rows collected.
    if (config.getAnalyze() == AnalyzeState.RUNNING) {
        String query = ctx.getTokenRewriteStream().toString(input.getTokenStartIndex(), input.getTokenStopIndex());
        LOG.info("Explain analyze (running phase) for query " + query);
        conf.unset(ValidTxnList.VALID_TXNS_KEY);
        conf.unset(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
        Context runCtx = null;
        try {
            runCtx = new Context(conf);
            // runCtx and ctx share the configuration, but not isExplainPlan()
            runCtx.setExplainConfig(config);
            try (Driver driver = new Driver(conf, runCtx, queryState.getLineageState())) {
                driver.run(query);
                // Drain the results; only the runtime row counts collected as a side effect are needed.
                while (driver.getResults(new ArrayList<String>())) {
                }
            } catch (CommandProcessorException e) {
                if (e.getCause() instanceof ReCompileException) {
                    throw (ReCompileException) e.getCause();
                } else {
                    throw new SemanticException(e.getMessage(), e);
                }
            }
            config.setOpIdToRuntimeNumRows(aggregateStats(config.getExplainRootPath()));
        } catch (IOException e1) {
            throw new SemanticException(e1);
        }
        ctx.resetOpContext();
        ctx.resetStream();
        TaskFactory.resetId();
        LOG.info("Explain analyze (analyzing phase) for query " + query);
        config.setAnalyze(AnalyzeState.ANALYZING);
    }
    // Creating new QueryState unfortunately causes all .q.out to change - do this in a separate ticket
    // Sharing QueryState between generating the plan and executing the query seems bad
    // BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(new QueryState(queryState.getConf()), input);
    BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, input);
    sem.analyze(input, ctx);
    sem.validate();
    inputs = sem.getInputs();
    outputs = sem.getOutputs();
    ctx.setResFile(ctx.getLocalTmpPath());
    List<Task<?>> tasks = sem.getAllRootTasks();
    if (tasks == null) {
        tasks = Collections.emptyList();
    }
    FetchTask fetchTask = sem.getFetchTask();
    if (fetchTask != null) {
        // Initialize fetch work such that operator tree will be constructed.
        fetchTask.getWork().initializeForFetch(ctx.getOpContext());
    }
    if (sem instanceof SemanticAnalyzer) {
        pCtx = sem.getParseContext();
    }
    config.setUserLevelExplain(!config.isExtended()
            && !config.isFormatted()
            && !config.isDependency()
            && !config.isCbo()
            && !config.isLogical()
            && !config.isVectorization()
            && !config.isAuthorize()
            && ((HiveConf.getBoolVar(ctx.getConf(), HiveConf.ConfVars.HIVE_EXPLAIN_USER)
                    && HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez"))
                || (HiveConf.getBoolVar(ctx.getConf(), HiveConf.ConfVars.HIVE_SPARK_EXPLAIN_USER)
                    && HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark"))));
    ExplainWork work = new ExplainWork(ctx.getResFile(), pCtx, tasks, fetchTask, input, sem, config, ctx.getCboInfo(), ctx.getOptimizedSql(), ctx.getCalcitePlan());
    work.setAppendTaskType(HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES));
    ExplainTask explTask = (ExplainTask) TaskFactory.get(work);
    fieldList = ExplainTask.getResultSchema();
    rootTasks.add(explTask);
}
Also used : StatsCollectionContext(org.apache.hadoop.hive.ql.stats.StatsCollectionContext) Context(org.apache.hadoop.hive.ql.Context) Task(org.apache.hadoop.hive.ql.exec.Task) FetchTask(org.apache.hadoop.hive.ql.exec.FetchTask) ExplainTask(org.apache.hadoop.hive.ql.exec.ExplainTask) CommandProcessorException(org.apache.hadoop.hive.ql.processors.CommandProcessorException) ExplainTask(org.apache.hadoop.hive.ql.exec.ExplainTask) Driver(org.apache.hadoop.hive.ql.Driver) ExplainWork(org.apache.hadoop.hive.ql.plan.ExplainWork) ReCompileException(org.apache.hadoop.hive.ql.reexec.ReCompileException) IOException(java.io.IOException) FetchTask(org.apache.hadoop.hive.ql.exec.FetchTask)
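
The catch block above rethrows a wrapped ReCompileException directly and converts anything else into a SemanticException. That unwrap-or-hand-back step generalizes; here is a small, purely hypothetical helper capturing it:

import org.apache.hadoop.hive.ql.processors.CommandProcessorException;

public final class CauseUnwrap {

    private CauseUnwrap() {
    }

    // Hypothetical helper: if the CommandProcessorException wraps a cause of the given
    // type, rethrow that cause; otherwise hand the exception back for the caller to wrap.
    public static <T extends Throwable> CommandProcessorException rethrowIfCause(
            CommandProcessorException e, Class<T> type) throws T {
        if (type.isInstance(e.getCause())) {
            throw type.cast(e.getCause());
        }
        return e;
    }
}

With this helper the catch block collapses to a single line: throw new SemanticException(rethrowIfCause(e, ReCompileException.class).getMessage(), e);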

Example 64 with CommandProcessorException

Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.

The class ReExecDriver, method checkHookConfig.

private void checkHookConfig() throws CommandProcessorException {
    String strategies = coreDriver.getConf().getVar(ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES);
    CBOFallbackStrategy fallbackStrategy = CBOFallbackStrategy.valueOf(coreDriver.getConf().getVar(ConfVars.HIVE_CBO_FALLBACK_STRATEGY));
    if (fallbackStrategy.allowsRetry() && (strategies == null || !Arrays.stream(strategies.split(",")).anyMatch("recompile_without_cbo"::equals))) {
        String errorMsg = "Invalid configuration. If fallbackStrategy is set to " + fallbackStrategy.name() + " then " + ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES.varname + " should contain 'recompile_without_cbo'";
        CONSOLE.printError(errorMsg);
        throw new CommandProcessorException(errorMsg);
    }
}
Also used : CommandProcessorException(org.apache.hadoop.hive.ql.processors.CommandProcessorException) CBOFallbackStrategy(org.apache.hadoop.hive.ql.parse.CBOFallbackStrategy)
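
In other words, whenever the configured CBO fallback strategy allows a retry, the comma-separated re-execution strategy list must contain recompile_without_cbo. A configuration sketch that satisfies the check, reusing the ConfVars constants from the snippet so no property-name strings are hard-coded; the chosen values are assumptions to verify against your Hive version:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public final class ReExecConfSketch {

    // Sketch: build a HiveConf that passes checkHookConfig().
    static HiveConf retryFriendlyConf() {
        HiveConf conf = new HiveConf();
        // Assumption: CONSERVATIVE is one of the CBOFallbackStrategy values for which
        // allowsRetry() returns true; adjust per Hive version.
        conf.setVar(ConfVars.HIVE_CBO_FALLBACK_STRATEGY, "CONSERVATIVE");
        // The check only requires that the list contain recompile_without_cbo;
        // the other entry is illustrative.
        conf.setVar(ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, "reoptimize,recompile_without_cbo");
        return conf;
    }
}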

Example 65 with CommandProcessorException

Use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.

The class ReExecDriver, method run.

@Override
public CommandProcessorResponse run() throws CommandProcessorException {
    executionIndex = 0;
    int maxExecutions = 1 + coreDriver.getConf().getIntVar(ConfVars.HIVE_QUERY_MAX_REEXECUTION_COUNT);
    while (true) {
        executionIndex++;
        for (IReExecutionPlugin p : plugins) {
            p.beforeExecute(executionIndex, explainReOptimization);
        }
        coreDriver.getContext().setExecutionIndex(executionIndex);
        LOG.info("Execution #{} of query", executionIndex);
        CommandProcessorResponse cpr = null;
        CommandProcessorException cpe = null;
        try {
            cpr = coreDriver.run();
        } catch (CommandProcessorException e) {
            cpe = e;
        }
        PlanMapper oldPlanMapper = coreDriver.getPlanMapper();
        boolean success = cpr != null;
        plugins.forEach(p -> p.afterExecute(oldPlanMapper, success));
        boolean shouldReExecute = explainReOptimization && executionIndex == 1;
        shouldReExecute |= cpr == null && plugins.stream().anyMatch(p -> p.shouldReExecute(executionIndex));
        if (executionIndex >= maxExecutions || !shouldReExecute) {
            if (cpr != null) {
                return cpr;
            } else {
                throw cpe;
            }
        }
        LOG.info("Preparing to re-execute query");
        plugins.forEach(IReExecutionPlugin::prepareToReExecute);
        try {
            coreDriver.compileAndRespond(currentQuery);
        } catch (CommandProcessorException e) {
            LOG.error("Recompilation of the query failed; this is unexpected.");
            // FIXME: somehow record that re-execution compilation has failed; the query had been successfully compiled before?
            throw e;
        }
        PlanMapper newPlanMapper = coreDriver.getPlanMapper();
        if (!explainReOptimization && !plugins.stream().anyMatch(p -> p.shouldReExecuteAfterCompile(executionIndex, oldPlanMapper, newPlanMapper))) {
            LOG.info("re-running the query would probably not yield better results; returning with last error");
            // FIXME: retain old error; or create a new one?
            return cpr;
        }
    }
}
Also used : CommandProcessorException(org.apache.hadoop.hive.ql.processors.CommandProcessorException) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) PlanMapper(org.apache.hadoop.hive.ql.plan.mapper.PlanMapper)
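
Stripped of the Hive plumbing, run() is a bounded retry loop: attempt the query, let plugins vote on re-execution, recompile, and go around again up to a configured maximum. A self-contained sketch of that control flow with hypothetical types (the explainReOptimization special case and the post-compile plugin vote are omitted):

import java.util.List;

public final class BoundedRetryLoop {

    // Hypothetical stand-ins for IReExecutionPlugin and the core driver call.
    interface Plugin {
        boolean shouldReExecute(int executionIndex);
        void prepareToReExecute();
    }

    interface Attempt<R> {
        R run() throws Exception;
    }

    // Mirrors the skeleton of ReExecDriver.run(): retry while a plugin votes yes,
    // stop at maxExecutions, and rethrow the last failure when the budget runs out.
    static <R> R runWithRetries(Attempt<R> attempt, List<Plugin> plugins, int maxExecutions) throws Exception {
        int executionIndex = 0;
        while (true) {
            executionIndex++;
            Exception failure;
            try {
                return attempt.run(); // success ends the loop immediately
            } catch (Exception e) {
                failure = e;
            }
            final int idx = executionIndex;
            boolean shouldReExecute = plugins.stream().anyMatch(p -> p.shouldReExecute(idx));
            if (executionIndex >= maxExecutions || !shouldReExecute) {
                throw failure;
            }
            plugins.forEach(Plugin::prepareToReExecute);
        }
    }
}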

Aggregations

CommandProcessorException (org.apache.hadoop.hive.ql.processors.CommandProcessorException): 85
Test (org.junit.Test): 42
IOException (java.io.IOException): 14
CommandProcessorResponse (org.apache.hadoop.hive.ql.processors.CommandProcessorResponse): 14
Driver (org.apache.hadoop.hive.ql.Driver): 12
ArrayList (java.util.ArrayList): 10
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 10
QTestProcessExecResult (org.apache.hadoop.hive.ql.QTestProcessExecResult): 9
Path (org.apache.hadoop.fs.Path): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 7
CliSessionState (org.apache.hadoop.hive.cli.CliSessionState): 6
File (java.io.File): 5
IDriver (org.apache.hadoop.hive.ql.IDriver): 5
FileNotFoundException (java.io.FileNotFoundException): 4
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 4
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 4
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 3
Map (java.util.Map): 3
Nullable (javax.annotation.Nullable): 3
Database (org.apache.hadoop.hive.metastore.api.Database): 3