Search in sources :

Example 6 with HiveTxnManager

use of org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager in project hive by apache.

the class Driver method compile.

// deferClose indicates whether the close/destroy should be deferred when the process has been
// interrupted; it should be set to true if compile is called from within another method, such as
// runInternal, which defers the close until the end of that method.
public int compile(String command, boolean resetTaskIds, boolean deferClose) {
    PerfLogger perfLogger = SessionState.getPerfLogger(true);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
    lDrvState.stateLock.lock();
    try {
        lDrvState.driverState = DriverState.COMPILING;
    } finally {
        lDrvState.stateLock.unlock();
    }
    command = new VariableSubstitution(new HiveVariableSource() {

        @Override
        public Map<String, String> getHiveVariable() {
            return SessionState.get().getHiveVariables();
        }
    }).substitute(conf, command);
    String queryStr = command;
    try {
        // command should be redacted to avoid logging sensitive data
        queryStr = HookUtils.redactLogString(conf, command);
    } catch (Exception e) {
        LOG.warn("WARNING! Query command could not be redacted." + e);
    }
    if (isInterrupted()) {
        // indicates whether resources need to be cleaned up
        return handleInterruption("at beginning of compilation.");
    }
    if (ctx != null && ctx.getExplainAnalyze() != AnalyzeState.RUNNING) {
        // close the existing ctx etc. before compiling a new query, but do not destroy the driver
        closeInProcess(false);
    }
    if (resetTaskIds) {
        TaskFactory.resetId();
    }
    String queryId = conf.getVar(HiveConf.ConfVars.HIVEQUERYID);
    //save some info for webUI for use after plan is freed
    this.queryDisplay.setQueryStr(queryStr);
    this.queryDisplay.setQueryId(queryId);
    LOG.info("Compiling command(queryId=" + queryId + "): " + queryStr);
    SessionState.get().setupQueryCurrentTimestamp();
    // Whether any error occurred during query compilation. Used for query lifetime hook.
    boolean compileError = false;
    try {
        // Initialize the transaction manager.  This must be done before analyze is called.
        final HiveTxnManager txnManager = SessionState.get().initTxnMgr(conf);
        // In case the user hits Ctrl-C twice to kill the Hive CLI JVM, we want to release locks.
        // If compile is called multiple times, clear the old shutdown hook first.
        ShutdownHookManager.removeShutdownHook(shutdownRunner);
        shutdownRunner = new Runnable() {

            @Override
            public void run() {
                try {
                    releaseLocksAndCommitOrRollback(false, txnManager);
                } catch (LockException e) {
                    LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " + e.getMessage());
                }
            }
        };
        ShutdownHookManager.addShutdownHook(shutdownRunner, SHUTDOWN_HOOK_PRIORITY);
        if (isInterrupted()) {
            return handleInterruption("before parsing and analysing the query");
        }
        if (ctx == null) {
            ctx = new Context(conf);
        }
        ctx.setTryCount(getTryCount());
        ctx.setCmd(command);
        ctx.setHDFSCleanup(true);
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
        ASTNode tree = ParseUtils.parse(command, ctx);
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
        // Trigger query hook before compilation
        queryHooks = loadQueryHooks();
        if (queryHooks != null && !queryHooks.isEmpty()) {
            QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
            qhc.setHiveConf(conf);
            qhc.setCommand(command);
            for (QueryLifeTimeHook hook : queryHooks) {
                hook.beforeCompile(qhc);
            }
        }
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
        BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree);
        List<HiveSemanticAnalyzerHook> saHooks = getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, HiveSemanticAnalyzerHook.class);
        // Flush the metastore cache. This ensures that we don't pick up objects from a previous
        // query running in this same thread. This has to be done after we get our semantic
        // analyzer (which is when the connection to the metastore is made) but before we analyze,
        // because at that point we need access to the objects.
        Hive.get().getMSC().flushCache();
        // Do semantic analysis and plan generation
        if (saHooks != null && !saHooks.isEmpty()) {
            HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
            hookCtx.setConf(conf);
            hookCtx.setUserName(userName);
            hookCtx.setIpAddress(SessionState.get().getUserIpAddress());
            hookCtx.setCommand(command);
            for (HiveSemanticAnalyzerHook hook : saHooks) {
                tree = hook.preAnalyze(hookCtx, tree);
            }
            sem.analyze(tree, ctx);
            hookCtx.update(sem);
            for (HiveSemanticAnalyzerHook hook : saHooks) {
                hook.postAnalyze(hookCtx, sem.getAllRootTasks());
            }
        } else {
            sem.analyze(tree, ctx);
        }
        // Record any ACID compliant FileSinkOperators we saw so we can add our transaction ID to
        // them later.
        acidSinks = sem.getAcidFileSinks();
        LOG.info("Semantic Analysis Completed");
        // validate the plan
        sem.validate();
        acidInQuery = sem.hasAcidInQuery();
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
        if (isInterrupted()) {
            return handleInterruption("after analyzing query.");
        }
        // get the output schema
        schema = getSchema(sem, conf);
        plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId, queryState.getHiveOperation(), schema);
        conf.setQueryString(queryStr);
        conf.set("mapreduce.workflow.id", "hive_" + queryId);
        conf.set("mapreduce.workflow.name", queryStr);
        // initialize FetchTask right here
        if (plan.getFetchTask() != null) {
            plan.getFetchTask().initialize(queryState, plan, null, ctx.getOpContext());
        }
        //do the authorization check
        if (!sem.skipAuthorization() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
            try {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
                doAuthorization(queryState.getHiveOperation(), sem, command);
            } catch (AuthorizationException authExp) {
                console.printError("Authorization failed:" + authExp.getMessage() + ". Use SHOW GRANT to get more details.");
                errorMessage = authExp.getMessage();
                SQLState = "42000";
                return 403;
            } finally {
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
            }
        }
        if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) {
            String explainOutput = getExplainOutput(sem, plan, tree);
            if (explainOutput != null) {
                LOG.info("EXPLAIN output for queryid " + queryId + " : " + explainOutput);
                if (conf.isWebUiQueryInfoCacheEnabled()) {
                    queryDisplay.setExplainPlan(explainOutput);
                }
            }
        }
        return 0;
    } catch (Exception e) {
        if (isInterrupted()) {
            return handleInterruption("during query compilation: " + e.getMessage());
        }
        compileError = true;
        ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
        errorMessage = "FAILED: " + e.getClass().getSimpleName();
        if (error != ErrorMsg.GENERIC_ERROR) {
            errorMessage += " [Error " + error.getErrorCode() + "]:";
        }
        // HIVE-4889
        if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) {
            errorMessage += " " + e.getCause().getMessage();
        } else {
            errorMessage += " " + e.getMessage();
        }
        if (error == ErrorMsg.TXNMGR_NOT_ACID) {
            errorMessage += ". Failed command: " + queryStr;
        }
        SQLState = error.getSQLState();
        downstreamError = e;
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        // TODO: this is bad if returned as a cmd shell exit code,
        // since it exceeds the valid range of shell return values
        return error.getErrorCode();
    } finally {
        // Trigger the post-compilation query hook. Note that if compilation failed,
        // the before/after execution hooks will never be executed.
        try {
            if (queryHooks != null && !queryHooks.isEmpty()) {
                QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
                qhc.setHiveConf(conf);
                qhc.setCommand(command);
                for (QueryLifeTimeHook hook : queryHooks) {
                    hook.afterCompile(qhc, compileError);
                }
            }
        } catch (Exception e) {
            LOG.warn("Failed when invoking query after-compilation hook.", e);
        }
        double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE) / 1000.00;
        ImmutableMap<String, Long> compileHMSTimings = dumpMetaCallTimingWithoutEx("compilation");
        queryDisplay.setHmsTimings(QueryDisplay.Phase.COMPILATION, compileHMSTimings);
        boolean isInterrupted = isInterrupted();
        if (isInterrupted && !deferClose) {
            closeInProcess(true);
        }
        lDrvState.stateLock.lock();
        try {
            if (isInterrupted) {
                lDrvState.driverState = deferClose ? DriverState.EXECUTING : DriverState.ERROR;
            } else {
                lDrvState.driverState = compileError ? DriverState.ERROR : DriverState.COMPILED;
            }
        } finally {
            lDrvState.stateLock.unlock();
        }
        if (isInterrupted) {
            LOG.info("Compiling command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
        } else {
            LOG.info("Completed compiling command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
        }
    }
}
Also used : HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) BaseSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) HiveVariableSource(org.apache.hadoop.hive.conf.HiveVariableSource) QueryLifeTimeHookContextImpl(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) QueryLifeTimeHook(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook) MetricsQueryLifeTimeHook(org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) HiveSemanticAnalyzerHook(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHook) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) HiveAuthzContext(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext) ExecuteWithHookContext(org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext) HookContext(org.apache.hadoop.hive.ql.hooks.HookContext) QueryLifeTimeHookContext(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext) HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) VariableSubstitution(org.apache.hadoop.hive.conf.VariableSubstitution) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) HiveSemanticAnalyzerHookContextImpl(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) QueryLifeTimeHookContext(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
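
A minimal caller sketch for orientation (the Driver constructor, session setup, and query string below are assumptions against the Hive 2.x API, not part of the example above):

HiveConf conf = new HiveConf();
// compile() relies on SessionState.get(), so a session must be started first
SessionState.start(conf);
Driver driver = new Driver(conf);
// resetTaskIds = true, deferClose = false (we are not inside runInternal)
int rc = driver.compile("SELECT 1", true, false);
System.out.println("compile returned " + rc + " (0 = success)");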

Example 7 with HiveTxnManager

use of org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager in project hive by apache.

the class Driver method releaseLocksAndCommitOrRollback.

/**
   * @param commit if true and there is an open transaction, commit it;
   *               if false, roll it back. If there is no open transaction, this parameter is ignored.
   * @param txnManager an optional existing transaction manager retrieved earlier from the session
   *
   **/
private void releaseLocksAndCommitOrRollback(boolean commit, HiveTxnManager txnManager) throws LockException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
    HiveTxnManager txnMgr;
    if (txnManager == null) {
        SessionState ss = SessionState.get();
        txnMgr = ss.getTxnMgr();
    } else {
        txnMgr = txnManager;
    }
    // If we've opened a transaction, we need to commit or roll back rather than explicitly
    // releasing the locks.
    if (txnMgr.isTxnOpen()) {
        if (commit) {
            if (conf.getBoolVar(ConfVars.HIVE_IN_TEST) && conf.getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) {
                txnMgr.rollbackTxn();
            } else {
                //both commit & rollback clear ALL locks for this tx
                txnMgr.commitTxn();
            }
        } else {
            txnMgr.rollbackTxn();
        }
    } else {
        //since there is no tx, we only have locks for current query (if any)
        if (ctx != null && ctx.getHiveLocks() != null) {
            hiveLocks.addAll(ctx.getHiveLocks());
        }
        txnMgr.releaseLocks(hiveLocks);
    }
    hiveLocks.clear();
    if (ctx != null) {
        ctx.setHiveLocks(null);
    }
    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RELEASE_LOCKS);
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager)
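
To illustrate the lifecycle this method closes out, here is a hedged sketch of opening, committing, and rolling back a transaction through HiveTxnManager (the helper method and user name are assumptions, and openTxn's argument list varies across Hive versions):

void runInTxn(Runnable work) throws LockException {
    HiveTxnManager txnMgr = SessionState.get().getTxnMgr();
    txnMgr.openTxn("hive_user");   // hypothetical user; begins a transaction
    try {
        work.run();                // do the work under the open transaction
        txnMgr.commitTxn();        // commit clears all locks held by this transaction
    } catch (RuntimeException e) {
        txnMgr.rollbackTxn();      // rollback also clears the transaction's locks
        throw e;
    }
}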

Example 8 with HiveTxnManager

use of org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager in project hive by apache.

the class HouseKeeperServiceBase method start.

@Override
public void start(HiveConf hiveConf) throws Exception {
    this.hiveConf = hiveConf;
    HiveTxnManager mgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(hiveConf);
    if (!mgr.supportsAcid()) {
        LOG.info(this.getClass().getName() + " not started since " + mgr.getClass().getName() + " does not support Acid.");
        //there are no transactions in this case
        return;
    }
    pool = Executors.newScheduledThreadPool(1, new ThreadFactory() {

        private final AtomicInteger threadCounter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, HouseKeeperServiceBase.this.getClass().getName() + "-" + threadCounter.getAndIncrement());
        }
    });
    TimeUnit tu = TimeUnit.MILLISECONDS;
    pool.scheduleAtFixedRate(getScheduedAction(hiveConf, isAliveCounter), getStartDelayMs(), getIntervalMs(), tu);
    LOG.info("Started " + this.getClass().getName() + " with delay/interval = " + getStartDelayMs() + "/" + getIntervalMs() + " " + tu);
}
Also used : ThreadFactory(java.util.concurrent.ThreadFactory) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) TimeUnit(java.util.concurrent.TimeUnit)
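
The scheduled behaviour is supplied by subclasses through getStartDelayMs, getIntervalMs, and getScheduedAction (the misspelling is in the Hive source). A hypothetical subclass sketch, with assumed trivial bodies and an assumed abstract getServiceDescription:

public class DemoHouseKeeper extends HouseKeeperServiceBase {

    @Override
    protected long getStartDelayMs() {
        return 1000L;     // wait one second before the first run
    }

    @Override
    protected long getIntervalMs() {
        return 60_000L;   // then run once a minute
    }

    @Override
    protected Runnable getScheduedAction(HiveConf hiveConf, AtomicInteger isAliveCounter) {
        return () -> {
            isAliveCounter.incrementAndGet();  // signal liveness on every cycle
            // ... perform the periodic ACID housekeeping work here ...
        };
    }

    @Override
    public String getServiceDescription() {
        return "demo housekeeping task";
    }
}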

Example 9 with HiveTxnManager

use of org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager in project hive by apache.

the class DDLSemanticAnalyzer method analyzeShowLocks.

/**
   * Add the task according to the parsed command tree. This is used for the CLI
   * command "SHOW LOCKS;".
   *
   * @param ast
   *          The parsed command tree.
   * @throws SemanticException
   *           Parsing failed
   */
private void analyzeShowLocks(ASTNode ast) throws SemanticException {
    String tableName = null;
    HashMap<String, String> partSpec = null;
    boolean isExtended = false;
    if (ast.getChildCount() >= 1) {
        // table for which show locks is being executed
        for (int i = 0; i < ast.getChildCount(); i++) {
            ASTNode child = (ASTNode) ast.getChild(i);
            if (child.getType() == HiveParser.TOK_TABTYPE) {
                ASTNode tableTypeExpr = child;
                tableName = QualifiedNameUtil.getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0));
                // get partition metadata if partition specified
                if (tableTypeExpr.getChildCount() == 2) {
                    ASTNode partSpecNode = (ASTNode) tableTypeExpr.getChild(1);
                    partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false);
                }
            } else if (child.getType() == HiveParser.KW_EXTENDED) {
                isExtended = true;
            }
        }
    }
    HiveTxnManager txnManager = null;
    try {
        txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    } catch (LockException e) {
        throw new SemanticException(e.getMessage());
    }
    ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), tableName, partSpec, isExtended, txnManager.useNewShowLocksFormat());
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showLocksDesc), conf));
    setFetchTask(createFetchTask(showLocksDesc.getSchema()));
    // Need to initialize the lock manager
    ctx.setNeedLockMgr(true);
}
Also used : DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) ShowLocksDesc(org.apache.hadoop.hive.ql.plan.ShowLocksDesc) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager)
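
A short sketch of the factory lookup the analyzer performs to pick the SHOW LOCKS output format (the HiveConf setup and error handling here are illustrative):

HiveConf conf = new HiveConf();
try {
    HiveTxnManager txnMgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    // the ACID DbTxnManager reports the new format; the legacy ZooKeeper-based
    // lock manager keeps the old SHOW LOCKS output
    boolean newFormat = txnMgr.useNewShowLocksFormat();
    System.out.println("use new SHOW LOCKS format: " + newFormat);
} catch (LockException e) {
    throw new RuntimeException("could not create a transaction manager", e);
}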

Example 10 with HiveTxnManager

use of org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager in project hive by apache.

the class Driver method runInternal.

private CommandProcessorResponse runInternal(String command, boolean alreadyCompiled) throws CommandNeedRetryException {
    errorMessage = null;
    SQLState = null;
    downstreamError = null;
    lDrvState.stateLock.lock();
    try {
        if (alreadyCompiled) {
            if (lDrvState.driverState == DriverState.COMPILED) {
                lDrvState.driverState = DriverState.EXECUTING;
            } else {
                errorMessage = "FAILED: Precompiled query has been cancelled or closed.";
                console.printError(errorMessage);
                return createProcessorResponse(12);
            }
        } else {
            lDrvState.driverState = DriverState.COMPILING;
        }
    } finally {
        lDrvState.stateLock.unlock();
    }
    // a flag that helps set the correct driver state in the finally block by tracking
    // whether the method returned due to an error.
    boolean isFinishedWithError = true;
    try {
        HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf, alreadyCompiled ? ctx.getCmd() : command);
        // Get all the driver run hooks and pre-execute them.
        List<HiveDriverRunHook> driverRunHooks;
        try {
            driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class);
            for (HiveDriverRunHook driverRunHook : driverRunHooks) {
                driverRunHook.preDriverRun(hookContext);
            }
        } catch (Exception e) {
            errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
            SQLState = ErrorMsg.findSQLState(e.getMessage());
            downstreamError = e;
            console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
            return createProcessorResponse(12);
        }
        PerfLogger perfLogger = null;
        int ret;
        if (!alreadyCompiled) {
            // compile internal will automatically reset the perf logger
            ret = compileInternal(command, true);
            // then we continue to use this perf logger
            perfLogger = SessionState.getPerfLogger();
            if (ret != 0) {
                return createProcessorResponse(ret);
            }
        } else {
            // reuse existing perf logger.
            perfLogger = SessionState.getPerfLogger();
            // Since we're reusing the compiled plan, we need to update its start time for current run
            plan.setQueryStartTime(perfLogger.getStartTime(PerfLogger.DRIVER_RUN));
        }
        // The reason we set the txn manager on the ctx here is that each
        // query has its own ctx object. The txn manager is shared across the
        // same Driver instance, which can run multiple queries.
        HiveTxnManager txnManager = SessionState.get().getTxnMgr();
        ctx.setHiveTxnManager(txnManager);
        boolean startTxnImplicitly = false;
        {
            //an error in an open txn does a rollback of the txn
            if (txnManager.isTxnOpen() && !plan.getOperation().isAllowedInTransaction()) {
                assert !txnManager.getAutoCommit() : "didn't expect AC=true";
                return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_IN_TXN, null, plan.getOperationName(), Long.toString(txnManager.getCurrentTxnId())));
            }
            if (!txnManager.isTxnOpen() && plan.getOperation().isRequiresOpenTransaction()) {
                return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_WITHOUT_TXN, null, plan.getOperationName()));
            }
            if (!txnManager.isTxnOpen() && plan.getOperation() == HiveOperation.QUERY && !txnManager.getAutoCommit()) {
                //this effectively makes START TRANSACTION optional and supports JDBC setAutoCommit(false) semantics
                //also, indirectly allows DDL to be executed outside a txn context
                startTxnImplicitly = true;
            }
            if (txnManager.getAutoCommit() && plan.getOperation() == HiveOperation.START_TRANSACTION) {
                return rollback(new CommandProcessorResponse(12, ErrorMsg.OP_NOT_ALLOWED_IN_AUTOCOMMIT, null, plan.getOperationName()));
            }
        }
        if (plan.getOperation() == HiveOperation.SET_AUTOCOMMIT) {
            try {
                if (plan.getAutoCommitValue() && !txnManager.getAutoCommit()) {
                    /*here, if there is an open txn, we want to commit it; this behavior matches
            * https://docs.oracle.com/javase/6/docs/api/java/sql/Connection.html#setAutoCommit(boolean)*/
                    releaseLocksAndCommitOrRollback(true, null);
                    txnManager.setAutoCommit(true);
                } else if (!plan.getAutoCommitValue() && txnManager.getAutoCommit()) {
                    txnManager.setAutoCommit(false);
                } else {
                /*didn't change autoCommit value - no-op*/
                }
            } catch (LockException e) {
                return handleHiveException(e, 12);
            }
        }
        if (requiresLock()) {
            // a checkpoint to see whether the thread has been interrupted before an expensive operation
            if (isInterrupted()) {
                ret = handleInterruption("at acquiring the lock.");
            } else {
                ret = acquireLocksAndOpenTxn(startTxnImplicitly);
            }
            if (ret != 0) {
                return rollback(createProcessorResponse(ret));
            }
        }
        ret = execute(true);
        if (ret != 0) {
            //if needRequireLock is false, the release here will do nothing because there is no lock
            return rollback(createProcessorResponse(ret));
        }
        //if needRequireLock is false, the release here will do nothing because there is no lock
        try {
            if (txnManager.getAutoCommit() || plan.getOperation() == HiveOperation.COMMIT) {
                releaseLocksAndCommitOrRollback(true, null);
            } else if (plan.getOperation() == HiveOperation.ROLLBACK) {
                releaseLocksAndCommitOrRollback(false, null);
            } else {
            //txn (if there is one started) is not finished
            }
        } catch (LockException e) {
            return handleHiveException(e, 12);
        }
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_RUN);
        queryDisplay.setPerfLogStarts(QueryDisplay.Phase.EXECUTION, perfLogger.getStartTimes());
        queryDisplay.setPerfLogEnds(QueryDisplay.Phase.EXECUTION, perfLogger.getEndTimes());
        // Take all the driver run hooks and post-execute them.
        try {
            for (HiveDriverRunHook driverRunHook : driverRunHooks) {
                driverRunHook.postDriverRun(hookContext);
            }
        } catch (Exception e) {
            errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
            SQLState = ErrorMsg.findSQLState(e.getMessage());
            downstreamError = e;
            console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
            return createProcessorResponse(12);
        }
        isFinishedWithError = false;
        return createProcessorResponse(ret);
    } finally {
        if (isInterrupted()) {
            closeInProcess(true);
        } else {
            // only release the related resources ctx, driverContext as normal
            releaseResources();
        }
        lDrvState.stateLock.lock();
        try {
            if (lDrvState.driverState == DriverState.INTERRUPT) {
                lDrvState.driverState = DriverState.ERROR;
            } else {
                lDrvState.driverState = isFinishedWithError ? DriverState.ERROR : DriverState.EXECUTED;
            }
        } finally {
            lDrvState.stateLock.unlock();
        }
    }
}
Also used : LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException)
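
runInternal is private; callers reach it through Driver.run, which wraps failures in a CommandProcessorResponse. A hedged end-to-end sketch (assumed Hive 2.x API; the query is illustrative):

Driver driver = new Driver(conf);
try {
    CommandProcessorResponse resp = driver.run("INSERT INTO t VALUES (1)");
    if (resp.getResponseCode() != 0) {
        System.err.println("FAILED: " + resp.getErrorMessage()
            + " (SQLState " + resp.getSQLState() + ")");
    }
} catch (CommandNeedRetryException e) {
    // the processor asks the caller to resubmit the command
}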

Aggregations

HiveTxnManager (org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) 13
EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext) 5
CompilationOpContext (org.apache.hadoop.hive.ql.CompilationOpContext) 5
Context (org.apache.hadoop.hive.ql.Context) 5
DriverContext (org.apache.hadoop.hive.ql.DriverContext) 5
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException) 5
IOException (java.io.IOException) 4
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger) 4
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 4
AuthorizationException (org.apache.hadoop.hive.ql.metadata.AuthorizationException) 3
ImmutableMap (com.google.common.collect.ImmutableMap) 1
DataOutputStream (java.io.DataOutputStream) 1
FileNotFoundException (java.io.FileNotFoundException) 1
URISyntaxException (java.net.URISyntaxException) 1
SQLException (java.sql.SQLException) 1
HashMap (java.util.HashMap) 1
LinkedHashMap (java.util.LinkedHashMap) 1
Map (java.util.Map) 1
ThreadFactory (java.util.concurrent.ThreadFactory) 1
TimeUnit (java.util.concurrent.TimeUnit) 1