
Example 1 with PrivateHookContext

Use of org.apache.hadoop.hive.ql.hooks.PrivateHookContext in project hive by apache.

The run method of class OperatorStatsReaderHook:

@Override
public void run(HookContext hookContext) throws Exception {
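    // Operator-level runtime stats exist only after execution, so skip the pre-execution call.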
    if (hookContext.getHookType() == HookType.PRE_EXEC_HOOK) {
        return;
    }
    if (hookContext.getHookType() == HookType.POST_EXEC_HOOK && !isCollectOnSuccess()) {
        return;
    }
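    // Walk every Tez vertex in the completed plan and read back its runtime counters.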
    HiveConf conf = hookContext.getConf();
    QueryPlan plan = hookContext.getQueryPlan();
    List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
    for (TezTask tezTask : rootTasks) {
        List<BaseWork> baseWorks = tezTask.getWork().getAllWork();
        for (BaseWork baseWork : baseWorks) {
            String vertexName = baseWork.getName();
            LOG.debug("Reading runtime statistics for tez vertex task: {}", vertexName);
            TezCounters counters = tezTask.getTezCounters();
            if (counters != null) {
                String groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
                for (Operator<? extends OperatorDesc> op : baseWork.getAllOperators()) {
                    String operatorId = op.getOperatorId();
                    // Look up this operator's RECORDS_OUT counter in the Hive counter group.
                    String counterName = Operator.Counter.RECORDS_OUT_OPERATOR.toString() + "_" + operatorId;
                    TezCounter tezCounter = counters.getGroup(groupName).findCounter(counterName, false);
                    OperatorStats operatorStats = null;
                    if (tezCounter != null) {
                        operatorStats = new OperatorStats(operatorId);
                        operatorStats.setOutputRecords(tezCounter.getValue());
                    }
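                    // Attach the stats to the operator through the driver-private context's PlanMapper.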
                    if (operatorStats != null) {
                        ((PrivateHookContext) hookContext).getContext().getPlanMapper().link(op, operatorStats);
                    } else {
                        LOG.debug("Unable to get statistics for vertex: {} opId: {} groupName: {}", vertexName, operatorId, groupName);
                    }
                }
            }
        }
    }
}
Also used: HiveConf(org.apache.hadoop.hive.conf.HiveConf) TezCounter(org.apache.tez.common.counters.TezCounter) QueryPlan(org.apache.hadoop.hive.ql.QueryPlan) PrivateHookContext(org.apache.hadoop.hive.ql.hooks.PrivateHookContext) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) TezCounters(org.apache.tez.common.counters.TezCounters)
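
For orientation: execution hooks like the one above implement org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext and can be registered through the hive.exec.pre.hooks / hive.exec.post.hooks configuration properties. Below is a minimal sketch of such a hook that downcasts to PrivateHookContext; the class name StatsLoggingHook is a hypothetical illustration, not a class in Hive.

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext.HookType;
import org.apache.hadoop.hive.ql.hooks.PrivateHookContext;

// Hypothetical example, not part of the Hive code base.
public class StatsLoggingHook implements ExecuteWithHookContext {

    @Override
    public void run(HookContext hookContext) throws Exception {
        // Act only on the post-execution invocation.
        if (hookContext.getHookType() != HookType.POST_EXEC_HOOK) {
            return;
        }
        // The Driver constructs a PrivateHookContext (see Example 2), so for
        // driver-invoked hooks this downcast is expected to succeed.
        if (hookContext instanceof PrivateHookContext) {
            PrivateHookContext privateContext = (PrivateHookContext) hookContext;
            // getContext() exposes the driver-private Context, whose PlanMapper is
            // what Example 1 uses to link operators to their runtime statistics.
            System.out.println("Finished query: " + privateContext.getQueryPlan().getQueryId());
        }
    }
}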

Example 2 with PrivateHookContext

Use of org.apache.hadoop.hive.ql.hooks.PrivateHookContext in project hive by apache.

The execute method of class Driver:

private void execute() throws CommandProcessorResponse {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
    boolean noName = StringUtils.isEmpty(conf.get(MRJobConfig.JOB_NAME));
    int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
    Metrics metrics = MetricsFactory.getInstance();
    String queryId = queryState.getQueryId();
    // Get the query string from the conf file as the compileInternal() method might
    // hide sensitive information during query redaction.
    String queryStr = conf.getQueryString();
    lDrvState.stateLock.lock();
    try {
        // If the query is not in COMPILED state, or in EXECUTING state carried over
        // from a combined compile/execute in runInternal, throw the error.
        if (lDrvState.driverState != DriverState.COMPILED && lDrvState.driverState != DriverState.EXECUTING) {
            SQLState = "HY008";
            errorMessage = "FAILED: unexpected driverstate: " + lDrvState + ", for query " + queryStr;
            console.printError(errorMessage);
            throw createProcessorResponse(1000);
        } else {
            lDrvState.driverState = DriverState.EXECUTING;
        }
    } finally {
        lDrvState.stateLock.unlock();
    }
    maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);
    HookContext hookContext = null;
    // Whether there's any error occurred during query execution. Used for query lifetime hook.
    boolean executionError = false;
    try {
        LOG.info("Executing command(queryId=" + queryId + "): " + queryStr);
        // compile and execute can get called from different threads in case of HS2
        // so clear timing in this thread's Hive object before proceeding.
        Hive.get().clearMetaCallTiming();
        plan.setStarted();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().startQuery(queryStr, queryId);
            SessionState.get().getHiveHistory().logPlanProgress(plan);
        }
        resStream = null;
        SessionState ss = SessionState.get();
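        // PrivateHookContext extends HookContext with the driver-private Context (ctx),
        // which is what gives hooks such as Example 1 access to the PlanMapper.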
        hookContext = new PrivateHookContext(plan, queryState, ctx.getPathToCS(), SessionState.get().getUserName(), ss.getUserIpAddress(), InetAddress.getLocalHost().getHostAddress(), operationId, ss.getSessionId(), Thread.currentThread().getName(), ss.isHiveServerQuery(), perfLogger, queryInfo, ctx);
        hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
        hookRunner.runPreHooks(hookContext);
        // Trigger query hooks before query execution.
        hookRunner.runBeforeExecutionHook(queryStr, hookContext);
        setQueryDisplays(plan.getRootTasks());
        int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
        int jobs = mrJobs + Utilities.getTezTasks(plan.getRootTasks()).size() + Utilities.getSparkTasks(plan.getRootTasks()).size();
        if (jobs > 0) {
            logMrWarning(mrJobs);
            console.printInfo("Query ID = " + queryId);
            console.printInfo("Total jobs = " + jobs);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS, String.valueOf(jobs));
            SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
        }
        String jobname = Utilities.abbreviate(queryStr, maxlen - 6);
        // A runtime that launches runnable tasks as separate Threads through
        // TaskRunners
        // As soon as a task isRunnable, it is put in a queue
        // At any time, at most maxthreads tasks can be running
        // The main thread polls the TaskRunners to check if they have finished.
        checkInterrupted("before running tasks.", hookContext, perfLogger);
        DriverContext driverCxt = new DriverContext(ctx);
        driverCxt.prepare(plan);
        ctx.setHDFSCleanup(true);
        // for canceling the query (should be bound to session?)
        this.driverCxt = driverCxt;
        SessionState.get().setMapRedStats(new LinkedHashMap<>());
        SessionState.get().setStackTraces(new HashMap<>());
        SessionState.get().setLocalMapRedErrors(new HashMap<>());
        // Add root Tasks to runnable
        for (Task<? extends Serializable> tsk : plan.getRootTasks()) {
            // A root task must have no parent tasks; a violation here is a bug with the
            // potential to produce incorrect results.
            assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty();
            driverCxt.addToRunnable(tsk);
            if (metrics != null) {
                tsk.updateTaskMetrics(metrics);
            }
        }
        preExecutionCacheActions();
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RUN_TASKS);
        // Loop while you either have tasks running, or tasks queued up
        while (driverCxt.isRunning()) {
            // Launch up to maxthreads tasks
            Task<? extends Serializable> task;
            while ((task = driverCxt.getRunnable(maxthreads)) != null) {
                TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt);
                if (!runner.isRunning()) {
                    break;
                }
            }
            // poll the Tasks to see which one completed
            TaskRunner tskRun = driverCxt.pollFinished();
            if (tskRun == null) {
                continue;
            }
            /*
             * This should be removed eventually. HIVE-17814 gives a more detailed
             * explanation of what is happening, and HIVE-17815 explains why this is done.
             * Briefly: for replication the graph is huge, so memory pressure becomes
             * severe if we keep a lot of references around.
             */
            String opName = plan.getOperationName();
            boolean isReplicationOperation = opName.equals(HiveOperation.REPLDUMP.getOperationName()) || opName.equals(HiveOperation.REPLLOAD.getOperationName());
            if (!isReplicationOperation) {
                hookContext.addCompleteTask(tskRun);
            }
            queryDisplay.setTaskResult(tskRun.getTask().getId(), tskRun.getTaskResult());
            Task<? extends Serializable> tsk = tskRun.getTask();
            TaskResult result = tskRun.getTaskResult();
            int exitVal = result.getExitVal();
            checkInterrupted("when checking the execution result.", hookContext, perfLogger);
            if (exitVal != 0) {
                Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
                if (backupTask != null) {
                    setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
                    console.printError(errorMessage);
                    errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
                    console.printError(errorMessage);
                    // add backup task to runnable
                    if (DriverContext.isLaunchable(backupTask)) {
                        driverCxt.addToRunnable(backupTask);
                    }
                    continue;
                } else {
                    setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
                    if (driverCxt.isShutdown()) {
                        errorMessage = "FAILED: Operation cancelled. " + errorMessage;
                    }
                    invokeFailureHooks(perfLogger, hookContext, errorMessage + Strings.nullToEmpty(tsk.getDiagnosticsMessage()), result.getTaskError());
                    SQLState = "08S01";
                    // 08S01 (communication error) is the default SQLState; override it
                    // based on the ErrorMsg set in HiveException.
                    if (result.getTaskError() instanceof HiveException) {
                        ErrorMsg errorMsg = ((HiveException) result.getTaskError()).getCanonicalErrorMsg();
                        if (errorMsg != ErrorMsg.GENERIC_ERROR) {
                            SQLState = errorMsg.getSQLState();
                        }
                    }
                    console.printError(errorMessage);
                    driverCxt.shutdown();
                    // In case we decided to run everything in local mode, restore the
                    // jobtracker setting to its initial value.
                    ctx.restoreOriginalTracker();
                    throw createProcessorResponse(exitVal);
                }
            }
            driverCxt.finished(tskRun);
            if (SessionState.get() != null) {
                SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(), Keys.TASK_RET_CODE, String.valueOf(exitVal));
                SessionState.get().getHiveHistory().endTask(queryId, tsk);
            }
            if (tsk.getChildTasks() != null) {
                for (Task<? extends Serializable> child : tsk.getChildTasks()) {
                    if (DriverContext.isLaunchable(child)) {
                        driverCxt.addToRunnable(child);
                    }
                }
            }
        }
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RUN_TASKS);
        postExecutionCacheActions();
        // In case we decided to run everything in local mode, restore the
        // jobtracker setting to its initial value.
        ctx.restoreOriginalTracker();
        if (driverCxt.isShutdown()) {
            SQLState = "HY008";
            errorMessage = "FAILED: Operation cancelled";
            invokeFailureHooks(perfLogger, hookContext, errorMessage, null);
            console.printError(errorMessage);
            throw createProcessorResponse(1000);
        }
        // Remove incomplete outputs: some outputs (e.g. for dynamic partitions) are
        // registered at the beginning of execution and may never be completed.
        HashSet<WriteEntity> remOutputs = new LinkedHashSet<WriteEntity>();
        for (WriteEntity output : plan.getOutputs()) {
            if (!output.isComplete()) {
                remOutputs.add(output);
            }
        }
        for (WriteEntity output : remOutputs) {
            plan.getOutputs().remove(output);
        }
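        // All tasks completed successfully: fire the post-execution hooks.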
        hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
        hookRunner.runPostExecHooks(hookContext);
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, String.valueOf(0));
            SessionState.get().getHiveHistory().printRowCount(queryId);
        }
        releasePlan(plan);
    } catch (CommandProcessorResponse cpr) {
        executionError = true;
        throw cpr;
    } catch (Throwable e) {
        executionError = true;
        checkInterrupted("during query execution: \n" + e.getMessage(), hookContext, perfLogger);
        ctx.restoreOriginalTracker();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, String.valueOf(12));
        }
        // TODO: do better with handling types of Exception here
        errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
        if (hookContext != null) {
            try {
                invokeFailureHooks(perfLogger, hookContext, errorMessage, e);
            } catch (Exception t) {
                LOG.warn("Failed to invoke failure hook", t);
            }
        }
        SQLState = "08S01";
        downstreamError = e;
        console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw createProcessorResponse(12);
    } finally {
        // Trigger query hooks after query completes its execution.
        try {
            hookRunner.runAfterExecutionHook(queryStr, hookContext, executionError);
        } catch (Exception e) {
            LOG.warn("Failed when invoking query after execution hook", e);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().endQuery(queryId);
        }
        if (noName) {
            conf.set(MRJobConfig.JOB_NAME, "");
        }
        double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE) / 1000.00;
        ImmutableMap<String, Long> executionHMSTimings = dumpMetaCallTimingWithoutEx("execution");
        queryDisplay.setHmsTimings(QueryDisplay.Phase.EXECUTION, executionHMSTimings);
        Map<String, MapRedStats> stats = SessionState.get().getMapRedStats();
        if (stats != null && !stats.isEmpty()) {
            long totalCpu = 0;
            console.printInfo("MapReduce Jobs Launched: ");
            for (Map.Entry<String, MapRedStats> entry : stats.entrySet()) {
                console.printInfo("Stage-" + entry.getKey() + ": " + entry.getValue());
                totalCpu += entry.getValue().getCpuMSec();
            }
            console.printInfo("Total MapReduce CPU Time Spent: " + Utilities.formatMsecToStr(totalCpu));
        }
        lDrvState.stateLock.lock();
        try {
            lDrvState.driverState = executionError ? DriverState.ERROR : DriverState.EXECUTED;
        } finally {
            lDrvState.stateLock.unlock();
        }
        if (lDrvState.isAborted()) {
            LOG.info("Executing command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
        } else {
            LOG.info("Completed executing command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
        }
    }
    if (console != null) {
        console.printInfo("OK");
    }
}
Also used: LinkedHashSet(java.util.LinkedHashSet) SessionState(org.apache.hadoop.hive.ql.session.SessionState) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) PrivateHookContext(org.apache.hadoop.hive.ql.hooks.PrivateHookContext) HookContext(org.apache.hadoop.hive.ql.hooks.HookContext) HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) TaskRunner(org.apache.hadoop.hive.ql.exec.TaskRunner) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) ParseException(org.apache.hadoop.hive.ql.parse.ParseException) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) TaskResult(org.apache.hadoop.hive.ql.exec.TaskResult) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
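
Taken together, execute() drives the complete hook lifecycle: pre-execution hooks before any task launches, post-execution hooks on success, failure hooks on error, and the after-execution query hook in the finally block regardless of outcome. The following condensed sketch shows just that ordering; it reuses the HookRunner calls from the method above, while runAllTasks() and describeFailure() are placeholders, not Hive APIs.

// Condensed sketch of the hook ordering in Driver.execute() above. Assumes the
// surrounding Driver fields (hookRunner, perfLogger, queryStr) are in scope.
private void runWithHooks(PrivateHookContext hookContext) throws Exception {
    boolean executionError = false;
    try {
        hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
        hookRunner.runPreHooks(hookContext);            // before any task launches
        hookRunner.runBeforeExecutionHook(queryStr, hookContext);
        runAllTasks();                                  // placeholder for the TaskRunner polling loop
        hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
        hookRunner.runPostExecHooks(hookContext);       // success path only
    } catch (Exception e) {
        executionError = true;
        // invokeFailureHooks(...) is what the real method calls on every error path.
        invokeFailureHooks(perfLogger, hookContext, describeFailure(e), e);
        throw e;
    } finally {
        // Query lifetime hooks run no matter how execution ended.
        hookRunner.runAfterExecutionHook(queryStr, hookContext, executionError);
    }
}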

Aggregations

PrivateHookContext (org.apache.hadoop.hive.ql.hooks.PrivateHookContext): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1
IOException (java.io.IOException): 1
HashMap (java.util.HashMap): 1
LinkedHashMap (java.util.LinkedHashMap): 1
LinkedHashSet (java.util.LinkedHashSet): 1
Map (java.util.Map): 1
Metrics (org.apache.hadoop.hive.common.metrics.common.Metrics): 1
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 1
QueryPlan (org.apache.hadoop.hive.ql.QueryPlan): 1
TaskResult (org.apache.hadoop.hive.ql.exec.TaskResult): 1
TaskRunner (org.apache.hadoop.hive.ql.exec.TaskRunner): 1
TezTask (org.apache.hadoop.hive.ql.exec.tez.TezTask): 1
HookContext (org.apache.hadoop.hive.ql.hooks.HookContext): 1
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 1
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 1
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger): 1
AuthorizationException (org.apache.hadoop.hive.ql.metadata.AuthorizationException): 1
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 1
HiveSemanticAnalyzerHookContext (org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext): 1