
Example 21 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class Driver, method execute().

private void execute() throws CommandProcessorResponse {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
    boolean noName = StringUtils.isEmpty(conf.get(MRJobConfig.JOB_NAME));
    int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
    Metrics metrics = MetricsFactory.getInstance();
    String queryId = queryState.getQueryId();
    // Get the query string from the conf file as the compileInternal() method might
    // hide sensitive information during query redaction.
    String queryStr = conf.getQueryString();
    lDrvState.stateLock.lock();
    try {
        // If the query is not in the COMPILED state, or in the EXECUTING state
        // carried over from a combined compile/execute in runInternal, throw an error.
        if (lDrvState.driverState != DriverState.COMPILED && lDrvState.driverState != DriverState.EXECUTING) {
            SQLState = "HY008";
            errorMessage = "FAILED: unexpected driverstate: " + lDrvState + ", for query " + queryStr;
            console.printError(errorMessage);
            throw createProcessorResponse(1000);
        } else {
            lDrvState.driverState = DriverState.EXECUTING;
        }
    } finally {
        lDrvState.stateLock.unlock();
    }
    maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);
    HookContext hookContext = null;
    // Whether any error occurred during query execution. Used for the query lifetime hook.
    boolean executionError = false;
    try {
        LOG.info("Executing command(queryId=" + queryId + "): " + queryStr);
        // compile and execute can get called from different threads in case of HS2
        // so clear timing in this thread's Hive object before proceeding.
        Hive.get().clearMetaCallTiming();
        plan.setStarted();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().startQuery(queryStr, queryId);
            SessionState.get().getHiveHistory().logPlanProgress(plan);
        }
        resStream = null;
        SessionState ss = SessionState.get();
        hookContext = new PrivateHookContext(plan, queryState, ctx.getPathToCS(), SessionState.get().getUserName(), ss.getUserIpAddress(), InetAddress.getLocalHost().getHostAddress(), operationId, ss.getSessionId(), Thread.currentThread().getName(), ss.isHiveServerQuery(), perfLogger, queryInfo, ctx);
        hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
        hookRunner.runPreHooks(hookContext);
        // Trigger query hooks before query execution.
        hookRunner.runBeforeExecutionHook(queryStr, hookContext);
        setQueryDisplays(plan.getRootTasks());
        int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
        int jobs = mrJobs + Utilities.getTezTasks(plan.getRootTasks()).size() + Utilities.getSparkTasks(plan.getRootTasks()).size();
        if (jobs > 0) {
            logMrWarning(mrJobs);
            console.printInfo("Query ID = " + queryId);
            console.printInfo("Total jobs = " + jobs);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS, String.valueOf(jobs));
            SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
        }
        String jobname = Utilities.abbreviate(queryStr, maxlen - 6);
        // A runtime that launches runnable tasks as separate threads through
        // TaskRunners. As soon as a task becomes runnable, it is put in a queue.
        // At any time, at most maxthreads tasks can be running. The main thread
        // polls the TaskRunners to check whether they have finished.
        checkInterrupted("before running tasks.", hookContext, perfLogger);
        DriverContext driverCxt = new DriverContext(ctx);
        driverCxt.prepare(plan);
        ctx.setHDFSCleanup(true);
        // for canceling the query (should be bound to session?)
        this.driverCxt = driverCxt;
        SessionState.get().setMapRedStats(new LinkedHashMap<>());
        SessionState.get().setStackTraces(new HashMap<>());
        SessionState.get().setLocalMapRedErrors(new HashMap<>());
        // Add root tasks to runnable
        for (Task<? extends Serializable> tsk : plan.getRootTasks()) {
            // Root tasks must have no parents; a root task with parents is a bug
            // that could produce incorrect results.
            assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty();
            driverCxt.addToRunnable(tsk);
            if (metrics != null) {
                tsk.updateTaskMetrics(metrics);
            }
        }
        preExecutionCacheActions();
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RUN_TASKS);
        // Loop while you either have tasks running, or tasks queued up
        while (driverCxt.isRunning()) {
            // Launch up to maxthreads tasks
            Task<? extends Serializable> task;
            while ((task = driverCxt.getRunnable(maxthreads)) != null) {
                TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt);
                if (!runner.isRunning()) {
                    break;
                }
            }
            // poll the Tasks to see which one completed
            TaskRunner tskRun = driverCxt.pollFinished();
            if (tskRun == null) {
                continue;
            }
            /*
          This should be removed eventually. HIVE-17814 gives a more detailed
          explanation of what is happening, and HIVE-17815 explains why this is done.
          Briefly: for replication the task graph is huge, so memory pressure would be
          severe if we kept a lot of references around.
        */
            String opName = plan.getOperationName();
            boolean isReplicationOperation = opName.equals(HiveOperation.REPLDUMP.getOperationName()) || opName.equals(HiveOperation.REPLLOAD.getOperationName());
            if (!isReplicationOperation) {
                hookContext.addCompleteTask(tskRun);
            }
            queryDisplay.setTaskResult(tskRun.getTask().getId(), tskRun.getTaskResult());
            Task<? extends Serializable> tsk = tskRun.getTask();
            TaskResult result = tskRun.getTaskResult();
            int exitVal = result.getExitVal();
            checkInterrupted("when checking the execution result.", hookContext, perfLogger);
            if (exitVal != 0) {
                Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
                if (backupTask != null) {
                    setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
                    console.printError(errorMessage);
                    errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
                    console.printError(errorMessage);
                    // add backup task to runnable
                    if (DriverContext.isLaunchable(backupTask)) {
                        driverCxt.addToRunnable(backupTask);
                    }
                    continue;
                } else {
                    setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
                    if (driverCxt.isShutdown()) {
                        errorMessage = "FAILED: Operation cancelled. " + errorMessage;
                    }
                    invokeFailureHooks(perfLogger, hookContext, errorMessage + Strings.nullToEmpty(tsk.getDiagnosticsMessage()), result.getTaskError());
                    SQLState = "08S01";
                    // 08S01 (communication error) is the default SQLState; override it
                    // based on the ErrorMsg set in HiveException.
                    if (result.getTaskError() instanceof HiveException) {
                        ErrorMsg errorMsg = ((HiveException) result.getTaskError()).getCanonicalErrorMsg();
                        if (errorMsg != ErrorMsg.GENERIC_ERROR) {
                            SQLState = errorMsg.getSQLState();
                        }
                    }
                    console.printError(errorMessage);
                    driverCxt.shutdown();
                    // in case we decided to run everything in local mode, restore
                    // the jobtracker setting to its initial value
                    ctx.restoreOriginalTracker();
                    throw createProcessorResponse(exitVal);
                }
            }
            driverCxt.finished(tskRun);
            if (SessionState.get() != null) {
                SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(), Keys.TASK_RET_CODE, String.valueOf(exitVal));
                SessionState.get().getHiveHistory().endTask(queryId, tsk);
            }
            if (tsk.getChildTasks() != null) {
                for (Task<? extends Serializable> child : tsk.getChildTasks()) {
                    if (DriverContext.isLaunchable(child)) {
                        driverCxt.addToRunnable(child);
                    }
                }
            }
        }
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RUN_TASKS);
        postExecutionCacheActions();
        // in case we decided to run everything in local mode, restore
        // the jobtracker setting to its initial value
        ctx.restoreOriginalTracker();
        if (driverCxt.isShutdown()) {
            SQLState = "HY008";
            errorMessage = "FAILED: Operation cancelled";
            invokeFailureHooks(perfLogger, hookContext, errorMessage, null);
            console.printError(errorMessage);
            throw createProcessorResponse(1000);
        }
        // Remove incomplete outputs. Some may have been added at the beginning,
        // e.g. for dynamic partitions; remove them before the post-execution hooks run.
        HashSet<WriteEntity> remOutputs = new LinkedHashSet<WriteEntity>();
        for (WriteEntity output : plan.getOutputs()) {
            if (!output.isComplete()) {
                remOutputs.add(output);
            }
        }
        for (WriteEntity output : remOutputs) {
            plan.getOutputs().remove(output);
        }
        hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
        hookRunner.runPostExecHooks(hookContext);
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, String.valueOf(0));
            SessionState.get().getHiveHistory().printRowCount(queryId);
        }
        releasePlan(plan);
    } catch (CommandProcessorResponse cpr) {
        executionError = true;
        throw cpr;
    } catch (Throwable e) {
        executionError = true;
        checkInterrupted("during query execution: \n" + e.getMessage(), hookContext, perfLogger);
        ctx.restoreOriginalTracker();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, String.valueOf(12));
        }
        // TODO: do better with handling types of Exception here
        errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
        if (hookContext != null) {
            try {
                invokeFailureHooks(perfLogger, hookContext, errorMessage, e);
            } catch (Exception t) {
                LOG.warn("Failed to invoke failure hook", t);
            }
        }
        SQLState = "08S01";
        downstreamError = e;
        console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw createProcessorResponse(12);
    } finally {
        // Trigger query hooks after query completes its execution.
        try {
            hookRunner.runAfterExecutionHook(queryStr, hookContext, executionError);
        } catch (Exception e) {
            LOG.warn("Failed when invoking query after execution hook", e);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().endQuery(queryId);
        }
        if (noName) {
            conf.set(MRJobConfig.JOB_NAME, "");
        }
        double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE) / 1000.00;
        ImmutableMap<String, Long> executionHMSTimings = dumpMetaCallTimingWithoutEx("execution");
        queryDisplay.setHmsTimings(QueryDisplay.Phase.EXECUTION, executionHMSTimings);
        Map<String, MapRedStats> stats = SessionState.get().getMapRedStats();
        if (stats != null && !stats.isEmpty()) {
            long totalCpu = 0;
            console.printInfo("MapReduce Jobs Launched: ");
            for (Map.Entry<String, MapRedStats> entry : stats.entrySet()) {
                console.printInfo("Stage-" + entry.getKey() + ": " + entry.getValue());
                totalCpu += entry.getValue().getCpuMSec();
            }
            console.printInfo("Total MapReduce CPU Time Spent: " + Utilities.formatMsecToStr(totalCpu));
        }
        lDrvState.stateLock.lock();
        try {
            lDrvState.driverState = executionError ? DriverState.ERROR : DriverState.EXECUTED;
        } finally {
            lDrvState.stateLock.unlock();
        }
        if (lDrvState.isAborted()) {
            LOG.info("Executing command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
        } else {
            LOG.info("Completed executing command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
        }
    }
    if (console != null) {
        console.printInfo("OK");
    }
}
Also used : LinkedHashSet(java.util.LinkedHashSet) SessionState(org.apache.hadoop.hive.ql.session.SessionState) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) PrivateHookContext(org.apache.hadoop.hive.ql.hooks.PrivateHookContext) HookContext(org.apache.hadoop.hive.ql.hooks.HookContext) HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) TaskRunner(org.apache.hadoop.hive.ql.exec.TaskRunner) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) ParseException(org.apache.hadoop.hive.ql.parse.ParseException) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) TaskResult(org.apache.hadoop.hive.ql.exec.TaskResult) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
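
A pattern worth noting in the method above: MetricsFactory.getInstance() returns null when no metrics backend has been initialized, so Driver fetches the Metrics handle once and null-checks it before calling tsk.updateTaskMetrics(metrics). A minimal standalone sketch of that guard, using a hypothetical counter name rather than one of Hive's MetricsConstant values:

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;

public class MetricsGuardSketch {

    // Hypothetical counter name, for illustration only.
    private static final String LAUNCHED_TASKS = "launched_tasks";

    public static void recordTaskLaunch() {
        // getInstance() returns null if metrics were never initialized,
        // so every call site must guard, as Driver.execute() does above.
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            metrics.incrementCounter(LAUNCHED_TASKS);
        }
    }
}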

Example 22 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class OperationManager, method removeTimedOutOperation().

private Operation removeTimedOutOperation(OperationHandle operationHandle) {
    Operation operation = handleToOperation.get(operationHandle);
    if (operation != null && operation.isTimedOut(System.currentTimeMillis())) {
        LOG.info("Operation is timed out, operation=" + operation.getHandle() + ", state=" + operation.getState().toString());
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            try {
                metrics.decrementCounter(MetricsConstant.OPEN_OPERATIONS);
            } catch (Exception e) {
                LOG.warn("Error decrementing open_operations metric, reported values may be incorrect", e);
            }
        }
        return removeOperation(operationHandle);
    }
    return null;
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) SQLException(java.sql.SQLException) HiveSQLException(org.apache.hive.service.cli.HiveSQLException)
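
removeTimedOutOperation() only decrements MetricsConstant.OPEN_OPERATIONS; the matching increment happens where an operation is registered. A hedged sketch of that counterpart, with the class, method, and logger names being assumptions rather than the exact Hive source:

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class OpenOperationsSketch {

    private static final Logger LOG = LoggerFactory.getLogger(OpenOperationsSketch.class);

    // Illustrative counterpart to the decrement above: bump the
    // open-operations counter when an operation is registered.
    public static void incrementOpenOperations() {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            try {
                metrics.incrementCounter(MetricsConstant.OPEN_OPERATIONS);
            } catch (Exception e) {
                LOG.warn("Error incrementing open_operations metric, reported values may be incorrect", e);
            }
        }
    }
}

As in the timeout path, the try/catch keeps a metrics failure from breaking the operation lifecycle itself.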

Example 23 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class SQLOperation, method onNewState().

@Override
protected void onNewState(OperationState state, OperationState prevState) {
    super.onNewState(state, prevState);
    currentSQLStateScope = updateOperationStateMetrics(currentSQLStateScope, MetricsConstant.SQL_OPERATION_PREFIX, MetricsConstant.COMPLETED_SQL_OPERATION_PREFIX, state);
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
        // State changed to RUNNING from something else: the user is now active.
        if (state == OperationState.RUNNING && prevState != state) {
            incrementUserQueries(metrics);
        }
        // State moved away from RUNNING: the user is no longer active.
        if (prevState == OperationState.RUNNING && prevState != state) {
            decrementUserQueries(metrics);
        }
    }
    if (state == OperationState.FINISHED || state == OperationState.CANCELED || state == OperationState.ERROR) {
        // update runtime
        queryInfo.setRuntime(getOperationComplete() - getOperationStart());
        if (metrics != null && submittedQryScp != null) {
            metrics.endScope(submittedQryScp);
        }
    }
    if (state == OperationState.CLOSED) {
        queryInfo.setEndTime();
    } else {
        // The CLOSED state itself is not interesting; the state before it (FINISHED, ERROR) is.
        queryInfo.updateState(state.toString());
    }
    if (state == OperationState.ERROR) {
        markQueryMetric(MetricsFactory.getInstance(), MetricsConstant.HS2_FAILED_QUERIES);
    }
    if (state == OperationState.FINISHED) {
        markQueryMetric(MetricsFactory.getInstance(), MetricsConstant.HS2_SUCCEEDED_QUERIES);
    }
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics)
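
The incrementUserQueries() and decrementUserQueries() helpers are not shown above. A minimal sketch of what such per-user bookkeeping could look like, assuming a hypothetical active-users counter name (the real Hive helpers and constants may differ). Only the 0 -> 1 and 1 -> 0 transitions touch the shared counter, so it tracks distinct active users rather than total running queries:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hive.common.metrics.common.Metrics;

public class UserQueryTrackerSketch {

    // Hypothetical metric name; the actual constant in Hive may differ.
    private static final String ACTIVE_USERS = "active_users";

    private final ConcurrentHashMap<String, AtomicInteger> userQueries = new ConcurrentHashMap<>();

    // First query for this user (0 -> 1): the user becomes active.
    void incrementUserQueries(Metrics metrics, String user) {
        AtomicInteger count = userQueries.computeIfAbsent(user, u -> new AtomicInteger());
        if (count.incrementAndGet() == 1) {
            metrics.incrementCounter(ACTIVE_USERS);
        }
    }

    // Last query for this user finished (1 -> 0): the user is no longer active.
    void decrementUserQueries(Metrics metrics, String user) {
        AtomicInteger count = userQueries.get(user);
        if (count != null && count.decrementAndGet() == 0) {
            metrics.decrementCounter(ACTIVE_USERS);
        }
    }
}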

Aggregations

Metrics (org.apache.hadoop.hive.common.metrics.common.Metrics): 23 usages
IOException (java.io.IOException): 5 usages
ArrayList (java.util.ArrayList): 5 usages
HiveSQLException (org.apache.hive.service.cli.HiveSQLException): 4 usages
Test (org.junit.Test): 4 usages
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 3 usages
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger): 3 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 2 usages
SQLException (java.sql.SQLException): 2 usages
HashMap (java.util.HashMap): 2 usages
LinkedHashMap (java.util.LinkedHashMap): 2 usages
LinkedHashSet (java.util.LinkedHashSet): 2 usages
Map (java.util.Map): 2 usages
MetricsFactory (org.apache.hadoop.hive.common.metrics.common.MetricsFactory): 2 usages
MetricsScope (org.apache.hadoop.hive.common.metrics.common.MetricsScope): 2 usages
TaskResult (org.apache.hadoop.hive.ql.exec.TaskResult): 2 usages
TaskRunner (org.apache.hadoop.hive.ql.exec.TaskRunner): 2 usages
HookContext (org.apache.hadoop.hive.ql.hooks.HookContext): 2 usages
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 2 usages
AuthorizationException (org.apache.hadoop.hive.ql.metadata.AuthorizationException): 2 usages