
Example 1 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From class Driver, method execute:

public int execute(boolean deferClose) throws CommandNeedRetryException {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
    boolean noName = StringUtils.isEmpty(conf.get(MRJobConfig.JOB_NAME));
    int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
    Metrics metrics = MetricsFactory.getInstance();
    String queryId = conf.getVar(HiveConf.ConfVars.HIVEQUERYID);
    // Get the query string from the conf file as the compileInternal() method might
    // hide sensitive information during query redaction.
    String queryStr = conf.getQueryString();
    lDrvState.stateLock.lock();
    try {
        // If the query is not in compiled or executing state (the latter carried over from
        // a combined compile/execute in runInternal), report the error.
        if (lDrvState.driverState != DriverState.COMPILED && lDrvState.driverState != DriverState.EXECUTING) {
            SQLState = "HY008";
            errorMessage = "FAILED: query " + queryStr + " has " + (lDrvState.driverState == DriverState.INTERRUPT ? "been cancelled" : "not been compiled.");
            console.printError(errorMessage);
            return 1000;
        } else {
            lDrvState.driverState = DriverState.EXECUTING;
        }
    } finally {
        lDrvState.stateLock.unlock();
    }
    maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);
    HookContext hookContext = null;
    // Whether any error occurred during query execution. Used for query lifetime hooks.
    boolean executionError = false;
    try {
        LOG.info("Executing command(queryId=" + queryId + "): " + queryStr);
        // compile and execute can get called from different threads in case of HS2
        // so clear timing in this thread's Hive object before proceeding.
        Hive.get().clearMetaCallTiming();
        plan.setStarted();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().startQuery(queryStr, conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
            SessionState.get().getHiveHistory().logPlanProgress(plan);
        }
        resStream = null;
        SessionState ss = SessionState.get();
        hookContext = new HookContext(plan, queryState, ctx.getPathToCS(), ss.getUserFromAuthenticator(), ss.getUserIpAddress(), InetAddress.getLocalHost().getHostAddress(), operationId, ss.getSessionId(), Thread.currentThread().getName(), ss.isHiveServerQuery(), perfLogger);
        hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
        for (Hook peh : getHooks(HiveConf.ConfVars.PREEXECHOOKS)) {
            if (peh instanceof ExecuteWithHookContext) {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName());
                ((ExecuteWithHookContext) peh).run(hookContext);
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName());
            } else if (peh instanceof PreExecute) {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName());
                ((PreExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(), Utils.getUGI());
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName());
            }
        }
        // Trigger query hooks before query execution.
        if (queryHooks != null && !queryHooks.isEmpty()) {
            QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
            qhc.setHiveConf(conf);
            qhc.setCommand(queryStr);
            qhc.setHookContext(hookContext);
            for (QueryLifeTimeHook hook : queryHooks) {
                hook.beforeExecution(qhc);
            }
        }
        setQueryDisplays(plan.getRootTasks());
        int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
        int jobs = mrJobs + Utilities.getTezTasks(plan.getRootTasks()).size() + Utilities.getSparkTasks(plan.getRootTasks()).size();
        if (jobs > 0) {
            logMrWarning(mrJobs);
            console.printInfo("Query ID = " + queryId);
            console.printInfo("Total jobs = " + jobs);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS, String.valueOf(jobs));
            SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
        }
        String jobname = Utilities.abbreviate(queryStr, maxlen - 6);
        if (isInterrupted()) {
            return handleInterruption("before running tasks.");
        }
        DriverContext driverCxt = new DriverContext(ctx);
        driverCxt.prepare(plan);
        ctx.setHDFSCleanup(true);
        // for canceling the query (should be bound to session?)
        this.driverCxt = driverCxt;
        SessionState.get().setMapRedStats(new LinkedHashMap<String, MapRedStats>());
        SessionState.get().setStackTraces(new HashMap<String, List<List<String>>>());
        SessionState.get().setLocalMapRedErrors(new HashMap<String, List<String>>());
        // Add root Tasks to runnable
        for (Task<? extends Serializable> tsk : plan.getRootTasks()) {
            // This should never happen; if it does, it's a bug with the potential to produce
            // incorrect results.
            assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty();
            driverCxt.addToRunnable(tsk);
            if (metrics != null) {
                tsk.updateTaskMetrics(metrics);
            }
        }
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.RUN_TASKS);
        // Loop while you either have tasks running, or tasks queued up
        while (driverCxt.isRunning()) {
            // Launch up to maxthreads tasks
            Task<? extends Serializable> task;
            while ((task = driverCxt.getRunnable(maxthreads)) != null) {
                TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt);
                if (!runner.isRunning()) {
                    break;
                }
            }
            // poll the Tasks to see which one completed
            TaskRunner tskRun = driverCxt.pollFinished();
            if (tskRun == null) {
                continue;
            }
            hookContext.addCompleteTask(tskRun);
            queryDisplay.setTaskResult(tskRun.getTask().getId(), tskRun.getTaskResult());
            Task<? extends Serializable> tsk = tskRun.getTask();
            TaskResult result = tskRun.getTaskResult();
            int exitVal = result.getExitVal();
            if (isInterrupted()) {
                return handleInterruption("when checking the execution result.");
            }
            if (exitVal != 0) {
                if (tsk.ifRetryCmdWhenFail()) {
                    driverCxt.shutdown();
                    // in case we decided to run everything in local mode, restore the
                    // jobtracker setting to its initial value
                    ctx.restoreOriginalTracker();
                    throw new CommandNeedRetryException();
                }
                Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
                if (backupTask != null) {
                    setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
                    console.printError(errorMessage);
                    errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
                    console.printError(errorMessage);
                    // add backup task to runnable
                    if (DriverContext.isLaunchable(backupTask)) {
                        driverCxt.addToRunnable(backupTask);
                    }
                    continue;
                } else {
                    setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
                    invokeFailureHooks(perfLogger, hookContext, errorMessage + Strings.nullToEmpty(tsk.getDiagnosticsMessage()), result.getTaskError());
                    SQLState = "08S01";
                    console.printError(errorMessage);
                    driverCxt.shutdown();
                    // in case we decided to run everything in local mode, restore the
                    // jobtracker setting to its initial value
                    ctx.restoreOriginalTracker();
                    return exitVal;
                }
            }
            driverCxt.finished(tskRun);
            if (SessionState.get() != null) {
                SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(), Keys.TASK_RET_CODE, String.valueOf(exitVal));
                SessionState.get().getHiveHistory().endTask(queryId, tsk);
            }
            if (tsk.getChildTasks() != null) {
                for (Task<? extends Serializable> child : tsk.getChildTasks()) {
                    if (DriverContext.isLaunchable(child)) {
                        driverCxt.addToRunnable(child);
                    }
                }
            }
        }
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.RUN_TASKS);
        // in case we decided to run everything in local mode, restore the
        // jobtracker setting to its initial value
        ctx.restoreOriginalTracker();
        if (driverCxt.isShutdown()) {
            SQLState = "HY008";
            errorMessage = "FAILED: Operation cancelled";
            invokeFailureHooks(perfLogger, hookContext, errorMessage, null);
            console.printError(errorMessage);
            return 1000;
        }
        // Remove incomplete outputs: some may have been added at the beginning,
        // e.g. for dynamic partitions.
        HashSet<WriteEntity> remOutputs = new LinkedHashSet<WriteEntity>();
        for (WriteEntity output : plan.getOutputs()) {
            if (!output.isComplete()) {
                remOutputs.add(output);
            }
        }
        for (WriteEntity output : remOutputs) {
            plan.getOutputs().remove(output);
        }
        hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
        // Get all the post execution hooks and execute them.
        for (Hook peh : getHooks(HiveConf.ConfVars.POSTEXECHOOKS)) {
            if (peh instanceof ExecuteWithHookContext) {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName());
                ((ExecuteWithHookContext) peh).run(hookContext);
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName());
            } else if (peh instanceof PostExecute) {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName());
                ((PostExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(), (SessionState.get() != null ? SessionState.get().getLineageState().getLineageInfo() : null), Utils.getUGI());
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName());
            }
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, String.valueOf(0));
            SessionState.get().getHiveHistory().printRowCount(queryId);
        }
        releasePlan(plan);
    } catch (CommandNeedRetryException e) {
        executionError = true;
        throw e;
    } catch (Throwable e) {
        executionError = true;
        if (isInterrupted()) {
            return handleInterruption("during query execution: \n" + e.getMessage());
        }
        ctx.restoreOriginalTracker();
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE, String.valueOf(12));
        }
        // TODO: do better with handling types of Exception here
        errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
        if (hookContext != null) {
            try {
                invokeFailureHooks(perfLogger, hookContext, errorMessage, e);
            } catch (Exception t) {
                LOG.warn("Failed to invoke failure hook", t);
            }
        }
        SQLState = "08S01";
        downstreamError = e;
        console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (12);
    } finally {
        // Trigger query hooks after query completes its execution.
        try {
            if (queryHooks != null && !queryHooks.isEmpty()) {
                QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl();
                qhc.setHiveConf(conf);
                qhc.setCommand(queryStr);
                qhc.setHookContext(hookContext);
                for (QueryLifeTimeHook hook : queryHooks) {
                    hook.afterExecution(qhc, executionError);
                }
            }
        } catch (Exception e) {
            LOG.warn("Failed when invoking query after execution hook", e);
        }
        if (SessionState.get() != null) {
            SessionState.get().getHiveHistory().endQuery(queryId);
        }
        if (noName) {
            conf.set(MRJobConfig.JOB_NAME, "");
        }
        double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE) / 1000.00;
        ImmutableMap<String, Long> executionHMSTimings = dumpMetaCallTimingWithoutEx("execution");
        queryDisplay.setHmsTimings(QueryDisplay.Phase.EXECUTION, executionHMSTimings);
        Map<String, MapRedStats> stats = SessionState.get().getMapRedStats();
        if (stats != null && !stats.isEmpty()) {
            long totalCpu = 0;
            console.printInfo("MapReduce Jobs Launched: ");
            for (Map.Entry<String, MapRedStats> entry : stats.entrySet()) {
                console.printInfo("Stage-" + entry.getKey() + ": " + entry.getValue());
                totalCpu += entry.getValue().getCpuMSec();
            }
            console.printInfo("Total MapReduce CPU Time Spent: " + Utilities.formatMsecToStr(totalCpu));
        }
        boolean isInterrupted = isInterrupted();
        if (isInterrupted && !deferClose) {
            closeInProcess(true);
        }
        lDrvState.stateLock.lock();
        try {
            if (isInterrupted) {
                if (!deferClose) {
                    lDrvState.driverState = DriverState.ERROR;
                }
            } else {
                lDrvState.driverState = executionError ? DriverState.ERROR : DriverState.EXECUTED;
            }
        } finally {
            lDrvState.stateLock.unlock();
        }
        if (isInterrupted) {
            LOG.info("Executing command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
        } else {
            LOG.info("Completed executing command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
        }
    }
    if (console != null) {
        console.printInfo("OK");
    }
    return (0);
}
Also used : LinkedHashSet(java.util.LinkedHashSet) SessionState(org.apache.hadoop.hive.ql.session.SessionState) QueryLifeTimeHookContextImpl(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) ExecuteWithHookContext(org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext) HookContext(org.apache.hadoop.hive.ql.hooks.HookContext) QueryLifeTimeHookContext(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext) HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) QueryLifeTimeHook(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook) MetricsQueryLifeTimeHook(org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook) TaskRunner(org.apache.hadoop.hive.ql.exec.TaskRunner) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) ArrayList(java.util.ArrayList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) List(java.util.List) LinkedList(java.util.LinkedList) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) HiveSemanticAnalyzerHook(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHook) QueryLifeTimeHook(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook) Hook(org.apache.hadoop.hive.ql.hooks.Hook) MetricsQueryLifeTimeHook(org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook) PreExecute(org.apache.hadoop.hive.ql.hooks.PreExecute) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) PostExecute(org.apache.hadoop.hive.ql.hooks.PostExecute) ExecuteWithHookContext(org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext) TaskResult(org.apache.hadoop.hive.ql.exec.TaskResult) QueryLifeTimeHookContext(org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
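
The task loop above only touches metrics through MetricsFactory.getInstance(), which returns null when no metrics backend is configured, so every call site carries a null guard. A minimal sketch of that guard around a counter update follows; the class and counter name are illustrative, not part of Hive (real counter names live in MetricsConstant).

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;

public final class TaskCounterSketch {

    // Illustrative counter name; Hive's own names are defined in MetricsConstant.
    private static final String LAUNCHED_TASKS = "example_launched_tasks";

    public static void recordTaskLaunch() {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics == null) {
            // Metrics are disabled; nothing to report.
            return;
        }
        try {
            metrics.incrementCounter(LAUNCHED_TASKS);
        } catch (Exception e) {
            // Metric reporting must never break the execution path.
        }
    }
}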

Example 2 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From class ThriftBinaryCLIService, method run:

@Override
public void run() {
    try {
        // Server thread pool
        String threadPoolName = "HiveServer2-Handler-Pool";
        ExecutorService executorService = new ThreadPoolExecutorWithOomHook(minWorkerThreads, maxWorkerThreads, workerKeepAliveTime, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new ThreadFactoryWithGarbageCleanup(threadPoolName), oomHook);
        // Thrift configs
        hiveAuthFactory = new HiveAuthFactory(hiveConf);
        TTransportFactory transportFactory = hiveAuthFactory.getAuthTransFactory();
        TProcessorFactory processorFactory = hiveAuthFactory.getAuthProcFactory(this);
        TServerSocket serverSocket = null;
        List<String> sslVersionBlacklist = new ArrayList<String>();
        for (String sslVersion : hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) {
            sslVersionBlacklist.add(sslVersion);
        }
        if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL)) {
            serverSocket = HiveAuthUtils.getServerSocket(hiveHost, portNum);
        } else {
            String keyStorePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH).trim();
            if (keyStorePath.isEmpty()) {
                throw new IllegalArgumentException(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname + " Not configured for SSL connection");
            }
            String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf, HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname);
            serverSocket = HiveAuthUtils.getServerSSLSocket(hiveHost, portNum, keyStorePath, keyStorePassword, sslVersionBlacklist);
        }
        // Server args
        int maxMessageSize = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE);
        int requestTimeout = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT, TimeUnit.SECONDS);
        int beBackoffSlotLength = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH, TimeUnit.MILLISECONDS);
        TThreadPoolServer.Args sargs = new TThreadPoolServer.Args(serverSocket).processorFactory(processorFactory).transportFactory(transportFactory).protocolFactory(new TBinaryProtocol.Factory()).inputProtocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize)).requestTimeout(requestTimeout).requestTimeoutUnit(TimeUnit.SECONDS).beBackoffSlotLength(beBackoffSlotLength).beBackoffSlotLengthUnit(TimeUnit.MILLISECONDS).executorService(executorService);
        // TCP Server
        server = new TThreadPoolServer(sargs);
        server.setServerEventHandler(new TServerEventHandler() {

            @Override
            public ServerContext createContext(TProtocol input, TProtocol output) {
                Metrics metrics = MetricsFactory.getInstance();
                if (metrics != null) {
                    try {
                        metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                        metrics.incrementCounter(MetricsConstant.CUMULATIVE_CONNECTION_COUNT);
                    } catch (Exception e) {
                        LOG.warn("Error Reporting JDO operation to Metrics system", e);
                    }
                }
                return new ThriftCLIServerContext();
            }

            @Override
            public void deleteContext(ServerContext serverContext, TProtocol input, TProtocol output) {
                Metrics metrics = MetricsFactory.getInstance();
                if (metrics != null) {
                    try {
                        metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                    } catch (Exception e) {
                        LOG.warn("Error Reporting JDO operation to Metrics system", e);
                    }
                }
                ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext;
                SessionHandle sessionHandle = context.getSessionHandle();
                if (sessionHandle != null) {
                    LOG.info("Session disconnected without closing properly. ");
                    try {
                        boolean close = cliService.getSessionManager().getSession(sessionHandle).getHiveConf().getBoolVar(ConfVars.HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT);
                        LOG.info((close ? "" : "Not ") + "Closing the session: " + sessionHandle);
                        if (close) {
                            cliService.closeSession(sessionHandle);
                        }
                    } catch (HiveSQLException e) {
                        LOG.warn("Failed to close session: " + e, e);
                    }
                }
            }

            @Override
            public void preServe() {
            }

            @Override
            public void processContext(ServerContext serverContext, TTransport input, TTransport output) {
                currentServerContext.set(serverContext);
            }
        });
        String msg = "Starting " + ThriftBinaryCLIService.class.getSimpleName() + " on port " + portNum + " with " + minWorkerThreads + "..." + maxWorkerThreads + " worker threads";
        LOG.info(msg);
        server.serve();
    } catch (Throwable t) {
        LOG.error("Error starting HiveServer2: could not start " + ThriftBinaryCLIService.class.getSimpleName(), t);
        System.exit(-1);
    }
}
Also used : ThreadFactoryWithGarbageCleanup(org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup) TServerEventHandler(org.apache.thrift.server.TServerEventHandler) ArrayList(java.util.ArrayList) HiveAuthFactory(org.apache.hive.service.auth.HiveAuthFactory) TProcessorFactory(org.apache.thrift.TProcessorFactory) MetricsFactory(org.apache.hadoop.hive.common.metrics.common.MetricsFactory) TTransportFactory(org.apache.thrift.transport.TTransportFactory) TProcessorFactory(org.apache.thrift.TProcessorFactory) TServerSocket(org.apache.thrift.transport.TServerSocket) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) TProtocol(org.apache.thrift.protocol.TProtocol) HiveSQLException(org.apache.hive.service.cli.HiveSQLException) SessionHandle(org.apache.hive.service.cli.SessionHandle) TTransportFactory(org.apache.thrift.transport.TTransportFactory) HiveSQLException(org.apache.hive.service.cli.HiveSQLException) TBinaryProtocol(org.apache.thrift.protocol.TBinaryProtocol) ServerContext(org.apache.thrift.server.ServerContext) ExecutorService(java.util.concurrent.ExecutorService) HiveAuthFactory(org.apache.hive.service.auth.HiveAuthFactory) TTransport(org.apache.thrift.transport.TTransport) TThreadPoolServer(org.apache.thrift.server.TThreadPoolServer)
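
The connection counters in createContext and deleteContext only have an effect if the metrics subsystem was initialized earlier in HiveServer2's startup; otherwise MetricsFactory.getInstance() stays null. A rough sketch of that bootstrap, assuming the MetricsFactory.init/close pair and the hive.server2.metrics.enabled flag (the wrapper class itself is illustrative):

import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.conf.HiveConf;

public final class MetricsLifecycleSketch {

    public static void start(HiveConf conf) throws Exception {
        // Creates the singleton that MetricsFactory.getInstance() hands out later.
        if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
            MetricsFactory.init(conf);
        }
    }

    public static void stop() throws Exception {
        // Closes the backend and clears the singleton; getInstance() returns null afterwards.
        MetricsFactory.close();
    }
}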

Example 3 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From class Driver, method compileInternal:

private void compileInternal(String command, boolean deferClose) throws CommandProcessorException {
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
        metrics.incrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
    }
    PerfLogger perfLogger = SessionState.getPerfLogger(true);
    perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.WAIT_COMPILE);
    try (CompileLock compileLock = CompileLockFactory.newInstance(driverContext.getConf(), command)) {
        boolean success = compileLock.tryAcquire();
        perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.WAIT_COMPILE);
        if (metrics != null) {
            metrics.decrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
        }
        if (!success) {
            String errorMessage = ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCodedMsg();
            throw DriverUtils.createProcessorException(driverContext, ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode(), errorMessage, null, null);
        }
        try {
            compile(command, true, deferClose);
        } catch (CommandProcessorException cpe) {
            try {
                driverTxnHandler.endTransactionAndCleanup(false);
            } catch (LockException e) {
                LOG.warn("Exception in releasing locks", e);
            }
            throw cpe;
        }
    }
    // Save compile-time PerfLogging for WebUI.
    // Execution-time Perf logs are done by either another thread's PerfLogger or a reset PerfLogger.
    driverContext.getQueryDisplay().setPerfLogStarts(QueryDisplay.Phase.COMPILATION, perfLogger.getStartTimes());
    driverContext.getQueryDisplay().setPerfLogEnds(QueryDisplay.Phase.COMPILATION, perfLogger.getEndTimes());
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) CommandProcessorException(org.apache.hadoop.hive.ql.processors.CommandProcessorException) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) CompileLock(org.apache.hadoop.hive.ql.lock.CompileLock)
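
The WAITING_COMPILE_OPS increment/decrement pair above is the usual way to expose an in-flight count: bump the counter on entry, drop it on exit, and guard for the disabled-metrics case. A generic sketch of that pairing, with an illustrative counter name:

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;

public final class InFlightCounterSketch {

    // Illustrative name; Hive's real counters are declared in MetricsConstant.
    private static final String IN_FLIGHT_OPS = "example_in_flight_ops";

    public static void runTracked(Runnable work) {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            metrics.incrementCounter(IN_FLIGHT_OPS, 1);
        }
        try {
            work.run();
        } finally {
            if (metrics != null) {
                metrics.decrementCounter(IN_FLIGHT_OPS, 1);
            }
        }
    }
}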

Example 4 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From class WmPoolMetrics, method initAfterRegister:

public void initAfterRegister() {
    // Make sure we capture the same metrics as Hadoop2 metrics system, via annotations.
    if (allMetrics != null)
        return;
    allMetrics = new HashMap<>();
    for (Field field : this.getClass().getDeclaredFields()) {
        for (Annotation annotation : field.getAnnotations()) {
            if (!(annotation instanceof Metric))
                continue;
            try {
                field.setAccessible(true);
                allMetrics.put(field.getName(), (MutableMetric) field.get(this));
            } catch (IllegalAccessException ex) {
                // Not expected, access by the same class.
                break;
            }
            break;
        }
    }
    // Set up codahale if enabled; we cannot tag the values so just prefix them for the JMX view.
    Metrics chMetrics = MetricsFactory.getInstance();
    if (!(chMetrics instanceof CodahaleMetrics))
        return;
    List<String> codahaleNames = new ArrayList<>();
    for (Map.Entry<String, MutableMetric> e : allMetrics.entrySet()) {
        MutableMetric metric = e.getValue();
        MetricsVariable<?> var = null;
        if (metric instanceof MutableCounterInt) {
            var = new CodahaleCounterWrapper((MutableCounterInt) metric);
        } else if (metric instanceof MutableGaugeInt) {
            var = new CodahaleGaugeWrapper((MutableGaugeInt) metric);
        }
        // Unexpected metric type.
        if (var == null)
            continue;
        String name = "WM_" + poolName + "_" + e.getKey();
        codahaleNames.add(name);
        chMetrics.addGauge(name, var);
    }
    this.codahaleGaugeNames = codahaleNames;
}
Also used : MutableCounterInt(org.apache.hadoop.metrics2.lib.MutableCounterInt) ArrayList(java.util.ArrayList) Annotation(java.lang.annotation.Annotation) CodahaleMetrics(org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics) Field(java.lang.reflect.Field) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) CodahaleMetrics(org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics) MutableMetric(org.apache.hadoop.metrics2.lib.MutableMetric) MutableGaugeInt(org.apache.hadoop.metrics2.lib.MutableGaugeInt) MutableMetric(org.apache.hadoop.metrics2.lib.MutableMetric) Metric(org.apache.hadoop.metrics2.annotation.Metric) HashMap(java.util.HashMap) Map(java.util.Map)
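
addGauge registers a MetricsVariable that the reporter polls on every publish cycle rather than a value pushed at call time. A small sketch of exposing an application value as a gauge; the gauge name and backing counter are illustrative:

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;

public final class GaugeSketch {

    private static final AtomicInteger ACTIVE_POOLS = new AtomicInteger();

    public static void registerGauge() {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics == null) {
            return;  // metrics disabled
        }
        // getValue() is invoked by the metrics reporter, not by this code.
        metrics.addGauge("example_active_pools", new MetricsVariable<Integer>() {
            @Override
            public Integer getValue() {
                return ACTIVE_POOLS.get();
            }
        });
    }
}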

Example 5 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From class PerfLogger, method cleanupPerfLogMetrics:

/**
 * Cleans up any dangling perfLog metric call scopes.
 */
public void cleanupPerfLogMetrics() {
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
        for (MetricsScope openScope : openScopes.values()) {
            metrics.endScope(openScope);
        }
    }
    openScopes.clear();
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) MetricsScope(org.apache.hadoop.hive.common.metrics.common.MetricsScope)
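
The scopes that cleanupPerfLogMetrics force-closes are opened and closed through the same Metrics interface; a rough sketch of that open/close lifecycle, assuming the createScope/endScope pair (the scope name is illustrative):

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.common.metrics.common.MetricsScope;

public final class MetricsScopeSketch {

    public static void timedSection(Runnable work) {
        Metrics metrics = MetricsFactory.getInstance();
        MetricsScope scope = (metrics == null) ? null : metrics.createScope("example_section");
        try {
            work.run();
        } finally {
            if (metrics != null && scope != null) {
                // endScope records the elapsed time for the scope.
                metrics.endScope(scope);
            }
        }
    }
}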

Aggregations

Metrics (org.apache.hadoop.hive.common.metrics.common.Metrics): 29 usages
ArrayList (java.util.ArrayList): 7 usages
IOException (java.io.IOException): 6 usages
HiveSQLException (org.apache.hive.service.cli.HiveSQLException): 5 usages
Test (org.junit.Test): 4 usages
HashMap (java.util.HashMap): 3 usages
Map (java.util.Map): 3 usages
MetricsFactory (org.apache.hadoop.hive.common.metrics.common.MetricsFactory): 3 usages
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 3 usages
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger): 3 usages
ThreadFactoryWithGarbageCleanup (org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup): 3 usages
TBinaryProtocol (org.apache.thrift.protocol.TBinaryProtocol): 3 usages
TProtocol (org.apache.thrift.protocol.TProtocol): 3 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 2 usages
SQLException (java.sql.SQLException): 2 usages
LinkedHashMap (java.util.LinkedHashMap): 2 usages
LinkedHashSet (java.util.LinkedHashSet): 2 usages
ExecutorService (java.util.concurrent.ExecutorService): 2 usages
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 2 usages
MetricsScope (org.apache.hadoop.hive.common.metrics.common.MetricsScope): 2 usages