Example 11 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class PerfLogger, method cleanupPerfLogMetrics.

/**
 * Cleans up any dangling perfLog metric call scopes.
 */
public void cleanupPerfLogMetrics() {
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
        for (MetricsScope openScope : openScopes.values()) {
            metrics.endScope(openScope);
        }
    }
    openScopes.clear();
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) MetricsScope(org.apache.hadoop.hive.common.metrics.common.MetricsScope)
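
For context, a minimal sketch of how entries accumulate in openScopes in the first place: each timed call site opens a MetricsScope and records it under a key, and cleanupPerfLogMetrics closes whatever was never ended (for example, when a query is cancelled mid-flight). The beginMetrics/endMetrics names and the map field below are illustrative, not the actual PerfLogger API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.common.metrics.common.MetricsScope;

public class PerfLogScopeSketch {

    // mirrors the openScopes field consumed by cleanupPerfLogMetrics above
    private final Map<String, MetricsScope> openScopes = new ConcurrentHashMap<>();

    void beginMetrics(String method) {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            // createScope starts a timed scope; it stays open until endScope runs
            openScopes.put(method, metrics.createScope(method));
        }
    }

    void endMetrics(String method) {
        Metrics metrics = MetricsFactory.getInstance();
        MetricsScope scope = openScopes.remove(method);
        if (metrics != null && scope != null) {
            metrics.endScope(scope);
        }
    }
}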

Example 12 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class ZooKeeperHiveLockManager, method unlockPrimitive.

/* Remove the specified lock. */
@VisibleForTesting
static void unlockPrimitive(HiveLock hiveLock, String parent, CuratorFramework curatorFramework) throws LockException {
    ZooKeeperHiveLock zLock = (ZooKeeperHiveLock) hiveLock;
    HiveLockMode lMode = hiveLock.getHiveLockMode();
    HiveLockObject obj = zLock.getHiveLockObject();
    String name = getLastObjectName(parent, obj);
    try {
        // catch InterruptedException to make sure locks can be released when the query is cancelled.
        try {
            curatorFramework.delete().forPath(zLock.getPath());
        } catch (InterruptedException ie) {
            curatorFramework.delete().forPath(zLock.getPath());
        }
        // Delete the parent node if all the children have been deleted
        List<String> children = null;
        try {
            children = curatorFramework.getChildren().forPath(name);
        } catch (InterruptedException ie) {
            children = curatorFramework.getChildren().forPath(name);
        }
        if (children == null || children.isEmpty()) {
            try {
                curatorFramework.delete().forPath(name);
            } catch (InterruptedException ie) {
                curatorFramework.delete().forPath(name);
            }
        }
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            try {
                switch(lMode) {
                    case EXCLUSIVE:
                        metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_EXCLUSIVELOCKS);
                        break;
                    case SEMI_SHARED:
                        metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SEMISHAREDLOCKS);
                        break;
                    default:
                        metrics.decrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHAREDLOCKS);
                        break;
                }
            } catch (Exception e) {
                LOG.warn("Error Reporting hive client zookeeper unlock operation to Metrics system", e);
            }
        }
    } catch (KeeperException.NoNodeException nne) {
        // can happen in retrying deleting the zLock after exceptions like InterruptedException
        // or in a race condition where parent has already been deleted by other process when it
        // is to be deleted. Both cases should not raise error
        LOG.debug("Node " + zLock.getPath() + " or its parent has already been deleted.");
    } catch (KeeperException.NotEmptyException nee) {
        // can happen in a race condition where another process adds a zLock under this parent
        // just before it is about to be deleted. It should not be a problem since this parent
        // can eventually be deleted by the process which hold its last child zLock
        LOG.debug("Node " + name + " to be deleted is not empty.");
    } catch (Exception e) {
        // exceptions including InterruptException and other KeeperException
        LOG.error("Failed to release ZooKeeper lock: ", e);
        throw new LockException(e);
    }
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) KeeperException(org.apache.zookeeper.KeeperException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
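
The three inline catch-and-retry blocks above share one shape: if the releasing thread is interrupted by query cancellation, the Curator call is retried once so the lock node is not leaked. A minimal sketch of that pattern factored into a hypothetical helper (the real code deliberately inlines it at each call site):

import org.apache.curator.framework.CuratorFramework;

final class RetryOnInterruptSketch {

    // Hypothetical helper; retries the delete exactly once if the first
    // attempt is interrupted, matching the inline pattern above.
    static void deletePath(CuratorFramework client, String path) throws Exception {
        try {
            client.delete().forPath(path);
        } catch (InterruptedException ie) {
            client.delete().forPath(path);
        }
    }
}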

Example 13 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class DbLockManager, method lock.

/**
 * Send a lock request to the metastore. This is intended for use by
 * {@link DbTxnManager}.
 * @param lock lock request
 * @param queryId ID of the query requesting the locks; must not be null
 * @param isBlocking if true, will block until the locks have been acquired
 * @param acquiredLocks list to which any successfully acquired locks are added
 * @return the result of the lock attempt
 * @throws LockException
 */
LockState lock(LockRequest lock, String queryId, boolean isBlocking, List<HiveLock> acquiredLocks) throws LockException {
    Objects.requireNonNull(queryId, "queryId cannot be null");
    nextSleep = 50;
    /*
     * get from conf to pick up changes; make sure not to set too low and kill the metastore
     * MAX_SLEEP is the max time each backoff() will wait for, thus the total time to wait for
     * successful lock acquisition is approximately (see backoff()) maxNumWaits * MAX_SLEEP.
     */
    MAX_SLEEP = Math.max(15000, conf.getTimeVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS));
    int maxNumWaits = Math.max(0, conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES));
    try {
        LOG.info("Requesting: queryId=" + queryId + " " + lock);
        LockResponse res = txnManager.getMS().lock(lock);
        // link lockId to queryId
        LOG.info("Response to queryId=" + queryId + " " + res);
        if (!isBlocking) {
            if (res.getState() == LockState.WAITING) {
                return LockState.WAITING;
            }
        }
        int numRetries = 0;
        long startRetry = System.currentTimeMillis();
        while (res.getState() == LockState.WAITING && numRetries++ < maxNumWaits) {
            backoff();
            res = txnManager.getMS().checkLock(res.getLockid());
        }
        long retryDuration = System.currentTimeMillis() - startRetry;
        DbHiveLock hl = new DbHiveLock(res.getLockid(), queryId, lock.getTxnid());
        if (locks.size() > 0) {
            boolean logMsg = false;
            for (DbHiveLock l : locks) {
                if (l.txnId != hl.txnId) {
                    // locks from different transactions detected (or from transaction and read-only query in autocommit)
                    logMsg = true;
                    break;
                } else if (l.txnId == 0) {
                    if (!l.queryId.equals(hl.queryId)) {
                        // here means no open transaction, but different queries
                        logMsg = true;
                        break;
                    }
                }
            }
            if (logMsg) {
                LOG.warn("adding new DbHiveLock(" + hl + ") while we are already tracking locks: " + locks);
            }
        }
        locks.add(hl);
        if (res.getState() != LockState.ACQUIRED) {
            if (res.getState() == LockState.WAITING) {
                /**
                 * the {@link #unlock(HiveLock)} here is more about future proofing when support for
                 * multi-statement txns is added.  In that case it's reasonable for the client
                 * to retry this part of txn or try something else w/o aborting the whole txn.
                 * Also for READ_COMMITTED (when and if that is supported).
                 */
                // remove the locks in Waiting state
                unlock(hl);
                LockException le = new LockException(null, ErrorMsg.LOCK_ACQUIRE_TIMEDOUT, lock.toString(), Long.toString(retryDuration), res.toString());
                if (conf.getBoolVar(HiveConf.ConfVars.TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT)) {
                    showLocksNewFormat(le.getMessage());
                }
                throw le;
            }
            throw new LockException(ErrorMsg.LOCK_CANNOT_BE_ACQUIRED.getMsg() + " " + res);
        }
        acquiredLocks.add(hl);
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            try {
                metrics.incrementCounter(MetricsConstant.METASTORE_HIVE_LOCKS);
            } catch (Exception e) {
                LOG.warn("Error Reporting hive client metastore lock operation to Metrics system", e);
            }
        }
        return res.getState();
    } catch (NoSuchTxnException e) {
        LOG.error("Metastore could not find " + JavaUtils.txnIdToString(lock.getTxnid()));
        throw new LockException(e, ErrorMsg.TXN_NO_SUCH_TRANSACTION, JavaUtils.txnIdToString(lock.getTxnid()));
    } catch (TxnAbortedException e) {
        LockException le = new LockException(e, ErrorMsg.TXN_ABORTED, JavaUtils.txnIdToString(lock.getTxnid()), e.getMessage());
        LOG.error(le.getMessage());
        throw le;
    } catch (TException e) {
        throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e);
    }
}
Also used : TException(org.apache.thrift.TException) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) IOException(java.io.IOException)
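
The backoff() method is referenced but not shown. A sketch consistent with the initial nextSleep = 50 and the comment that the total wait is approximately maxNumWaits * MAX_SLEEP would be exponential growth capped at MAX_SLEEP; this is an assumption about the elided method, not a quote of the Hive implementation.

class BackoffSketch {

    private long nextSleep = 50;
    private long MAX_SLEEP = 15000;

    // Assumed shape: double the sleep on each retry, cap it at MAX_SLEEP,
    // and swallow interrupts so the caller's loop re-checks the lock state.
    void backoff() {
        nextSleep *= 2;
        if (nextSleep > MAX_SLEEP) {
            nextSleep = MAX_SLEEP;
        }
        try {
            Thread.sleep(nextSleep);
        } catch (InterruptedException e) {
            // ignored; the retry loop in lock() re-checks the lock state next
        }
    }
}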

Example 14 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class HiveMetaStore, method startMetaStore.

/**
 * Start Metastore based on a passed {@link HadoopThriftAuthBridge}.
 *
 * @param port port on which the Thrift server will listen
 * @param bridge authentication bridge used to create the SASL server in secure mode
 * @param conf configuration overrides
 * @param startLock lock guarding startCondition
 * @param startCondition condition used to signal waiting threads once the server is serving
 * @param startedServing flag set once the server is serving
 * @throws Throwable
 */
public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, HiveConf conf, Lock startLock, Condition startCondition, AtomicBoolean startedServing) throws Throwable {
    try {
        isMetaStoreRemote = true;
        // Server will create new threads up to max as necessary. After an idle
        // period, it will destroy threads to keep the number of threads in the
        // pool to min.
        long maxMessageSize = conf.getLongVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE);
        int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
        int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
        boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
        boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
        boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL);
        boolean useSSL = conf.getBoolVar(ConfVars.HIVE_METASTORE_USE_SSL);
        useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);
        TProcessor processor;
        TTransportFactory transFactory;
        final TProtocolFactory protocolFactory;
        final TProtocolFactory inputProtoFactory;
        if (useCompactProtocol) {
            protocolFactory = new TCompactProtocol.Factory();
            inputProtoFactory = new TCompactProtocol.Factory(maxMessageSize, maxMessageSize);
        } else {
            protocolFactory = new TBinaryProtocol.Factory();
            inputProtoFactory = new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize);
        }
        HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false);
        IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
        TServerSocket serverSocket = null;
        if (useSasl) {
            // we are in secure mode.
            if (useFramedTransport) {
                throw new HiveMetaException("Framed transport is not supported with SASL enabled.");
            }
            saslServer = bridge.createServer(conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE), conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL));
            // Start delegation token manager
            delegationTokenManager = new HiveDelegationTokenManager();
            delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler, ServerMode.METASTORE);
            saslServer.setSecretManager(delegationTokenManager.getSecretManager());
            transFactory = saslServer.createTransportFactory(MetaStoreUtils.getMetaStoreSaslProperties(conf));
            processor = saslServer.wrapProcessor(new ThriftHiveMetastore.Processor<IHMSHandler>(handler));
            serverSocket = HiveAuthUtils.getServerSocket(null, port);
            LOG.info("Starting DB backed MetaStore Server in Secure Mode");
        } else {
            // we are in unsecure mode.
            if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
                transFactory = useFramedTransport ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory()) : new TUGIContainingTransport.Factory();
                processor = new TUGIBasedProcessor<IHMSHandler>(handler);
                LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
            } else {
                transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
                processor = new TSetIpAddressProcessor<IHMSHandler>(handler);
                LOG.info("Starting DB backed MetaStore Server");
            }
            // enable SSL support for HMS
            List<String> sslVersionBlacklist = new ArrayList<String>();
            for (String sslVersion : conf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) {
                sslVersionBlacklist.add(sslVersion);
            }
            if (!useSSL) {
                serverSocket = HiveAuthUtils.getServerSocket(null, port);
            } else {
                String keyStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim();
                if (keyStorePath.isEmpty()) {
                    throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname + " is not configured for SSL connection");
                }
                String keyStorePassword = ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
                serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath, keyStorePassword, sslVersionBlacklist);
            }
        }
        if (tcpKeepAlive) {
            serverSocket = new TServerSocketKeepAlive(serverSocket);
        }
        TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket).processor(processor).transportFactory(transFactory).protocolFactory(protocolFactory).inputProtocolFactory(inputProtoFactory).minWorkerThreads(minWorkerThreads).maxWorkerThreads(maxWorkerThreads);
        TServer tServer = new TThreadPoolServer(args);
        TServerEventHandler tServerEventHandler = new TServerEventHandler() {

            @Override
            public void preServe() {
            }

            @Override
            public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) {
                try {
                    Metrics metrics = MetricsFactory.getInstance();
                    if (metrics != null) {
                        metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                    }
                } catch (Exception e) {
                    LOG.warn("Error Reporting Metastore open connection to Metrics system", e);
                }
                return null;
            }

            @Override
            public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) {
                try {
                    Metrics metrics = MetricsFactory.getInstance();
                    if (metrics != null) {
                        metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                    }
                } catch (Exception e) {
                    LOG.warn("Error Reporting Metastore close connection to Metrics system", e);
                }
                // If the IMetaStoreClient#close was called, HMSHandler#shutdown would have already
                // cleaned up thread local RawStore. Otherwise, do it now.
                cleanupRawStore();
            }

            @Override
            public void processContext(ServerContext serverContext, TTransport tTransport, TTransport tTransport1) {
            }
        };
        tServer.setServerEventHandler(tServerEventHandler);
        HMSHandler.LOG.info("Started the new metaserver on port [" + port + "]...");
        HMSHandler.LOG.info("Options.minWorkerThreads = " + minWorkerThreads);
        HMSHandler.LOG.info("Options.maxWorkerThreads = " + maxWorkerThreads);
        HMSHandler.LOG.info("TCP keepalive = " + tcpKeepAlive);
        if (startLock != null) {
            signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing);
        }
        tServer.serve();
    } catch (Throwable x) {
        x.printStackTrace();
        HMSHandler.LOG.error(StringUtils.stringifyException(x));
        throw x;
    }
}
Also used : TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) TProcessor(org.apache.thrift.TProcessor) TServerEventHandler(org.apache.thrift.server.TServerEventHandler) TServer(org.apache.thrift.server.TServer) ArrayList(java.util.ArrayList) LoggerFactory(org.slf4j.LoggerFactory) TTransportFactory(org.apache.thrift.transport.TTransportFactory) MetricsFactory(org.apache.hadoop.hive.common.metrics.common.MetricsFactory) TCompactProtocol(org.apache.thrift.protocol.TCompactProtocol) TServerSocket(org.apache.thrift.transport.TServerSocket) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) TProtocol(org.apache.thrift.protocol.TProtocol) TFramedTransport(org.apache.thrift.transport.TFramedTransport) TUGIContainingTransport(org.apache.hadoop.hive.thrift.TUGIContainingTransport) JDOException(javax.jdo.JDOException) LogInitializationException(org.apache.hadoop.hive.common.LogUtils.LogInitializationException) TException(org.apache.thrift.TException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) TBinaryProtocol(org.apache.thrift.protocol.TBinaryProtocol) ServerContext(org.apache.thrift.server.ServerContext) HiveDelegationTokenManager(org.apache.hadoop.hive.thrift.HiveDelegationTokenManager) TTransport(org.apache.thrift.transport.TTransport) TThreadPoolServer(org.apache.thrift.server.TThreadPoolServer)
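
Both handler callbacks guard MetricsFactory.getInstance() against null because the factory hands out a Metrics instance only after it has been initialized. A minimal sketch of that lifecycle, assuming metrics are gated on the hive.metastore.metrics.enabled property (the exact wiring in HiveMetaStore's startup path may differ):

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.conf.HiveConf;

public class MetricsLifecycleSketch {

    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // Assumption: metrics are initialized only when explicitly enabled;
        // until init(conf) runs, getInstance() returns null, hence the guards.
        if (conf.getBoolean("hive.metastore.metrics.enabled", false)) {
            MetricsFactory.init(conf);
        }
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
        }
        MetricsFactory.close();
    }
}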

Example 15 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class TestTezTask, method tezTask_updates_Metrics.

@Test
public void tezTask_updates_Metrics() throws IOException {
    Metrics mockMetrics = Mockito.mock(Metrics.class);
    TezTask tezTask = new TezTask();
    tezTask.updateTaskMetrics(mockMetrics);
    verify(mockMetrics, times(1)).incrementCounter(MetricsConstant.HIVE_TEZ_TASKS);
    verify(mockMetrics, never()).incrementCounter(MetricsConstant.HIVE_SPARK_TASKS);
    verify(mockMetrics, never()).incrementCounter(MetricsConstant.HIVE_MR_TASKS);
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) Test(org.junit.Test)
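
For reference, the override this test exercises plausibly looks like the following, with each Task subclass bumping only its own counter; the body below is inferred from the verify() assertions, not quoted from the Hive source.

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;

public class TezTaskMetricsSketch {

    // Inferred from the assertions: only the Tez counter is incremented, so
    // the Spark and MR counters can be verified as never touched.
    public void updateTaskMetrics(Metrics metrics) {
        metrics.incrementCounter(MetricsConstant.HIVE_TEZ_TASKS);
    }
}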

Aggregations

Metrics (org.apache.hadoop.hive.common.metrics.common.Metrics) 23
IOException (java.io.IOException) 5
ArrayList (java.util.ArrayList) 5
HiveSQLException (org.apache.hive.service.cli.HiveSQLException) 4
Test (org.junit.Test) 4
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException) 3
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger) 3
ImmutableMap (com.google.common.collect.ImmutableMap) 2
SQLException (java.sql.SQLException) 2
HashMap (java.util.HashMap) 2
LinkedHashMap (java.util.LinkedHashMap) 2
LinkedHashSet (java.util.LinkedHashSet) 2
Map (java.util.Map) 2
MetricsFactory (org.apache.hadoop.hive.common.metrics.common.MetricsFactory) 2
MetricsScope (org.apache.hadoop.hive.common.metrics.common.MetricsScope) 2
TaskResult (org.apache.hadoop.hive.ql.exec.TaskResult) 2
TaskRunner (org.apache.hadoop.hive.ql.exec.TaskRunner) 2
HookContext (org.apache.hadoop.hive.ql.hooks.HookContext) 2
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity) 2
AuthorizationException (org.apache.hadoop.hive.ql.metadata.AuthorizationException) 2