
Example 16 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class TestMapredLocalTask, method localMRTask_updates_Metrics.

@Test
public void localMRTask_updates_Metrics() throws IOException {
    Metrics mockMetrics = Mockito.mock(Metrics.class);
    MapredLocalTask localMrTask = new MapredLocalTask();
    localMrTask.updateTaskMetrics(mockMetrics);
    verify(mockMetrics, times(1)).incrementCounter(MetricsConstant.HIVE_MR_TASKS);
    verify(mockMetrics, never()).incrementCounter(MetricsConstant.HIVE_TEZ_TASKS);
    verify(mockMetrics, never()).incrementCounter(MetricsConstant.HIVE_SPARK_TASKS);
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) Test(org.junit.Test)
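
For reference, a minimal sketch of the method the test above exercises, assuming updateTaskMetrics simply bumps the MR-task counter (which is what the verifications imply); the real MapredLocalTask may do more:

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;

// Hypothetical sketch, not the actual Hive class.
public class MapredLocalTaskSketch {

    public void updateTaskMetrics(Metrics metrics) {
        // A local MR task counts only against the MR counter, never Tez or Spark.
        metrics.incrementCounter(MetricsConstant.HIVE_MR_TASKS);
    }
}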

Example 17 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class Operation, method run.

public void run() throws HiveSQLException {
    beforeRun();
    try {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            metrics.incrementCounter(MetricsConstant.OPEN_OPERATIONS);
        }
        runInternal();
    } finally {
        afterRun();
    }
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics)
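
The null check around MetricsFactory.getInstance() is the recurring pattern in these examples: the factory returns null when metrics have not been initialized. A hypothetical helper (not part of Hive) that captures this null-safe increment could look like:

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;

// Hypothetical convenience wrapper; Hive call sites inline this pattern instead.
public final class MetricsUtil {

    private MetricsUtil() {
    }

    public static void tryIncrement(String counterName) {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            metrics.incrementCounter(counterName);
        }
    }
}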

Example 18 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class SessionManager, method startTimeoutChecker.

private void startTimeoutChecker() {
    // minimum 3 seconds
    final long interval = Math.max(checkInterval, 3000L);
    final Runnable timeoutChecker = new Runnable() {

        @Override
        public void run() {
            sleepFor(interval);
            while (!shutdown) {
                long current = System.currentTimeMillis();
                for (HiveSession session : new ArrayList<HiveSession>(handleToSession.values())) {
                    if (shutdown) {
                        break;
                    }
                    if (sessionTimeout > 0 && session.getLastAccessTime() + sessionTimeout <= current && (!checkOperation || session.getNoOperationTime() > sessionTimeout)) {
                        SessionHandle handle = session.getSessionHandle();
                        LOG.warn("Session " + handle + " is Timed-out (last access : " + new Date(session.getLastAccessTime()) + ") and will be closed");
                        try {
                            closeSession(handle);
                        } catch (HiveSQLException e) {
                            LOG.warn("Exception is thrown closing session " + handle, e);
                        } finally {
                            Metrics metrics = MetricsFactory.getInstance();
                            if (metrics != null) {
                                metrics.incrementCounter(MetricsConstant.HS2_ABANDONED_SESSIONS);
                            }
                        }
                    } else {
                        session.closeExpiredOperations();
                    }
                }
                sleepFor(interval);
            }
        }

        private void sleepFor(long interval) {
            synchronized (timeoutCheckerLock) {
                try {
                    timeoutCheckerLock.wait(interval);
                } catch (InterruptedException e) {
                    // Ignore, and break.
                }
            }
        }
    };
    backgroundOperationPool.execute(timeoutChecker);
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) HiveSQLException(org.apache.hive.service.cli.HiveSQLException) ArrayList(java.util.ArrayList) SessionHandle(org.apache.hive.service.cli.SessionHandle) Date(java.util.Date)
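
The expiry condition in the loop above packs three checks into one expression. A hypothetical extraction of that predicate, shown only to make the condition easier to read (it is not part of the real SessionManager):

// Sketch: a session is expired when timeouts are enabled, it has been idle past the
// timeout, and (if operation checking is on) it also has no recent operation activity.
final class SessionExpiryPredicate {

    private SessionExpiryPredicate() {
    }

    static boolean isExpired(long lastAccessTime, long noOperationTime, long sessionTimeout,
            boolean checkOperation, long now) {
        return sessionTimeout > 0
            && lastAccessTime + sessionTimeout <= now
            && (!checkOperation || noOperationTime > sessionTimeout);
    }
}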

Example 19 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class ZooKeeperHiveLockManager, method lockPrimitive.

/**
 * Creates a primitive lock object on ZooKeeper.
 * @param key The lock data
 * @param mode The lock mode (HiveLockMode - EXCLUSIVE/SHARED/SEMI_SHARED)
 * @param keepAlive If true, the ZooKeeper lock nodes are created as PERSISTENT; otherwise
 *                  they are created as EPHEMERAL
 * @param parentCreated True if the parent node is expected to exist already; otherwise the
 *                      parent nodes are created as well
 * @param conflictingLocks The set where we should collect the conflicting locks when
 *                         the logging level is set to DEBUG
 * @return The created ZooKeeperHiveLock object, null if there was a conflicting lock
 * @throws Exception If there was an unexpected Exception
 */
private ZooKeeperHiveLock lockPrimitive(HiveLockObject key, HiveLockMode mode, boolean keepAlive, boolean parentCreated, Set<String> conflictingLocks) throws Exception {
    String res;
    // If the parents have already been created, create the last child only
    List<String> names = new ArrayList<String>();
    String lastName;
    HiveLockObjectData lockData = key.getData();
    lockData.setClientIp(clientIp);
    if (parentCreated) {
        lastName = getLastObjectName(parent, key);
        names.add(lastName);
    } else {
        names = getObjectNames(key);
        lastName = names.get(names.size() - 1);
    }
    // Create the parents first
    for (String name : names) {
        try {
            res = createChild(name, new byte[0], CreateMode.PERSISTENT);
        } catch (Exception e) {
            if (!(e instanceof KeeperException) || ((KeeperException) e).code() != KeeperException.Code.NODEEXISTS) {
                // if the exception is not 'NODEEXISTS', re-throw it
                throw e;
            }
        }
    }
    res = createChild(getLockName(lastName, mode), key.getData().toString().getBytes(), keepAlive ? CreateMode.PERSISTENT_SEQUENTIAL : CreateMode.EPHEMERAL_SEQUENTIAL);
    int seqNo = getSequenceNumber(res, getLockName(lastName, mode));
    if (seqNo == -1) {
        curatorFramework.delete().forPath(res);
        throw new LockException("The created node does not contain a sequence number: " + res);
    }
    List<String> children = curatorFramework.getChildren().forPath(lastName);
    String exLock = getLockName(lastName, HiveLockMode.EXCLUSIVE);
    String shLock = getLockName(lastName, HiveLockMode.SHARED);
    for (String child : children) {
        child = lastName + "/" + child;
        // Is there a conflicting lock on the same object with a lower sequence
        // number
        int childSeq = seqNo;
        if (child.startsWith(exLock)) {
            childSeq = getSequenceNumber(child, exLock);
        }
        if ((mode == HiveLockMode.EXCLUSIVE) && child.startsWith(shLock)) {
            childSeq = getSequenceNumber(child, shLock);
        }
        if ((childSeq >= 0) && (childSeq < seqNo)) {
            try {
                curatorFramework.delete().forPath(res);
            } finally {
                if (LOG.isDebugEnabled()) {
                    try {
                        String data = new String(curatorFramework.getData().forPath(child));
                        conflictingLocks.add(data);
                    } catch (Exception e) {
                        // ignored
                    }
                }
            }
            return null;
        }
    }
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
        try {
            switch(mode) {
                case EXCLUSIVE:
                    metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_EXCLUSIVELOCKS);
                    break;
                case SEMI_SHARED:
                    metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SEMISHAREDLOCKS);
                    break;
                default:
                    metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHAREDLOCKS);
                    break;
            }
        } catch (Exception e) {
            LOG.warn("Error Reporting hive client zookeeper lock operation to Metrics system", e);
        }
    }
    return new ZooKeeperHiveLock(res, key, mode);
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) HiveLockObjectData(org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData) ArrayList(java.util.ArrayList) KeeperException(org.apache.zookeeper.KeeperException)
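
The lock ordering above relies on the sequence number that ZooKeeper appends to sequential znodes: a lock is granted only if no conflicting child has a lower sequence. A sketch of how such a suffix could be parsed, assuming the real getSequenceNumber follows the same "-1 means no sequence number" convention used in the method above:

// Hypothetical helper; the actual parsing in ZooKeeperHiveLockManager may differ in detail.
final class SequenceNumbers {

    private SequenceNumbers() {
    }

    static int parse(String createdPath, String lockPrefix) {
        // e.g. createdPath = ".../LOCK-SHARED-0000000042", lockPrefix = ".../LOCK-SHARED-"
        if (!createdPath.startsWith(lockPrefix)) {
            return -1;
        }
        try {
            return Integer.parseInt(createdPath.substring(lockPrefix.length()));
        } catch (NumberFormatException e) {
            return -1;
        }
    }
}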

Example 20 with Metrics

Use of org.apache.hadoop.hive.common.metrics.common.Metrics in project hive by apache.

From the class DbLockManager, method unlock.

@Override
public void unlock(HiveLock hiveLock) throws LockException {
    long lockId = ((DbHiveLock) hiveLock).lockId;
    boolean removed = false;
    try {
        LOG.debug("Unlocking " + hiveLock);
        txnManager.getMS().unlock(lockId);
        // important to remove after unlock() in case it fails
        removed = locks.remove(hiveLock);
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            try {
                metrics.decrementCounter(MetricsConstant.METASTORE_HIVE_LOCKS);
            } catch (Exception e) {
                LOG.warn("Error Reporting hive client metastore unlock operation to Metrics system", e);
            }
        }
        LOG.debug("Removed a lock " + removed);
    } catch (NoSuchLockException e) {
        // if metastore has no record of this lock, it most likely timed out; either way
        // there is no point tracking it here any longer
        removed = locks.remove(hiveLock);
        LOG.error("Metastore could find no record of lock " + JavaUtils.lockIdToString(lockId));
        throw new LockException(e, ErrorMsg.LOCK_NO_SUCH_LOCK, JavaUtils.lockIdToString(lockId));
    } catch (TxnOpenException e) {
        throw new RuntimeException("Attempt to unlock lock " + JavaUtils.lockIdToString(lockId) + " associated with an open transaction, " + e.getMessage(), e);
    } catch (TException e) {
        throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e);
    } finally {
        if (removed) {
            LOG.debug("Removed a lock " + hiveLock);
        }
    }
}
Also used : TException(org.apache.thrift.TException) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) IOException(java.io.IOException)
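
This unlock path decrements METASTORE_HIVE_LOCKS, so the corresponding acquire path would be expected to increment the same counter when a metastore lock is taken. A sketch of that counterpart under this assumption (not the actual DbLockManager.lock() implementation):

import org.apache.hadoop.hive.common.metrics.common.Metrics;
import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;

// Hypothetical counterpart to the decrement above.
public final class LockMetricsSketch {

    private LockMetricsSketch() {
    }

    public static void recordLockAcquired() {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
            // Increment the gauge-style counter that unlock() decrements.
            metrics.incrementCounter(MetricsConstant.METASTORE_HIVE_LOCKS);
        }
    }
}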

Aggregations

Metrics (org.apache.hadoop.hive.common.metrics.common.Metrics): 23
IOException (java.io.IOException): 5
ArrayList (java.util.ArrayList): 5
HiveSQLException (org.apache.hive.service.cli.HiveSQLException): 4
Test (org.junit.Test): 4
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 3
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 2
SQLException (java.sql.SQLException): 2
HashMap (java.util.HashMap): 2
LinkedHashMap (java.util.LinkedHashMap): 2
LinkedHashSet (java.util.LinkedHashSet): 2
Map (java.util.Map): 2
MetricsFactory (org.apache.hadoop.hive.common.metrics.common.MetricsFactory): 2
MetricsScope (org.apache.hadoop.hive.common.metrics.common.MetricsScope): 2
TaskResult (org.apache.hadoop.hive.ql.exec.TaskResult): 2
TaskRunner (org.apache.hadoop.hive.ql.exec.TaskRunner): 2
HookContext (org.apache.hadoop.hive.ql.hooks.HookContext): 2
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 2
AuthorizationException (org.apache.hadoop.hive.ql.metadata.AuthorizationException): 2