
Example 1 with HiveLockObj

Use of org.apache.hadoop.hive.ql.lockmgr.HiveLockObj in project hive by apache.

From the class MoveTask, method releaseLocks:

// Release all the locks acquired for this object
// This becomes important for multi-table inserts when one branch may take much more
// time than the others. It is better to release the lock for this particular insert.
// The other option is to wait for all the branches to finish, or set
// hive.multi.insert.move.tasks.share.dependencies to true, which will mean that the
// first multi-insert results will be available when all of the branches of multi-table
// inserts are done.
private void releaseLocks(LoadTableDesc ltd) throws HiveException {
    // nothing needs to be done
    if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY)) {
        LOG.debug("No locks to release because Hive concurrency support is not enabled");
        return;
    }
    if (context.getHiveTxnManager().supportsAcid()) {
        // Acid LM doesn't maintain getOutputLockObjects(); this 'if' just makes logic more explicit
        return;
    }
    HiveLockManager lockMgr = context.getHiveTxnManager().getLockManager();
    WriteEntity output = context.getLoadTableOutputMap().get(ltd);
    List<HiveLockObj> lockObjects = context.getOutputLockObjects().get(output);
    if (CollectionUtils.isEmpty(lockObjects)) {
        LOG.debug("No locks found to release");
        return;
    }
    LOG.info("Releasing {} locks", lockObjects.size());
    for (HiveLockObj lockObj : lockObjects) {
        List<HiveLock> locks = lockMgr.getLocks(lockObj.getObj(), false, true);
        for (HiveLock lock : locks) {
            if (lock.getHiveLockMode() == lockObj.getMode()) {
                if (context.getHiveLocks().remove(lock)) {
                    try {
                        lockMgr.unlock(lock);
                    } catch (LockException le) {
                        // should be OK since the lock is ephemeral and will eventually be deleted
                        // when the query finishes and zookeeper session is closed.
                        LOG.warn("Could not release lock {}", lock.getHiveLockObject().getName(), le);
                    }
                }
            }
        }
    }
}
Also used: LockException (org.apache.hadoop.hive.ql.lockmgr.LockException), HiveLockObj (org.apache.hadoop.hive.ql.lockmgr.HiveLockObj), HiveLock (org.apache.hadoop.hive.ql.lockmgr.HiveLock), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), HiveLockManager (org.apache.hadoop.hive.ql.lockmgr.HiveLockManager)
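
The early returns in releaseLocks hinge on the two settings discussed in the comment above: hive.support.concurrency and hive.multi.insert.move.tasks.share.dependencies. The sketch below shows how they might be toggled on a HiveConf instance; the class name LockReleaseConfSketch and method buildConf are made up for illustration, and the multi-insert property is set by its string key (taken from the comment) because its ConfVars constant is not shown in the snippet.

import org.apache.hadoop.hive.conf.HiveConf;

public class LockReleaseConfSketch {

    public static HiveConf buildConf() {
        HiveConf conf = new HiveConf();
        // Without concurrency support there are no locks to acquire or release,
        // so releaseLocks() returns immediately.
        conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
        // Alternative to releasing locks per branch: make the results of a
        // multi-table insert visible only once all branches have finished.
        conf.setBoolean("hive.multi.insert.move.tasks.share.dependencies", true);
        return conf;
    }
}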

Example 2 with HiveLockObj

Use of org.apache.hadoop.hive.ql.lockmgr.HiveLockObj in project hive by apache.

From the class MoveTask, method acquireLockForFileMove:

private LocalTableLock acquireLockForFileMove(LoadTableDesc loadTableWork) throws HiveException {
    LockFileMoveMode mode = LockFileMoveMode.fromConf(conf);
    if (mode == LockFileMoveMode.NONE) {
        return new LocalTableLock();
    }
    if (mode == LockFileMoveMode.DP && loadTableWork.getDPCtx() == null) {
        return new LocalTableLock();
    }
    WriteEntity output = context.getLoadTableOutputMap().get(loadTableWork);
    List<HiveLockObj> lockObjects = context.getOutputLockObjects().get(output);
    if (lockObjects == null) {
        return new LocalTableLock();
    }
    TableDesc table = loadTableWork.getTable();
    if (table == null) {
        return new LocalTableLock();
    }
    Hive db = getHive();
    Table baseTable = db.getTable(loadTableWork.getTable().getTableName());
    HiveLockObject.HiveLockObjectData lockData = new HiveLockObject.HiveLockObjectData(queryPlan.getQueryId(), String.valueOf(System.currentTimeMillis()), "IMPLICIT", queryPlan.getQueryStr(), conf);
    HiveLockObject lock = new HiveLockObject(baseTable, lockData);
    for (HiveLockObj hiveLockObj : lockObjects) {
        if (Arrays.equals(hiveLockObj.getObj().getPaths(), lock.getPaths())) {
            HiveLockMode l = hiveLockObj.getMode();
            if (l == HiveLockMode.EXCLUSIVE || l == HiveLockMode.SEMI_SHARED) {
                // no need to lock; a stronger (exclusive or semi-shared) lock on the same paths is already held
                return new LocalTableLock();
            }
        }
    }
    return new LocalTableLock(lock);
}
Also used: Hive (org.apache.hadoop.hive.ql.metadata.Hive), Table (org.apache.hadoop.hive.ql.metadata.Table), HiveLockObject (org.apache.hadoop.hive.ql.lockmgr.HiveLockObject), HiveLockObj (org.apache.hadoop.hive.ql.lockmgr.HiveLockObj), TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc), LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc), CreateTableDesc (org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), HiveLockMode (org.apache.hadoop.hive.ql.lockmgr.HiveLockMode)
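
The loop in acquireLockForFileMove compares the paths and lock modes of already-acquired HiveLockObj entries against a freshly built HiveLockObject. Below is a small, self-contained sketch of that check using only the accessors visible in the snippet (getObj(), getMode(), getPaths()); the class FileMoveLockSketch and the helper name alreadyCoveredBy are hypothetical.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;

public class FileMoveLockSketch {

    // Hypothetical helper mirroring the check in acquireLockForFileMove:
    // returns true when an existing lock on the same paths is exclusive or
    // semi-shared, i.e. strong enough that no extra table lock is needed.
    static boolean alreadyCoveredBy(List<HiveLockObj> held, HiveLockObject wanted) {
        for (HiveLockObj hiveLockObj : held) {
            if (Arrays.equals(hiveLockObj.getObj().getPaths(), wanted.getPaths())) {
                HiveLockMode mode = hiveLockObj.getMode();
                if (mode == HiveLockMode.EXCLUSIVE || mode == HiveLockMode.SEMI_SHARED) {
                    return true;
                }
            }
        }
        return false;
    }
}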

Aggregations

WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 2 usages
HiveLockObj (org.apache.hadoop.hive.ql.lockmgr.HiveLockObj): 2 usages
CreateTableDesc (org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc): 1 usage
HiveLock (org.apache.hadoop.hive.ql.lockmgr.HiveLock): 1 usage
HiveLockManager (org.apache.hadoop.hive.ql.lockmgr.HiveLockManager): 1 usage
HiveLockMode (org.apache.hadoop.hive.ql.lockmgr.HiveLockMode): 1 usage
HiveLockObject (org.apache.hadoop.hive.ql.lockmgr.HiveLockObject): 1 usage
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 1 usage
Hive (org.apache.hadoop.hive.ql.metadata.Hive): 1 usage
Table (org.apache.hadoop.hive.ql.metadata.Table): 1 usage
LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc): 1 usage
TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc): 1 usage
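
For reference, the aggregated classes above correspond to the following import block; this is reconstructed from the fully qualified names in the list, not copied from MoveTask.java.

import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;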