Usage of org.apache.hadoop.hive.ql.lockmgr.HiveLockObj in project hive by apache.
The class MoveTask, method releaseLocks.
/**
 * Releases the locks that were acquired for the given table-load descriptor.
 *
 * Releasing per-insert is important for multi-table inserts where one branch
 * may take far longer than the others: the faster branch's locks can be freed
 * immediately. The alternative is to wait for every branch, or to set
 * hive.multi.insert.move.tasks.share.dependencies to true so that the first
 * multi-insert's results become visible only once all branches complete.
 *
 * @param ltd descriptor of the table load whose locks should be released
 * @throws HiveException if the lock manager cannot be consulted
 */
private void releaseLocks(LoadTableDesc ltd) throws HiveException {
  if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY)) {
    // Concurrency support disabled: no locks were ever taken.
    LOG.debug("No locks to release because Hive concurrency support is not enabled");
    return;
  }
  if (context.getHiveTxnManager().supportsAcid()) {
    // The ACID lock manager does not populate getOutputLockObjects();
    // this early return just makes that explicit.
    return;
  }
  HiveLockManager lockManager = context.getHiveTxnManager().getLockManager();
  WriteEntity output = context.getLoadTableOutputMap().get(ltd);
  List<HiveLockObj> outputLocks = context.getOutputLockObjects().get(output);
  if (CollectionUtils.isEmpty(outputLocks)) {
    LOG.debug("No locks found to release");
    return;
  }
  LOG.info("Releasing {} locks", outputLocks.size());
  for (HiveLockObj outputLock : outputLocks) {
    for (HiveLock held : lockManager.getLocks(outputLock.getObj(), false, true)) {
      if (held.getHiveLockMode() != outputLock.getMode()) {
        continue;
      }
      // Only unlock locks this query still tracks; remove() also guards
      // against unlocking the same lock twice.
      if (!context.getHiveLocks().remove(held)) {
        continue;
      }
      try {
        lockManager.unlock(held);
      } catch (LockException le) {
        // Best-effort: the lock is ephemeral and will be cleaned up when the
        // query finishes and the ZooKeeper session closes.
        LOG.warn("Could not release lock {}", held.getHiveLockObject().getName(), le);
      }
    }
  }
}
Usage of org.apache.hadoop.hive.ql.lockmgr.HiveLockObj in project hive by apache.
The class MoveTask, method acquireLockForFileMove.
/**
 * Acquires (or declines to acquire) a table-level lock guarding the file-move
 * phase of a load, returning it wrapped in a {@link LocalTableLock}.
 *
 * An empty LocalTableLock is returned when: lock-for-move is disabled, the
 * mode is DP-only but the load has no dynamic-partition context, no output
 * entity / lock objects are registered for this load, the load has no table
 * descriptor, or the query already holds an EXCLUSIVE or SEMI_SHARED lock on
 * the same paths (a stronger lock makes a new one unnecessary).
 *
 * @param loadTableWork descriptor of the table load about to move files
 * @return a LocalTableLock, possibly empty when no locking is required
 * @throws HiveException if metastore or lock-related lookups fail
 */
private LocalTableLock acquireLockForFileMove(LoadTableDesc loadTableWork) throws HiveException {
  LockFileMoveMode mode = LockFileMoveMode.fromConf(conf);
  boolean lockingDisabled = mode == LockFileMoveMode.NONE
      || (mode == LockFileMoveMode.DP && loadTableWork.getDPCtx() == null);
  if (lockingDisabled) {
    return new LocalTableLock();
  }
  WriteEntity output = context.getLoadTableOutputMap().get(loadTableWork);
  List<HiveLockObj> existingLocks = context.getOutputLockObjects().get(output);
  if (existingLocks == null) {
    return new LocalTableLock();
  }
  TableDesc tableDesc = loadTableWork.getTable();
  if (tableDesc == null) {
    return new LocalTableLock();
  }
  Hive db = getHive();
  Table baseTable = db.getTable(tableDesc.getTableName());
  HiveLockObject.HiveLockObjectData lockData = new HiveLockObject.HiveLockObjectData(
      queryPlan.getQueryId(),
      String.valueOf(System.currentTimeMillis()),
      "IMPLICIT",
      queryPlan.getQueryStr(),
      conf);
  HiveLockObject candidate = new HiveLockObject(baseTable, lockData);
  for (HiveLockObj existing : existingLocks) {
    boolean samePaths = Arrays.equals(existing.getObj().getPaths(), candidate.getPaths());
    HiveLockMode heldMode = existing.getMode();
    if (samePaths && (heldMode == HiveLockMode.EXCLUSIVE || heldMode == HiveLockMode.SEMI_SHARED)) {
      // A stronger lock on the same paths is already held; nothing to acquire.
      return new LocalTableLock();
    }
  }
  return new LocalTableLock(candidate);
}
Aggregations