Use of org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData in project hive by apache.
From the class TestDummyTxnManager, method testDedupLockObjects:
@Test
public void testDedupLockObjects() {
  List<HiveLockObj> lockObjs = new ArrayList<HiveLockObj>();
  String path1 = "path1";
  String path2 = "path2";
  HiveLockObjectData lockData1 = new HiveLockObjectData("query1", "1", "IMPLICIT", "drop table table1");
  HiveLockObjectData lockData2 = new HiveLockObjectData("query1", "1", "IMPLICIT", "drop table table1");
  // Start with the following locks:
  // [path1, shared]
  // [path1, exclusive]
  // [path2, shared]
  // [path2, shared]
  // [path2, shared]
  lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.SHARED));
  String name1 = lockObjs.get(lockObjs.size() - 1).getName();
  lockObjs.add(new HiveLockObj(new HiveLockObject(path1, lockData1), HiveLockMode.EXCLUSIVE));
  lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
  String name2 = lockObjs.get(lockObjs.size() - 1).getName();
  lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
  lockObjs.add(new HiveLockObj(new HiveLockObject(path2, lockData2), HiveLockMode.SHARED));
  DummyTxnManager.dedupLockObjects(lockObjs);
  // After dedup we should be left with 2 locks:
  // [path1, exclusive]
  // [path2, shared]
  Assert.assertEquals("Locks should be deduped", 2, lockObjs.size());
  Comparator<HiveLockObj> cmp = new Comparator<HiveLockObj>() {
    @Override
    public int compare(HiveLockObj lock1, HiveLockObj lock2) {
      return lock1.getName().compareTo(lock2.getName());
    }
  };
  Collections.sort(lockObjs, cmp);
  HiveLockObj lockObj = lockObjs.get(0);
  Assert.assertEquals(name1, lockObj.getName());
  Assert.assertEquals(HiveLockMode.EXCLUSIVE, lockObj.getMode());
  lockObj = lockObjs.get(1);
  Assert.assertEquals(name2, lockObj.getName());
  Assert.assertEquals(HiveLockMode.SHARED, lockObj.getMode());
}
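The test pins down the merge rule: duplicate requests on the same path collapse into a single entry, and an EXCLUSIVE request wins over a SHARED one. The real logic lives in DummyTxnManager.dedupLockObjects, which is not shown on this page; the following is only a minimal sketch of that rule, assuming the usual java.util imports and that getName() identifies the locked object.

// Illustrative sketch only -- not the DummyTxnManager implementation.
// Keeps one HiveLockObj per lock name, preferring EXCLUSIVE over SHARED.
static void dedupLockObjectsSketch(List<HiveLockObj> lockObjs) {
  Map<String, HiveLockObj> byName = new LinkedHashMap<String, HiveLockObj>();
  for (HiveLockObj lock : lockObjs) {
    HiveLockObj existing = byName.get(lock.getName());
    if (existing == null) {
      // First request for this object wins by default.
      byName.put(lock.getName(), lock);
    } else if (lock.getMode() == HiveLockMode.EXCLUSIVE) {
      // An exclusive request subsumes a shared one on the same object.
      byName.put(lock.getName(), lock);
    }
  }
  lockObjs.clear();
  lockObjs.addAll(byName.values());
}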
Use of org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData in project hive by apache.
From the class TestZookeeperLockManager, method setup:
@Before
public void setup() {
  conf = new HiveConf();
  lockObjData = new HiveLockObjectData("1", "10", "SHARED", "show tables");
  hiveLock = new HiveLockObject(TABLE, lockObjData);
  zLock = new ZooKeeperHiveLock(TABLE_LOCK_PATH, hiveLock, HiveLockMode.SHARED);
  while (server == null) {
    try {
      server = new TestingServer();
      CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
      client = builder.connectString(server.getConnectString()).retryPolicy(new RetryOneTime(1)).build();
      client.start();
    } catch (Exception e) {
      System.err.println("Getting bind exception - retrying to allocate server");
      server = null;
    }
  }
}
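The setup leaves a Curator TestingServer and a started CuratorFramework client open. The actual test class presumably closes both after the tests run; a minimal teardown sketch, assuming the same client and server fields as above (not copied from the Hive source), could look like this:

// Illustrative teardown sketch: close the Curator client before the embedded ZooKeeper it points at.
@After
public void teardown() throws IOException {
  if (client != null) {
    client.close();
  }
  if (server != null) {
    server.close();
    server = null;
  }
}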
Use of org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData in project hive by apache.
From the class DDLTask, method showLocks:
/**
 * Write a list of the current locks to a file.
 *
 * @param db
 *          the Hive database handle
 * @param showLocks
 *          the locks we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  HiveLockManager lockMgr = txnManager.getLockManager();
  if (txnManager.useNewShowLocksFormat()) {
    return showLocksNewFormat(showLocks, lockMgr);
  }
  boolean isExt = showLocks.isExt();
  if (lockMgr == null) {
    throw new HiveException("show Locks LockManager not specified");
  }
  // write the results in the file
  DataOutputStream outStream = getOutputStream(showLocks.getResFile());
  try {
    List<HiveLock> locks = null;
    if (showLocks.getTableName() == null) {
      // TODO should be doing security check here. Users should not be
      // able to see each other's locks.
      locks = lockMgr.getLocks(false, isExt);
    } else {
      locks = lockMgr.getLocks(HiveLockObject.createFrom(db, showLocks.getTableName(), showLocks.getPartSpec()), true, isExt);
    }
    Collections.sort(locks, new Comparator<HiveLock>() {
      @Override
      public int compare(HiveLock o1, HiveLock o2) {
        int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
        if (cmp == 0) {
          if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
            return cmp;
          }
          // EXCLUSIVE locks occur before SHARED locks
          if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
            return -1;
          }
          return +1;
        }
        return cmp;
      }
    });
    Iterator<HiveLock> locksIter = locks.iterator();
    while (locksIter.hasNext()) {
      HiveLock lock = locksIter.next();
      outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
      outStream.write(separator);
      outStream.writeBytes(lock.getHiveLockMode().toString());
      if (isExt) {
        HiveLockObjectData lockData = lock.getHiveLockObject().getData();
        if (lockData != null) {
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr());
        }
      }
      outStream.write(terminator);
    }
  } catch (FileNotFoundException e) {
    LOG.warn("show function: " + stringifyException(e));
    return 1;
  } catch (IOException e) {
    LOG.warn("show function: " + stringifyException(e));
    return 1;
  } catch (Exception e) {
    throw new HiveException(e.toString(), e);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
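In extended mode the method prints exactly four fields from HiveLockObjectData: query id, lock time, lock mode, and query string, matching the four-argument constructor used elsewhere on this page. A small illustration of that round trip (the values below are made up):

// Illustrative only: the constructor arguments mirror the getters printed by showLocks.
HiveLockObjectData data = new HiveLockObjectData("query42", String.valueOf(System.currentTimeMillis()), "IMPLICIT", "select * from t");
System.out.println("LOCK_QUERYID:" + data.getQueryId());
System.out.println("LOCK_TIME:" + data.getLockTime());
System.out.println("LOCK_MODE:" + data.getLockMode());
System.out.println("LOCK_QUERYSTRING:" + data.getQueryStr());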
Use of org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData in project hive by apache.
From the class HiveTxnManagerImpl, method lockTable:
@Override
public int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
  HiveLockManager lockMgr = getAndCheckLockManager();
  HiveLockMode mode = HiveLockMode.valueOf(lockTbl.getMode());
  String tabName = lockTbl.getTableName();
  Table tbl = db.getTable(tabName);
  if (tbl == null) {
    throw new HiveException("Table " + tabName + " does not exist ");
  }
  Map<String, String> partSpec = lockTbl.getPartSpec();
  HiveLockObjectData lockData = new HiveLockObjectData(lockTbl.getQueryId(), String.valueOf(System.currentTimeMillis()), "EXPLICIT", lockTbl.getQueryStr());
  if (partSpec == null) {
    HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true);
    if (lck == null) {
      return 1;
    }
    return 0;
  }
  Partition par = db.getPartition(tbl, partSpec, false);
  if (par == null) {
    throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
  }
  HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true);
  if (lck == null) {
    return 1;
  }
  return 0;
}
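lockTable relies on HiveLockMode.valueOf to turn the requested mode string into an enum constant, and tags the lock data as "EXPLICIT" rather than the "IMPLICIT" used for locks taken on behalf of a query. The sketch below shows how a caller might validate such a mode string up front; it is a hypothetical helper, and the constants mentioned (SHARED, EXCLUSIVE, SEMI_SHARED) are simply those that appear in the usages on this page.

// Illustrative helper, not part of HiveTxnManagerImpl: validate a user-supplied
// lock mode before handing it to HiveLockMode.valueOf.
static HiveLockMode parseLockMode(String mode) throws HiveException {
  try {
    // Constants seen elsewhere in this listing: SHARED, EXCLUSIVE, SEMI_SHARED.
    return HiveLockMode.valueOf(mode.toUpperCase());
  } catch (IllegalArgumentException e) {
    throw new HiveException("Unknown lock mode: " + mode, e);
  }
}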
Use of org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData in project hive by apache.
From the class ZooKeeperHiveLockManager, method lockPrimitive:
private ZooKeeperHiveLock lockPrimitive(HiveLockObject key, HiveLockMode mode, boolean keepAlive, boolean parentCreated, Set<String> conflictingLocks) throws Exception {
  String res;
  // If the parents have already been created, create the last child only
  List<String> names = new ArrayList<String>();
  String lastName;
  HiveLockObjectData lockData = key.getData();
  lockData.setClientIp(clientIp);
  if (parentCreated) {
    lastName = getLastObjectName(parent, key);
    names.add(lastName);
  } else {
    names = getObjectNames(key);
    lastName = names.get(names.size() - 1);
  }
  // Create the parents first
  for (String name : names) {
    try {
      res = createChild(name, new byte[0], CreateMode.PERSISTENT);
    } catch (Exception e) {
      if (!(e instanceof KeeperException) || ((KeeperException) e).code() != KeeperException.Code.NODEEXISTS) {
        // if the exception is not 'NODEEXISTS', re-throw it
        throw e;
      }
    }
  }
  res = createChild(getLockName(lastName, mode), key.getData().toString().getBytes(), keepAlive ? CreateMode.PERSISTENT_SEQUENTIAL : CreateMode.EPHEMERAL_SEQUENTIAL);
  int seqNo = getSequenceNumber(res, getLockName(lastName, mode));
  if (seqNo == -1) {
    curatorFramework.delete().forPath(res);
    return null;
  }
  List<String> children = curatorFramework.getChildren().forPath(lastName);
  String exLock = getLockName(lastName, HiveLockMode.EXCLUSIVE);
  String shLock = getLockName(lastName, HiveLockMode.SHARED);
  for (String child : children) {
    child = lastName + "/" + child;
    // Is there a conflicting lock on the same object with a lower sequence
    // number
    int childSeq = seqNo;
    if (child.startsWith(exLock)) {
      childSeq = getSequenceNumber(child, exLock);
    }
    if ((mode == HiveLockMode.EXCLUSIVE) && child.startsWith(shLock)) {
      childSeq = getSequenceNumber(child, shLock);
    }
    if ((childSeq >= 0) && (childSeq < seqNo)) {
      try {
        curatorFramework.delete().forPath(res);
      } finally {
        if (LOG.isDebugEnabled()) {
          try {
            String data = new String(curatorFramework.getData().forPath(child));
            conflictingLocks.add(data);
          } catch (Exception e) {
            // ignored
          }
        }
      }
      return null;
    }
  }
  Metrics metrics = MetricsFactory.getInstance();
  if (metrics != null) {
    try {
      switch (mode) {
        case EXCLUSIVE:
          metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_EXCLUSIVELOCKS);
          break;
        case SEMI_SHARED:
          metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SEMISHAREDLOCKS);
          break;
        default:
          metrics.incrementCounter(MetricsConstant.ZOOKEEPER_HIVE_SHAREDLOCKS);
          break;
      }
    } catch (Exception e) {
      LOG.warn("Error Reporting hive client zookeeper lock operation to Metrics system", e);
    }
  }
  return new ZooKeeperHiveLock(res, key, mode);
}
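The conflict check hinges on the sequence number ZooKeeper appends to each PERSISTENT_SEQUENTIAL or EPHEMERAL_SEQUENTIAL node: the new lock backs off whenever a conflicting lock node carries a lower number. The sketch below shows how such a number could be parsed from a child path; it assumes lock node names are a fixed prefix followed by a numeric suffix, since the exact format produced by getLockName and getSequenceNumber is not shown on this page.

// Illustrative sketch of sequence-number parsing, assuming names of the form
// "<prefix><zero-padded sequence>", e.g. a mode-specific prefix followed by "0000000007".
static int parseSequenceNumber(String path, String prefix) {
  if (!path.startsWith(prefix)) {
    return -1;
  }
  try {
    return Integer.parseInt(path.substring(prefix.length()));
  } catch (NumberFormatException e) {
    // Not a sequential lock node we understand.
    return -1;
  }
}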