Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement in project hive by apache.
In class TestDbTxnManager2, method testWriteSetTracking8:
/**
* Concurrent updates, one with a partition-pruning predicate and one without.
*/
@Test
public void testWriteSetTracking8() throws Exception {
dropTable(new String[] { "tab1", "TAB1" });
CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + "clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
checkCmdOnDriver(cpr);
//txnid:1
checkCmdOnDriver(driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"));
HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
txnMgr2.openTxn(ctx, "T2");
checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where b=1"));
txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2");
List<ShowLocksResponseElement> locks = getLocks(txnMgr2);
Assert.assertEquals("Unexpected lock count", 2, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=one", locks);
//now start concurrent txn
txnMgr.openTxn(ctx, "T3");
checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where p='two'"));
((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false);
locks = getLocks(txnMgr);
Assert.assertEquals("Unexpected lock count", 3, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=one", locks);
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB1", "p=two", locks);
//this simulates the completion of txnid:2
AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab1", Collections.singletonList("p=one"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
//txnid:2
txnMgr2.commitTxn();
//retest WAITING locks (both have same ext id)
((DbLockManager) txnMgr.getLockManager()).checkLock(locks.get(2).getLockid());
locks = getLocks(txnMgr);
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
//completion of txnid:3
adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), "default", "tab1", Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
//txnid:3
txnMgr.commitTxn();
Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"), 1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString("select * from WRITE_SET"), 1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='u' and ws_table='tab1'"));
Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString("select * from COMPLETED_TXN_COMPONENTS"), 4, TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
}
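The checkLock and getLocks helpers used above are defined elsewhere in TestDbTxnManager2 and are not shown on this page. The following is only a rough sketch of what a checkLock-style helper does with the Thrift-generated getters on ShowLocksResponseElement; the actual Hive implementation may differ in details such as case handling and blocked-by bookkeeping.

import java.util.List;

import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
import org.junit.Assert;

// Hypothetical stand-in for the real checkLock helper: scan the ShowLocksResponseElement list
// for an entry matching the expected lock type, state, database, table and partition, and
// return it so callers can read e.g. getLockid(); fail the assertion if nothing matches.
private static ShowLocksResponseElement checkLockSketch(LockType expectedType, LockState expectedState,
    String db, String table, String partition, List<ShowLocksResponseElement> locks) {
  for (ShowLocksResponseElement lock : locks) {
    if (lock.getType() == expectedType && lock.getState() == expectedState
        && db.equalsIgnoreCase(lock.getDbname())
        && (table == null ? lock.getTablename() == null : table.equalsIgnoreCase(lock.getTablename()))
        && (partition == null ? lock.getPartname() == null : partition.equalsIgnoreCase(lock.getPartname()))) {
      return lock;
    }
  }
  Assert.fail("No lock " + expectedType + "/" + expectedState + " on " + db + "." + table
      + (partition == null ? "" : "/" + partition) + " among " + locks);
  // not reached; Assert.fail throws AssertionError
  return null;
}

Returning the matched element is what lets tests such as testMergePartitioned below grab the external lock id of a WAITING lock and later re-check it via DbLockManager.checkLock.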
Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement in project hive by apache.
In class TestDbTxnManager2, method testShowLocksFilterOptions:
@Test
public void testShowLocksFilterOptions() throws Exception {
CommandProcessorResponse cpr = driver.run("drop table if exists db1.t14");
checkCmdOnDriver(cpr);
// Note that db1 and db2 have a table with common name
cpr = driver.run("drop table if exists db2.t14");
checkCmdOnDriver(cpr);
cpr = driver.run("drop table if exists db2.t15");
checkCmdOnDriver(cpr);
cpr = driver.run("drop table if exists db2.t16");
checkCmdOnDriver(cpr);
cpr = driver.run("drop database if exists db1");
checkCmdOnDriver(cpr);
cpr = driver.run("drop database if exists db2");
checkCmdOnDriver(cpr);
cpr = driver.run("create database if not exists db1");
checkCmdOnDriver(cpr);
cpr = driver.run("create database if not exists db2");
checkCmdOnDriver(cpr);
cpr = driver.run("create table if not exists db1.t14 (a int, b int) partitioned by (ds string) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
checkCmdOnDriver(cpr);
cpr = driver.run("create table if not exists db2.t14 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
checkCmdOnDriver(cpr);
cpr = driver.run("create table if not exists db2.t15 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
checkCmdOnDriver(cpr);
cpr = driver.run("create table if not exists db2.t16 (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
checkCmdOnDriver(cpr);
// Acquire different locks at different levels
cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='today') values (1, 2)");
checkCmdOnDriver(cpr);
txnMgr.acquireLocks(driver.getPlan(), ctx, "Tom");
HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
cpr = driver.compileAndRespond("insert into table db1.t14 partition (ds='tomorrow') values (3, 4)");
checkCmdOnDriver(cpr);
txnMgr2.acquireLocks(driver.getPlan(), ctx, "Jerry");
HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
cpr = driver.compileAndRespond("select * from db2.t15");
checkCmdOnDriver(cpr);
txnMgr3.acquireLocks(driver.getPlan(), ctx, "Donald");
HiveTxnManager txnMgr4 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
cpr = driver.compileAndRespond("select * from db2.t16");
checkCmdOnDriver(cpr);
txnMgr4.acquireLocks(driver.getPlan(), ctx, "Hillary");
HiveTxnManager txnMgr5 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
cpr = driver.compileAndRespond("select * from db2.t14");
checkCmdOnDriver(cpr);
txnMgr5.acquireLocks(driver.getPlan(), ctx, "Obama");
// Simulate SHOW LOCKS with different filter options
// SHOW LOCKS (no filter)
List<ShowLocksResponseElement> locks = getLocks();
Assert.assertEquals("Unexpected lock count", 5, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=tomorrow", locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, locks);
// SHOW LOCKS db2
locks = getLocksWithFilterOptions(txnMgr3, "db2", null, null);
Assert.assertEquals("Unexpected lock count", 3, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t16", null, locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t14", null, locks);
// SHOW LOCKS t14
cpr = driver.run("use db1");
checkCmdOnDriver(cpr);
locks = getLocksWithFilterOptions(txnMgr, null, "t14", null);
Assert.assertEquals("Unexpected lock count", 2, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=tomorrow", locks);
// Note that it shouldn't show t14 from db2
// SHOW LOCKS t14 PARTITION ds='today'
Map<String, String> partSpec = new HashMap<String, String>();
partSpec.put("ds", "today");
locks = getLocksWithFilterOptions(txnMgr, null, "t14", partSpec);
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db1", "t14", "ds=today", locks);
// SHOW LOCKS t15
cpr = driver.run("use db2");
checkCmdOnDriver(cpr);
locks = getLocksWithFilterOptions(txnMgr3, null, "t15", null);
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "db2", "t15", null, locks);
}
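getLocksWithFilterOptions is likewise a helper defined elsewhere in the test class; the real helper goes through the Hive lock manager and accepts a partSpec Map. As a hedged illustration, the same filtered listing can be requested with the public metastore API, assuming the standard ShowLocksRequest Thrift setters and IMetaStoreClient.showLocks; the partition filter is simplified here to a single partition-name string.

import java.util.List;

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;

// Hypothetical filtered lock listing, mirroring SHOW LOCKS [<db>][.<table>] [PARTITION (...)];
// any null argument means "no filter at that level".
private static List<ShowLocksResponseElement> showLocksSketch(IMetaStoreClient client,
    String dbName, String tableName, String partName) throws Exception {
  ShowLocksRequest request = new ShowLocksRequest();
  if (dbName != null) {
    request.setDbname(dbName);
  }
  if (tableName != null) {
    request.setTablename(tableName);
  }
  if (partName != null) {
    // partition name in key=value form, e.g. "ds=today"
    request.setPartname(partName);
  }
  return client.showLocks(request).getLocks();
}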
Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement in project hive by apache.
In class TestDbTxnManager2, method createTable:
@Test
public void createTable() throws Exception {
dropTable(new String[] { "T" });
CommandProcessorResponse cpr = driver.compileAndRespond("create table if not exists T (a int, b int)");
checkCmdOnDriver(cpr);
txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
List<ShowLocksResponseElement> locks = getLocks();
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", null, null, locks);
txnMgr.getLockManager().releaseLocks(ctx.getHiveLocks());
Assert.assertEquals("Lock remained", 0, getLocks().size());
}
Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement in project hive by apache.
In class TestDbTxnManager2, method testMergePartitioned:
/**
* Runs an Update and a Merge concurrently; checks that the correct locks are acquired.
* Checks the state of the auxiliary ACID tables.
* @param causeConflict true to make the operations cause a write conflict
* @throws Exception
*/
private void testMergePartitioned(boolean causeConflict) throws Exception {
dropTable(new String[] { "target", "source" });
checkCmdOnDriver(driver.run("create table target (a int, b int) " + "partitioned by (p int, q int) clustered by (a) into 2 buckets " + "stored as orc TBLPROPERTIES ('transactional'='true')"));
checkCmdOnDriver(driver.run("insert into target partition(p,q) values (1,2,1,2), (3,4,1,2), (5,6,1,3), (7,8,2,2)"));
checkCmdOnDriver(driver.run("create table source (a1 int, b1 int, p1 int, q1 int)"));
long txnId1 = txnMgr.openTxn(ctx, "T1");
checkCmdOnDriver(driver.compileAndRespond("update target set b = 2 where p=1"));
txnMgr.acquireLocks(driver.getPlan(), ctx, "T1");
List<ShowLocksResponseElement> locks = getLocks(txnMgr);
Assert.assertEquals("Unexpected lock count", 2, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=2", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=3", locks);
DbTxnManager txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
//start a 2nd (overlapping) txn
long txnid2 = txnMgr2.openTxn(ctx, "T2");
checkCmdOnDriver(driver.compileAndRespond("merge into target using source " + "on target.p=source.p1 and target.a=source.a1 " + "when matched then update set b = 11 " + "when not matched then insert values(a1,b1,p1,q1)"));
txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2", false);
locks = getLocks(txnMgr);
Assert.assertEquals("Unexpected lock count", 7, locks.size());
/**
* The W locks from T1 are still held, so all locks from T2 block.
* The Update part of the Merge requests a W lock for each existing partition of target.
* The Insert part doesn't know which partitions may be written to, hence an R lock on the target table.
*/
checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "source", null, locks);
long extLockId = checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "target", null, locks).getLockid();
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "target", "p=1/q=2", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=2", locks);
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "target", "p=1/q=3", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=3", locks);
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "target", "p=2/q=2", locks);
Assert.assertEquals("TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), //because it's using a DP write
0, TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1));
//complete T1 transaction (simulate writing to 2 partitions)
AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, "default", "target", Arrays.asList("p=1/q=2", "p=1/q=3"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
Assert.assertEquals("TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), 2, TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1 + " and tc_operation_type='u'"));
//commit T1
txnMgr.commitTxn();
Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnId1) + "): " + TxnDbUtil.queryToString("select * from WRITE_SET"), //2 partitions updated
2, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnId1 + " and ws_operation_type='u'"));
//re-check locks which were in Waiting state - should now be Acquired
((DbLockManager) txnMgr2.getLockManager()).checkLock(extLockId);
locks = getLocks(txnMgr);
Assert.assertEquals("Unexpected lock count", 5, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "source", null, locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "target", null, locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=2", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=1/q=3", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "target", "p=2/q=2", locks);
Assert.assertEquals("TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), //because it's using a DP write
0, TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2));
//complete T2 txn
//simulate Insert into 2 partitions
adp = new AddDynamicPartitions(txnid2, "default", "target", Arrays.asList("p=1/q=2", "p=1/q=3"));
adp.setOperationType(DataOperationType.INSERT);
txnHandler.addDynamicPartitions(adp);
Assert.assertEquals("TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), 2, TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='i'"));
//simulate Update of 1 partition; depending on causeConflict, choose either a partition
//that was modified by the T1 update stmt or a non-conflicting one
adp = new AddDynamicPartitions(txnid2, "default", "target", Collections.singletonList(causeConflict ? "p=1/q=2" : "p=1/q=1"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
Assert.assertEquals("TXN_COMPONENTS mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString("select * from TXN_COMPONENTS"), 1, TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='u'"));
LockException expectedException = null;
try {
txnMgr2.commitTxn();
} catch (LockException e) {
expectedException = e;
}
if (causeConflict) {
Assert.assertTrue("Didn't get exception", expectedException != null);
Assert.assertEquals("Got wrong message code", ErrorMsg.TXN_ABORTED, expectedException.getCanonicalErrorMsg());
Assert.assertEquals("Exception msg didn't match", "Aborting [txnid:3,3] due to a write conflict on default/target/p=1/q=2 committed by [txnid:2,3] u/u", expectedException.getCause().getMessage());
} else {
Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString("select * from WRITE_SET"), //1 partition updated
1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnid2 + " and ws_operation_type='u'"));
Assert.assertEquals("WRITE_SET mismatch(" + JavaUtils.txnIdToString(txnid2) + "): " + TxnDbUtil.queryToString("select * from WRITE_SET"), //1 partition updated (and no other entries)
1, TxnDbUtil.countQueryAgent("select count(*) from WRITE_SET where ws_txnid=" + txnid2));
}
}
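Because testMergePartitioned is a private helper rather than a @Test method, it is presumably driven by thin JUnit wrappers along the following lines; the wrapper names here are illustrative.

// Illustrative wrappers: one run that forces a write conflict on p=1/q=2 and one that does not,
// both delegating to the shared testMergePartitioned(boolean) helper above.
@Test
public void testMergePartitionedConflict() throws Exception {
  testMergePartitioned(true);
}

@Test
public void testMergePartitionedNoConflict() throws Exception {
  testMergePartitioned(false);
}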
Use of org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement in project hive by apache.
In class TestDbTxnManager2, method testLockRetryLimit:
@Test
public void testLockRetryLimit() throws Exception {
dropTable(new String[] { "T9" });
conf.setIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES, 2);
conf.setBoolVar(HiveConf.ConfVars.TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT, true);
HiveTxnManager otherTxnMgr = new DbTxnManager();
((DbTxnManager) otherTxnMgr).setHiveConf(conf);
CommandProcessorResponse cpr = driver.run("create table T9(a int)");
checkCmdOnDriver(cpr);
cpr = driver.compileAndRespond("select * from T9");
checkCmdOnDriver(cpr);
txnMgr.acquireLocks(driver.getPlan(), ctx, "Vincent Vega");
List<ShowLocksResponseElement> locks = getLocks(txnMgr);
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T9", null, locks);
cpr = driver.compileAndRespond("drop table T9");
checkCmdOnDriver(cpr);
try {
otherTxnMgr.acquireLocks(driver.getPlan(), ctx, "Winston Winnfield");
} catch (LockException ex) {
Assert.assertEquals("Got wrong lock exception", ErrorMsg.LOCK_ACQUIRE_TIMEDOUT, ex.getCanonicalErrorMsg());
}
locks = getLocks(txnMgr);
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T9", null, locks);
otherTxnMgr.closeTxnManager();
}