
Example 66 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

the class TestDbTxnManager2 method basicBlocking.

@Test
public void basicBlocking() throws Exception {
    dropTable(new String[] { "T6" });
    CommandProcessorResponse cpr = driver.run("create table if not exists T6(a int)");
    checkCmdOnDriver(cpr);
    cpr = driver.compileAndRespond("select a from T6");
    checkCmdOnDriver(cpr);
    // gets S lock on T6
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
    List<HiveLock> selectLocks = ctx.getHiveLocks();
    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    swapTxnManager(txnMgr2);
    cpr = driver.compileAndRespond("drop table if exists T6");
    checkCmdOnDriver(cpr);
    // tries to get X lock on T6 and gets Waiting state
    LockState lockState = ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false);
    List<ShowLocksResponseElement> locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 2, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T6", null, locks);
    checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "T6", null, locks);
    // release S on T6
    txnMgr.rollbackTxn();
    // attempt to acquire X on T6 again - it should now succeed
    lockState = ((DbLockManager) txnMgr.getLockManager()).checkLock(locks.get(1).getLockid());
    locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 1, locks.size());
    checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "T6", null, locks);
    txnMgr2.rollbackTxn();
    cpr = driver.run("drop table if exists T6");
    locks = getLocks();
    Assert.assertEquals("Unexpected number of locks found", 0, locks.size());
    checkCmdOnDriver(cpr);
}
Also used : CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) LockState(org.apache.hadoop.hive.metastore.api.LockState) ShowLocksResponseElement(org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement) Test(org.junit.Test)
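
Each example feeds the CommandProcessorResponse to a checkCmdOnDriver helper whose body is not shown here. A minimal sketch of what such a check typically does (the exact implementation in the Hive test class may differ):

static void checkCmdOnDriver(CommandProcessorResponse cpr) {
    // A response code of 0 means the command succeeded; a non-zero code is accompanied
    // by a SQL state and an error message describing the failure.
    if (cpr.getResponseCode() != 0) {
        Assert.fail("Command failed (" + cpr.getSQLState() + "): " + cpr.getErrorMessage());
    }
}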

Example 67 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

the class TestDbTxnManager2 method testMetastoreTablesCleanup.

/**
 * Normally the compaction process will clean up records in TXN_COMPONENTS, COMPLETED_TXN_COMPONENTS,
 * COMPACTION_QUEUE and COMPLETED_COMPACTIONS. But if a table/partition has been dropped before
 * compaction and there are still relevant records in those metastore tables, the Initiator will
 * complain about not being able to find the table/partition. This test verifies that the
 * relevant records are cleaned up as soon as a table/partition is dropped.
 *
 * Note: we don't need to worry about cleaning up the TXNS table here, since that is handled separately.
 * @throws Exception
 */
@Test
public void testMetastoreTablesCleanup() throws Exception {
    dropTable(new String[] { "temp.T10", "temp.T11", "temp.T12p", "temp.T13p" });
    CommandProcessorResponse cpr = driver.run("create database if not exists temp");
    checkCmdOnDriver(cpr);
    // Create some ACID tables: T10, T11 - unpartitioned table, T12p, T13p - partitioned table
    cpr = driver.run("create table temp.T10 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
    cpr = driver.run("create table temp.T11 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
    cpr = driver.run("create table temp.T12p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
    cpr = driver.run("create table temp.T13p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
    // Successfully insert some data into ACID tables, so that we have records in COMPLETED_TXN_COMPONENTS
    cpr = driver.run("insert into temp.T10 values (1, 1)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T10 values (2, 2)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T11 values (3, 3)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T11 values (4, 4)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T12p partition (ds='today', hour='1') values (5, 5)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T12p partition (ds='tomorrow', hour='2') values (6, 6)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T13p partition (ds='today', hour='1') values (7, 7)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T13p partition (ds='tomorrow', hour='2') values (8, 8)");
    checkCmdOnDriver(cpr);
    int count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11')");
    Assert.assertEquals(4, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t12p', 't13p')");
    Assert.assertEquals(4, count);
    // Fail some inserts, so that we have records in TXN_COMPONENTS
    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true);
    cpr = driver.run("insert into temp.T10 values (9, 9)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T11 values (10, 10)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T12p partition (ds='today', hour='1') values (11, 11)");
    checkCmdOnDriver(cpr);
    cpr = driver.run("insert into temp.T13p partition (ds='today', hour='1') values (12, 12)");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(4, count);
    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);
    // Drop a table/partition; corresponding records in TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS should disappear
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
    Assert.assertEquals(1, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
    Assert.assertEquals(2, count);
    cpr = driver.run("drop table temp.T10");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
    Assert.assertEquals(1, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
    Assert.assertEquals(1, count);
    cpr = driver.run("alter table temp.T12p drop partition (ds='today', hour='1')");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
    Assert.assertEquals(0, count);
    // Successfully perform compaction on a table/partition, so that we have successful records in COMPLETED_COMPACTIONS
    cpr = driver.run("alter table temp.T11 compact 'minor'");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='i'");
    Assert.assertEquals(1, count);
    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='r' and CQ_TYPE='i'");
    Assert.assertEquals(1, count);
    org.apache.hadoop.hive.ql.TestTxnCommands2.runCleaner(conf);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='s' and CC_TYPE='i'");
    Assert.assertEquals(1, count);
    cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'minor'");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='i'");
    Assert.assertEquals(1, count);
    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='r' and CQ_TYPE='i'");
    Assert.assertEquals(1, count);
    org.apache.hadoop.hive.ql.TestTxnCommands2.runCleaner(conf);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='s' and CC_TYPE='i'");
    Assert.assertEquals(1, count);
    // Fail compaction, so that we have failed records in COMPLETED_COMPACTIONS
    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
    cpr = driver.run("alter table temp.T11 compact 'major'");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
    Assert.assertEquals(1, count);
    // will fail
    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='f' and CC_TYPE='a'");
    Assert.assertEquals(1, count);
    cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
    Assert.assertEquals(1, count);
    // will fail
    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='f' and CC_TYPE='a'");
    Assert.assertEquals(1, count);
    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false);
    // Put 2 records into COMPACTION_QUEUE and do nothing
    cpr = driver.run("alter table temp.T11 compact 'major'");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
    Assert.assertEquals(1, count);
    cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
    Assert.assertEquals(1, count);
    // Drop a table/partition, corresponding records in COMPACTION_QUEUE and COMPLETED_COMPACTIONS should disappear
    cpr = driver.run("drop table temp.T11");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11'");
    Assert.assertEquals(0, count);
    cpr = driver.run("alter table temp.T12p drop partition (ds='tomorrow', hour='2')");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p'");
    Assert.assertEquals(0, count);
    // Put 1 record into COMPACTION_QUEUE and do nothing
    cpr = driver.run("alter table temp.T13p partition (ds='today', hour='1') compact 'major'");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t13p' and CQ_STATE='i' and CQ_TYPE='a'");
    Assert.assertEquals(1, count);
    // Drop database, everything in all 4 meta tables should disappear
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(1, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(2, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(1, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(0, count);
    cpr = driver.run("drop database if exists temp cascade");
    checkCmdOnDriver(cpr);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(0, count);
    count = TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
    Assert.assertEquals(0, count);
}
Also used : CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) Test(org.junit.Test)
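
The test above repeats the same count-then-assert pattern against the transactional metastore tables. A small sketch of how that pattern could be factored out (the helper name is hypothetical; TxnDbUtil.countQueryAgent and the table/column names are the ones used in the test):

private static void assertMetastoreCount(HiveConf conf, String countSql, int expected) throws Exception {
    // countQueryAgent runs the "select count(*) ..." query directly against the backing
    // metastore tables (TXN_COMPONENTS, COMPLETED_TXN_COMPONENTS, COMPACTION_QUEUE, ...).
    int count = TxnDbUtil.countQueryAgent(conf, countSql);
    Assert.assertEquals("Unexpected row count for: " + countSql, expected, count);
}

For reading the assertions: the single-character codes in the queries are the compaction states and types exercised by the test ('i' initiated and 'r' ready for cleaning in COMPACTION_QUEUE, 's' succeeded and 'f' failed in COMPLETED_COMPACTIONS, and type 'i' for minor vs 'a' for major), matching the transitions driven by runWorker and runCleaner.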

Example 68 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

the class TestDbTxnManager2 method createTable.

@Test
public void createTable() throws Exception {
    dropTable(new String[] { "T" });
    CommandProcessorResponse cpr = driver.compileAndRespond("create table if not exists T (a int, b int)");
    checkCmdOnDriver(cpr);
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
    List<ShowLocksResponseElement> locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 1, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", null, null, locks);
    txnMgr.commitTxn();
    Assert.assertEquals("Lock remained", 0, getLocks().size());
}
Also used : CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) ShowLocksResponseElement(org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement) Test(org.junit.Test)
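
The point exercised here is the split between compiling and executing: driver.compileAndRespond only compiles the statement and returns a CommandProcessorResponse for the compile step, so the test can acquire locks against the compiled plan itself, whereas driver.run would lock and execute as well. A minimal sketch of that pattern, reusing the fixture fields (driver, txnMgr, ctx) from the test above:

// Compile only - no locks are requested yet; the response still reports compile success or failure.
CommandProcessorResponse compiled = driver.compileAndRespond("create table if not exists T (a int, b int)");
if (compiled.getResponseCode() == 0) {
    // The test then drives lock acquisition explicitly for the compiled plan.
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
}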

Example 69 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

the class TestDbTxnManager2 method testFairness2.

/**
 * T7 is a table with 2 partitions
 * 1. run select from T7
 * 2. run drop partition on T7 concurrently, with #1 starting first so that #2 blocks
 * 3. start another concurrent select on T7 - it should block behind the waiting X (from the drop) - the lock manager should be fair
 * 4. finish #1 so that drop unblocks
 * 5. rollback the drop to release its X lock
 * 6. #3 should unblock
 */
@Test
public void testFairness2() throws Exception {
    dropTable(new String[] { "T7" });
    CommandProcessorResponse cpr = driver.run("create table if not exists T7 (a int) " + "partitioned by (p int) stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
    // create 2 partitions
    checkCmdOnDriver(driver.run("insert into T7 partition(p) values(1,1),(1,2)"));
    cpr = driver.compileAndRespond("select a from T7 ");
    checkCmdOnDriver(cpr);
    // gets S lock on T7
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    swapTxnManager(txnMgr2);
    cpr = driver.compileAndRespond("alter table T7 drop partition (p=1)");
    checkCmdOnDriver(cpr);
    // tries to get X lock on T7.p=1 and gets Waiting state
    LockState lockState = ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false);
    List<ShowLocksResponseElement> locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 4, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", null, locks);
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", "p=1", locks);
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", "p=2", locks);
    checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "T7", "p=1", locks);
    HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    swapTxnManager(txnMgr3);
    // this should block behind the X lock on T7.p=1
    cpr = driver.compileAndRespond("select a from T7");
    checkCmdOnDriver(cpr);
    // tries to get S lock on T7, S on T7.p=1 and S on T7.p=2
    ((DbTxnManager) txnMgr3).acquireLocks(driver.getPlan(), ctx, "Fifer", false);
    locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 7, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", null, locks);
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", "p=1", locks);
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", "p=2", locks);
    checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", null, locks);
    checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", "p=1", locks);
    checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", "p=2", locks);
    checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "T7", "p=1", locks);
    // release locks from "select a from T7" - to unblock the drop partition
    txnMgr.commitTxn();
    // re-test the "drop partition" X lock
    lockState = ((DbLockManager) txnMgr2.getLockManager()).checkLock(locks.get(6).getLockid());
    locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 4, locks.size());
    checkLock(LockType.EXCLUSIVE, LockState.ACQUIRED, "default", "T7", "p=1", locks);
    checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", null, locks);
    checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", "p=1", locks);
    checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T7", "p=2", locks);
    // release the X lock on T7.p=1
    txnMgr2.rollbackTxn();
    // re-test the locks
    // S lock on T7
    lockState = ((DbLockManager) txnMgr2.getLockManager()).checkLock(locks.get(1).getLockid());
    locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 3, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", null, locks);
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", "p=1", locks);
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T7", "p=2", locks);
}
Also used : CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) LockState(org.apache.hadoop.hive.metastore.api.LockState) ShowLocksResponseElement(org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement) Test(org.junit.Test)
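
A pattern repeated in this test is releasing the blocking transaction and then re-driving a waiting lock by handing its lock id back to DbLockManager.checkLock (the calls on locks.get(6) and locks.get(1) above). A sketch of that step wrapped as a helper (the method name is illustrative only):

private static LockState recheckWaitingLock(HiveTxnManager txnMgr, ShowLocksResponseElement waitingLock) throws Exception {
    // checkLock re-evaluates the lock against the current lock table and promotes it
    // to ACQUIRED if the conflicting locks have been released.
    return ((DbLockManager) txnMgr.getLockManager()).checkLock(waitingLock.getLockid());
}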

Example 70 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

the class TestDbTxnManager2 method testWriteSetTracking9.

/**
 * Concurrent update/delete of different partitions - should pass
 */
@Test
public void testWriteSetTracking9() throws Exception {
    dropTable(new String[] { "TAB1" });
    CommandProcessorResponse cpr = driver.run("create table if not exists tab1 (a int, b int) partitioned by (p string) " + "clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
    checkCmdOnDriver(driver.run("insert into tab1 partition(p)(a,b,p) values(1,1,'one'),(2,2,'two')"));
    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    swapTxnManager(txnMgr2);
    checkCmdOnDriver(driver.compileAndRespond("update tab1 set b = 7 where b=1"));
    long idTxnUpdate1 = txnMgr2.getCurrentTxnId();
    txnMgr2.acquireLocks(driver.getPlan(), ctx, "T2");
    List<ShowLocksResponseElement> locks = getLocks(txnMgr2);
    Assert.assertEquals("Unexpected lock count", 2, locks.size());
    checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
    checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=one", locks);
    // now start concurrent txn
    swapTxnManager(txnMgr);
    checkCmdOnDriver(driver.compileAndRespond("delete from tab1 where p='two' and b=2"));
    long idTxnDelete1 = txnMgr.getCurrentTxnId();
    ((DbTxnManager) txnMgr).acquireLocks(driver.getPlan(), ctx, "T3", false);
    locks = getLocks(txnMgr);
    Assert.assertEquals("Unexpected lock count", 3, locks.size());
    checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
    checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=one", locks);
    checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB1", "p=two", locks);
    // this simulates the completion of txnid:idTxnUpdate1
    long writeId = txnMgr2.getTableWriteId("default", "tab1");
    AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1", Collections.singletonList("p=one"));
    adp.setOperationType(DataOperationType.UPDATE);
    txnHandler.addDynamicPartitions(adp);
    // txnid:idTxnUpdate1
    txnMgr2.commitTxn();
    // retest WAITING locks (both have same ext id)
    ((DbLockManager) txnMgr.getLockManager()).checkLock(locks.get(2).getLockid());
    locks = getLocks(txnMgr);
    Assert.assertEquals("Unexpected lock count", 1, locks.size());
    checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
    // completion of txnid:idTxnDelete1
    writeId = txnMgr.getTableWriteId("default", "tab1");
    adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1", Collections.singletonList("p=two"));
    adp.setOperationType(DataOperationType.DELETE);
    txnHandler.addDynamicPartitions(adp);
    // txnid:idTxnDelete1
    txnMgr.commitTxn();
    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 2, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + (idTxnUpdate1 - 1) + "  and ctc_table='tab1'"));
    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnUpdate1 + "  and ctc_table='tab1' and ctc_partition='p=one'"));
    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_txnid=" + idTxnDelete1 + " and ctc_table='tab1' and ctc_partition='p=two'"));
    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=one' and ws_operation_type='u' and ws_table='tab1'"));
    Assert.assertEquals("WRITE_SET mismatch: " + TxnDbUtil.queryToString(conf, "select * from WRITE_SET"), 1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET where ws_partition='p=two' and ws_operation_type='d' and ws_table='tab1'"));
    Assert.assertEquals("COMPLETED_TXN_COMPONENTS mismatch: " + TxnDbUtil.queryToString(conf, "select * from COMPLETED_TXN_COMPONENTS"), 4, TxnDbUtil.countQueryAgent(conf, "select count(*) from COMPLETED_TXN_COMPONENTS where ctc_table='tab1' and ctc_partition is not null"));
}
Also used : AddDynamicPartitions(org.apache.hadoop.hive.metastore.api.AddDynamicPartitions) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) ShowLocksResponseElement(org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement) Test(org.junit.Test)
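
Since the update and delete write to dynamic partitions, the test has to tell the metastore which partitions each transaction actually touched before committing; that is what the AddDynamicPartitions calls above do, and it is what produces the per-partition rows later asserted in COMPLETED_TXN_COMPONENTS and WRITE_SET. A sketch of that step as a reusable helper (the helper name is hypothetical; txnHandler is the test's existing metastore transaction handler):

private void registerDynamicPartitions(HiveTxnManager mgr, String db, String table,
                                       List<String> partitions, DataOperationType opType) throws Exception {
    // Fetch the write id for this txn/table, then record which partitions were written
    // and how; the operation type (UPDATE vs DELETE) determines the WRITE_SET entry.
    long writeId = mgr.getTableWriteId(db, table);
    AddDynamicPartitions adp = new AddDynamicPartitions(mgr.getCurrentTxnId(), writeId, db, table, partitions);
    adp.setOperationType(opType);
    txnHandler.addDynamicPartitions(adp);
}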

Aggregations

CommandProcessorResponse (org.apache.hadoop.hive.ql.processors.CommandProcessorResponse): 145
Test (org.junit.Test): 92
ShowLocksResponseElement (org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement): 24
HCatBaseTest (org.apache.hive.hcatalog.mapreduce.HCatBaseTest): 19
IOException (java.io.IOException): 18
ArrayList (java.util.ArrayList): 17
AddDynamicPartitions (org.apache.hadoop.hive.metastore.api.AddDynamicPartitions): 8
Database (org.apache.hadoop.hive.metastore.api.Database): 8
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 7
Table (org.apache.hadoop.hive.metastore.api.Table): 7
Path (org.apache.hadoop.fs.Path): 6
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 6
PigServer (org.apache.pig.PigServer): 5
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 4
Driver (org.apache.hadoop.hive.ql.Driver): 4
IDriver (org.apache.hadoop.hive.ql.IDriver): 4
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 4
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger): 4
ParseException (org.apache.hadoop.hive.ql.parse.ParseException): 4
HashMap (java.util.HashMap): 3