
Example 6 with MetastoreTaskThread

Use of org.apache.hadoop.hive.metastore.MetastoreTaskThread in project hive by apache, from the class TestDbTxnManager2, method testWriteSetTracking4.
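Before the full test, a minimal sketch of the pattern it exercises: AcidHouseKeeperService implements MetastoreTaskThread, which combines Configurable and Runnable, so a caller can configure the task and trigger a single housekeeping pass synchronously. This mirrors what the test does near the end; the origin of the Configuration object is assumed.

// Minimal sketch, assuming a metastore Configuration `conf` is already built.
// Because MetastoreTaskThread is both Configurable and Runnable, the task can
// be configured and executed in-line, with no scheduler involved.
MetastoreTaskThread houseKeeper = new AcidHouseKeeperService();
houseKeeper.setConf(conf);
houseKeeper.run();

The test below drives AcidHouseKeeperService in exactly this way, to force WRITE_SET garbage collection at a known point.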

/**
 * Txns overlap and update the same resource, simulating the multi-statement txn case.
 * Also tests that we kill a txn when it tries to acquire a lock once we already know it will not be committed.
 */
@Test
public void testWriteSetTracking4() throws Exception {
    dropTable(new String[] { "TAB_PART", "TAB2" });
    Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
    driver.run("create table if not exists TAB_PART (a int, b int) " + "partitioned by (p string) clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    driver.run("create table if not exists TAB2 (a int, b int) partitioned by (p string) " + "clustered by (a) into 2  buckets stored as orc TBLPROPERTIES ('transactional'='true')");
    txnMgr.openTxn(ctx, "Long Running");
    driver.compileAndRespond("select a from TAB_PART where p = 'blah'", true);
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running");
    List<ShowLocksResponseElement> locks = getLocks(txnMgr);
    Assert.assertEquals("Unexpected lock count", 1, locks.size());
    // for some reason this just locks the table; if I alter the table to add this partition, then
    // we end up locking both the table and the partition with SHARED_READ.  (Plan has 2 ReadEntities)...?
    // same for other locks below
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_PART", null, locks);
    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    txnMgr2.openTxn(ctx, "Short Running");
    // no such partition
    driver.compileAndRespond("update TAB2 set b = 7 where p = 'blah'", true);
    txnMgr2.acquireLocks(driver.getPlan(), ctx, "Short Running");
    locks = getLocks(txnMgr);
    Assert.assertEquals("Unexpected lock count", 2, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_PART", null, locks);
    checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);
    // update stmt has p='blah', so no rows are actually updated and we generate an empty dyn part list
    Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
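    // simulate what the driver would do at commit time: allocate a table-level
    // write id for this txn, then report the (empty) dynamic partition set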
    AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "tab2");
    rqst.setTxnIds(Collections.singletonList(txnMgr2.getCurrentTxnId()));
    AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
    Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId());
    AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), "default", "tab2", Collections.emptyList());
    adp.setOperationType(DataOperationType.UPDATE);
    txnHandler.addDynamicPartitions(adp);
    txnMgr2.commitTxn();
    // Short Running updated nothing, so we expect 0 rows in WRITE_SET
    Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
    txnMgr2.openTxn(ctx, "T3");
    // pretend this partition exists
    driver.compileAndRespond("update TAB2 set b = 7 where p = 'two'", true);
    txnMgr2.acquireLocks(driver.getPlan(), ctx, "T3");
    locks = getLocks(txnMgr);
    Assert.assertEquals("Unexpected lock count", 2, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB_PART", null, locks);
    // table-level lock only, since TAB2 is empty
    checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);
    // update stmt has p='two' but TAB2 has no data, so nothing is actually updated yet and WRITE_SET stays empty
    Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
    rqst = new AllocateTableWriteIdsRequest("default", "tab2");
    rqst.setTxnIds(Collections.singletonList(txnMgr2.getCurrentTxnId()));
    writeIds = txnHandler.allocateTableWriteIds(rqst);
    Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId());
    adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), "default", "tab2", Collections.singletonList("p=two"));
    adp.setOperationType(DataOperationType.UPDATE);
    // simulate partition update
    txnHandler.addDynamicPartitions(adp);
    txnMgr2.commitTxn();
    Assert.assertEquals("WRITE_SET mismatch: " + TestTxnDbUtil.queryToString(conf, "select * from \"WRITE_SET\""), 1, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
    MetastoreTaskThread houseKeeper = new AcidHouseKeeperService();
    houseKeeper.setConf(conf);
    houseKeeper.run();
    // since T3 overlaps with Long Running (still open) GC does nothing
    Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
    // no rows match
    driver.compileAndRespond("update TAB2 set b = 17 where a = 1", true);
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running");
    rqst = new AllocateTableWriteIdsRequest("default", "tab2");
    rqst.setTxnIds(Collections.singletonList(txnMgr.getCurrentTxnId()));
    writeIds = txnHandler.allocateTableWriteIds(rqst);
    Assert.assertEquals(txnMgr.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId());
    // so generate empty Dyn Part call
    adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(), "default", "tab2", Collections.emptyList());
    adp.setOperationType(DataOperationType.UPDATE);
    txnHandler.addDynamicPartitions(adp);
    txnMgr.commitTxn();
    locks = getLocks(txnMgr);
    Assert.assertEquals("Unexpected lock count", 0, locks.size());
    /*
      The last committed transaction always remains in the transaction table, so we open another one,
      wait for the open-txn timeout to elapse, then run the housekeeper again to clean up
     */
    txnMgr.openTxn(ctx, "Long Running");
    Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
    // Now we can clean the write_set
    houseKeeper.run();
    Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
}
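In production the metastore schedules these tasks itself rather than running them by hand. A rough sketch of that driving loop, assuming the runFrequency(TimeUnit) method that MetastoreTaskThread declares for reporting its run interval:

// Sketch only: schedule the task at the interval it reports.
// (`conf` as above; the single-threaded executor is illustrative.)
ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
MetastoreTaskThread task = new AcidHouseKeeperService();
task.setConf(conf);
long intervalMs = task.runFrequency(TimeUnit.MILLISECONDS);
pool.scheduleAtFixedRate(task, intervalMs, intervalMs, TimeUnit.MILLISECONDS);

Running the task directly in the test, instead of going through a scheduler like this, is what lets the assertions above pin down exactly when WRITE_SET GC happens.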
Also used:
AddDynamicPartitions (org.apache.hadoop.hive.metastore.api.AddDynamicPartitions)
AllocateTableWriteIdsResponse (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse)
AllocateTableWriteIdsRequest (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest)
MetastoreTaskThread (org.apache.hadoop.hive.metastore.MetastoreTaskThread)
AcidHouseKeeperService (org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService)
ShowLocksResponseElement (org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement)
Test (org.junit.Test)

Aggregations

MetastoreTaskThread (org.apache.hadoop.hive.metastore.MetastoreTaskThread): 6 usages
AcidHouseKeeperService (org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService): 6 usages
Test (org.junit.Test): 5 usages
ShowLocksResponseElement (org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement): 2 usages
TxnStore (org.apache.hadoop.hive.metastore.txn.TxnStore): 2 usages
CommandProcessorException (org.apache.hadoop.hive.ql.processors.CommandProcessorException): 2 usages
IOException (java.io.IOException): 1 usage
ExecutionException (java.util.concurrent.ExecutionException): 1 usage
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1 usage
FileStatus (org.apache.hadoop.fs.FileStatus): 1 usage
FileSystem (org.apache.hadoop.fs.FileSystem): 1 usage
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
AddDynamicPartitions (org.apache.hadoop.hive.metastore.api.AddDynamicPartitions): 1 usage
AllocateTableWriteIdsRequest (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest): 1 usage
AllocateTableWriteIdsResponse (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse): 1 usage
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 1 usage
GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse): 1 usage
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 1 usage
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 1 usage