Use of org.apache.hadoop.hive.metastore.MetastoreTaskThread in project hive by apache.
The class TestTxnCommands, method testTimeOutReaper.
@Test
public void testTimeOutReaper() throws Exception {
  runStatementOnDriver("start transaction");
  runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 5");
  // make sure the currently running txn is considered aborted by the housekeeper
  hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS);
  MetastoreTaskThread houseKeeperService = new AcidHouseKeeperService();
  houseKeeperService.setConf(hiveConf);
  // this will abort the txn
  houseKeeperService.run();
  // this should fail because the txn was aborted due to the timeout
  CommandProcessorException e = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5");
  Assert.assertTrue("Actual: " + e.getMessage(),
      e.getMessage().contains("Transaction manager has aborted the transaction txnid:1"));
  // now test that we don't time out locks we should not;
  // the heartbeater should be running in the background every 1/2 second
  hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS);
  // Have to reset the conf when we change it so that the change takes effect
  houseKeeperService.setConf(hiveConf);
  runStatementOnDriver("start transaction");
  runStatementOnDriver("select count(*) from " + Table.ACIDTBL + " where a = 17");
  pause(750);
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  // since there is a txn open, we are heartbeating the txn, not individual locks
  GetOpenTxnsInfoResponse txnsInfoResponse = txnHandler.getOpenTxnsInfo();
  Assert.assertEquals(2, txnsInfoResponse.getOpen_txns().size());
  TxnInfo txnInfo = null;
  for (TxnInfo ti : txnsInfoResponse.getOpen_txns()) {
    if (ti.getState() == TxnState.OPEN) {
      txnInfo = ti;
      break;
    }
  }
  Assert.assertNotNull(txnInfo);
  Assert.assertEquals(16, txnInfo.getId());
  Assert.assertEquals(TxnState.OPEN, txnInfo.getState());
  String s = TestTxnDbUtil.queryToString(hiveConf,
      "select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
  String[] vals = s.split("\\s+");
  Assert.assertEquals("Didn't get expected timestamps", 2, vals.length);
  long lastHeartbeat = Long.parseLong(vals[1]);
  // these 2 values are equal when the TXN entry is made and should never be equal after the
  // 1st heartbeat, which we expect to have happened by now since HIVE_TXN_TIMEOUT=1sec
  Assert.assertNotEquals("Didn't see heartbeat happen", Long.parseLong(vals[0]), lastHeartbeat);
  ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
  TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
  pause(750);
  houseKeeperService.run();
  pause(750);
  slr = txnHandler.showLocks(new ShowLocksRequest());
  Assert.assertEquals("Unexpected lock count: " + slr, 1, slr.getLocks().size());
  TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
  pause(750);
  houseKeeperService.run();
  slr = txnHandler.showLocks(new ShowLocksRequest());
  Assert.assertEquals("Unexpected lock count: " + slr, 1, slr.getLocks().size());
  TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
  // should've done several heartbeats by now
  s = TestTxnDbUtil.queryToString(hiveConf,
      "select TXN_STARTED, TXN_LAST_HEARTBEAT from TXNS where TXN_ID = " + txnInfo.getId(), false);
  vals = s.split("\\s+");
  Assert.assertEquals("Didn't get expected timestamps", 2, vals.length);
  Assert.assertTrue("Heartbeat didn't progress: (old,new) (" + lastHeartbeat + "," + vals[1] + ")",
      lastHeartbeat < Long.parseLong(vals[1]));
  runStatementOnDriver("rollback");
  slr = txnHandler.showLocks(new ShowLocksRequest());
  Assert.assertEquals("Unexpected lock count", 0, slr.getLocks().size());
}
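All of these examples follow the same two-step contract: call setConf() before run(). MetastoreTaskThread itself is a Configurable Runnable that also advertises how often it should run. Below is a minimal sketch of driving any such task; the interface methods (setConf, run, runFrequency) are the real ones, but the HouseKeeperDriver class and its scheduling code are illustrative assumptions, not the metastore's actual startup path.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetastoreTaskThread;

public class HouseKeeperDriver {
  /**
   * Drives a MetastoreTaskThread the way the tests above do: configure it
   * first, then either invoke run() once synchronously (as the tests do)
   * or schedule it at the frequency the task itself advertises.
   */
  public static ScheduledExecutorService schedule(MetastoreTaskThread task, Configuration conf) {
    task.setConf(conf); // setConf() must precede run(); run() reads the conf
    long periodMs = task.runFrequency(TimeUnit.MILLISECONDS);
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    pool.scheduleAtFixedRate(task, periodMs, periodMs, TimeUnit.MILLISECONDS);
    return pool;
  }
}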
Use of org.apache.hadoop.hive.metastore.MetastoreTaskThread in project hive by apache.
The class TestTxnCommands2, method testInitiatorWithMultipleFailedCompactionsForVariousTblProperties.
void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tblProperties) throws Exception {
  String tblName = "hive12353";
  runStatementOnDriver("drop table if exists " + tblName);
  // currently ACID requires the table to be bucketed
  runStatementOnDriver("CREATE TABLE " + tblName + "(a INT, b STRING) " +
      " CLUSTERED BY(a) INTO 1 BUCKETS" +
      " STORED AS ORC TBLPROPERTIES ( " + tblProperties + " )");
  hiveConf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 4);
  for (int i = 0; i < 5; i++) {
    // generate enough delta files so that the Initiator can trigger auto compaction
    runStatementOnDriver("insert into " + tblName + " values(" + (i + 1) + ", 'foo'),(" + (i + 2) + ", 'bar'),(" + (i + 3) + ", 'baz')");
  }
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
  MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true);
  int numFailedCompactions = MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
  AtomicBoolean stop = new AtomicBoolean(true);
  // create failed compactions
  for (int i = 0; i < numFailedCompactions; i++) {
    // each of these should fail
    txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
    runWorker(hiveConf);
  }
  // this should not schedule a new compaction due to the prior failures, but will create a 'did not initiate' entry
  runInitiator(hiveConf);
  int numDidNotInitiateCompactions = 1;
  checkCompactionState(new CompactionsByState(numDidNotInitiateCompactions, numFailedCompactions, 0, 0, 0, 0,
      numFailedCompactions + numDidNotInitiateCompactions), countCompacts(txnHandler));
  MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.ACID_HOUSEKEEPER_SERVICE_INTERVAL, 10, TimeUnit.MILLISECONDS);
  MetastoreTaskThread houseKeeper = new AcidHouseKeeperService();
  houseKeeper.setConf(hiveConf);
  houseKeeper.run();
  checkCompactionState(new CompactionsByState(numDidNotInitiateCompactions, numFailedCompactions, 0, 0, 0, 0,
      numFailedCompactions + numDidNotInitiateCompactions), countCompacts(txnHandler));
  txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MAJOR));
  // will fail
  runWorker(hiveConf);
  txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
  // will fail
  runWorker(hiveConf);
  runInitiator(hiveConf);
  numDidNotInitiateCompactions++;
  runInitiator(hiveConf);
  numDidNotInitiateCompactions++;
  checkCompactionState(new CompactionsByState(numDidNotInitiateCompactions, numFailedCompactions + 2, 0, 0, 0, 0,
      numFailedCompactions + 2 + numDidNotInitiateCompactions), countCompacts(txnHandler));
  houseKeeper.run();
  // only COMPACTOR_HISTORY_RETENTION_FAILED failed compactions and
  // COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE 'did not initiate' entries are left
  // (and no others, since those are the only kinds we have here)
  checkCompactionState(new CompactionsByState(
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE),
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 0, 0, 0, 0,
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED)
          + MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE)),
      countCompacts(txnHandler));
  hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false);
  txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR));
  // at this point "show compactions" should have the retained failed and 'did not initiate'
  // entries, plus 1 initiated (explicitly by the user)
  checkCompactionState(new CompactionsByState(
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE),
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 1, 0, 0, 0,
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED)
          + MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE) + 1),
      countCompacts(txnHandler));
  // will succeed and transition to Initiated->Working->Ready for Cleaning
  runWorker(hiveConf);
  checkCompactionState(new CompactionsByState(
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE),
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 0, 1, 0, 0,
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED)
          + MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE) + 1),
      countCompacts(txnHandler));
  // transition to Success state
  runCleaner(hiveConf);
  houseKeeper.run();
  checkCompactionState(new CompactionsByState(
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE),
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 0, 0, 1, 0,
      MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED)
          + MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE) + 1),
      countCompacts(txnHandler));
}
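The repeated inline MetastoreConf.getIntVar calls make the final assertions hard to read. A sketch of the last check with the two retention limits hoisted into locals; it uses the same ConfVars the test reads, and the positional meaning of the CompactionsByState arguments is inferred from the calls above (did-not-initiate, failed, initiated, ready-for-cleaning, succeeded, a sixth state, total):

// Same assertion as the end of the test, just with the retention limits named.
int didNotInitiateRetention = MetastoreConf.getIntVar(hiveConf,
    MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE);
int failedRetention = MetastoreConf.getIntVar(hiveConf,
    MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
checkCompactionState(
    new CompactionsByState(didNotInitiateRetention, failedRetention, 0, 0, 1, 0,
        didNotInitiateRetention + failedRetention + 1),
    countCompacts(txnHandler));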
Use of org.apache.hadoop.hive.metastore.MetastoreTaskThread in project hive by apache.
The class TestTxnCommands, method testDropTableWithSuffix.
@Test
public void testDropTableWithSuffix() throws Exception {
  String tableName = "tab_acid";
  runStatementOnDriver("drop table if exists " + tableName);
  HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX, true);
  runStatementOnDriver("create table " + tableName + "(a int, b int) stored as orc TBLPROPERTIES ('transactional'='true')");
  runStatementOnDriver("insert into " + tableName + " values(1,2),(3,4)");
  runStatementOnDriver("drop table " + tableName);
  int count = TestTxnDbUtil.countQueryAgent(hiveConf,
      "select count(*) from TXN_TO_WRITE_ID where T2W_TABLE = '" + tableName + "'");
  Assert.assertEquals(1, count);
  FileSystem fs = FileSystem.get(hiveConf);
  FileStatus[] stat = fs.listStatus(new Path(getWarehouseDir()),
      t -> t.getName().matches(tableName + SOFT_DELETE_TABLE_PATTERN));
  if (1 != stat.length) {
    Assert.fail("Table data was removed from FS");
  }
  MetastoreTaskThread houseKeeperService = new AcidHouseKeeperService();
  houseKeeperService.setConf(hiveConf);
  houseKeeperService.run();
  count = TestTxnDbUtil.countQueryAgent(hiveConf,
      "select count(*) from TXN_TO_WRITE_ID where T2W_TABLE = '" + tableName + "'");
  Assert.assertEquals(0, count);
  try {
    runStatementOnDriver("select * from " + tableName);
    // the select must fail once the table has been dropped
    Assert.fail("Expected select on dropped table to fail");
  } catch (Exception ex) {
    Assert.assertTrue(ex.getMessage().contains(ErrorMsg.INVALID_TABLE.getMsg(StringUtils.wrap(tableName, "'"))));
  }
  // Check the status of the compaction job
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
  Assert.assertEquals("Unexpected 0 compaction state", TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState());
  runCleaner(hiveConf);
  FileStatus[] status = fs.listStatus(new Path(getWarehouseDir()),
      t -> t.getName().matches(tableName + SOFT_DELETE_TABLE_PATTERN));
  Assert.assertEquals(0, status.length);
}
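Here the housekeeper is invoked synchronously, so a single run() suffices before re-counting TXN_TO_WRITE_ID. If the service were instead running on its background schedule, the test would have to poll for the rows to disappear. A hypothetical helper for that (waitForZero is invented; TestTxnDbUtil.countQueryAgent is the same utility used above):

// Hypothetical polling helper: wait until a metastore-side count query
// returns zero rows, or fail after the timeout.
static void waitForZero(HiveConf conf, String countQuery, long timeoutMs) throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    if (TestTxnDbUtil.countQueryAgent(conf, countQuery) == 0) {
      return;
    }
    Thread.sleep(100);
  }
  Assert.fail("Rows still present for: " + countQuery);
}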
Use of org.apache.hadoop.hive.metastore.MetastoreTaskThread in project hive by apache.
The class TestDbTxnManager2, method testRemoveDuplicateCompletedTxnComponents.
@Test
public void testRemoveDuplicateCompletedTxnComponents() throws Exception {
  dropTable(new String[] { "tab_acid" });
  driver.run("create table if not exists tab_acid (a int) partitioned by (p string) " +
      "stored as orc TBLPROPERTIES ('transactional'='true')");
  driver.run("insert into tab_acid values(1,'foo'),(3,'bar')");
  driver.run("insert into tab_acid values(2,'foo'),(4,'bar')");
  driver.run("delete from tab_acid where a=2");
  Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from \"COMPLETED_TXN_COMPONENTS\""), 5,
      TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\""));
  MetastoreTaskThread houseKeeper = new AcidHouseKeeperService();
  houseKeeper.setConf(conf);
  houseKeeper.run();
  Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from \"COMPLETED_TXN_COMPONENTS\""), 2,
      TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\""));
  Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(conf,
      "select count(*) from \"COMPLETED_TXN_COMPONENTS\" where \"CTC_PARTITION\"='p=bar' and \"CTC_TXNID\"=4"));
  Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(conf,
      "select count(*) from \"COMPLETED_TXN_COMPONENTS\" where \"CTC_PARTITION\"='p=foo' and \"CTC_TXNID\"=5"));
  driver.run("insert into tab_acid values(3,'foo')");
  driver.run("insert into tab_acid values(4,'foo')");
  Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from \"COMPLETED_TXN_COMPONENTS\""), 4,
      TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\""));
  houseKeeper.run();
  Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from \"COMPLETED_TXN_COMPONENTS\""), 3,
      TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\""));
  Assert.assertEquals(2, TestTxnDbUtil.countQueryAgent(conf,
      "select count(*) from \"COMPLETED_TXN_COMPONENTS\" where \"CTC_PARTITION\"='p=foo' and \"CTC_TXNID\" IN (5,7)"));
}
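The assertions above imply the deduplication rule: per (table, partition), the housekeeper keeps only the component row from the most recent committed txn. When debugging, the surviving rows can be dumped directly; a small sketch (the query text is illustrative of what the assertions check, not the cleaner's own SQL; queryToString is the same helper used above):

// Dump the surviving component rows, ordered by txn id, to see which
// (partition, txn) pairs the housekeeper retained.
String surviving = TestTxnDbUtil.queryToString(conf,
    "select \"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_TXNID\" " +
    "from \"COMPLETED_TXN_COMPONENTS\" order by \"CTC_TXNID\"");
System.out.println(surviving);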
Use of org.apache.hadoop.hive.metastore.MetastoreTaskThread in project hive by apache.
The class TestDbTxnManager2, method testWriteSetTracking6.
/**
 * Check that a read query running concurrently with another txn works OK.
 */
@Test
public void testWriteSetTracking6() throws Exception {
  dropTable(new String[] { "TAB2" });
  Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
  driver.run("create table if not exists TAB2(a int, b int) clustered " +
      "by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
  driver.compileAndRespond("select * from TAB2 where a = 113", true);
  txnMgr.acquireLocks(driver.getPlan(), ctx, "Works");
  List<ShowLocksResponseElement> locks = getLocks(txnMgr);
  Assert.assertEquals("Unexpected lock count", 1, locks.size());
  checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks);
  HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
  swapTxnManager(txnMgr2);
  driver.compileAndRespond("update TAB2 set b = 17 where a = 101", true);
  txnMgr2.acquireLocks(driver.getPlan(), ctx, "Horton");
  Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
  locks = getLocks(txnMgr);
  Assert.assertEquals("Unexpected lock count", 2, locks.size());
  checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks);
  checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);
  // no conflict
  txnMgr2.commitTxn();
  Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
  locks = getLocks(txnMgr);
  Assert.assertEquals("Unexpected lock count", 1, locks.size());
  checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks);
  txnMgr.commitTxn();
  /*
   * The last transaction will always remain in the transaction table, so we open another one,
   * wait for the open-txn timeout to pass, and then run the housekeeper, which will clean up.
   */
  txnMgr.openTxn(ctx, "Long Running");
  Thread.sleep(txnHandler.getOpenTxnTimeOutMillis());
  // Now we can clean the WRITE_SET
  MetastoreTaskThread writeSetService = new AcidHouseKeeperService();
  writeSetService.setConf(conf);
  writeSetService.run();
  Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"WRITE_SET\""));
}
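The WRITE_SET rows the housekeeper eventually purges exist to support write-write conflict detection at commit time, which is why the concurrent read above commits without conflict. A hypothetical, self-contained sketch of that rule (types and names invented for illustration; TxnHandler's real check is SQL against the WRITE_SET table):

import java.util.List;
import java.util.Objects;

// Hypothetical in-memory model of a WRITE_SET row; illustrative only.
class WriteSetEntry {
  final long commitId;
  final String db, table, partition;
  WriteSetEntry(long commitId, String db, String table, String partition) {
    this.commitId = commitId;
    this.db = db;
    this.table = table;
    this.partition = partition;
  }
}

class ConflictRule {
  // A committing writer conflicts if some other txn committed a write to the
  // same (db, table, partition) after this txn started. The test above sees
  // no conflict because the concurrent statement was only a read.
  static boolean conflicts(long txnStartId, String db, String table, String partition,
      List<WriteSetEntry> committedWrites) {
    return committedWrites.stream().anyMatch(w ->
        w.commitId > txnStartId
            && w.db.equals(db)
            && w.table.equals(table)
            && Objects.equals(w.partition, partition));
  }
}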