Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
From class TestInitiator, method cleanEmptyAbortedTxns.
@Test
public void cleanEmptyAbortedTxns() throws Exception {
  // Test that we are cleaning aborted transactions with no components left in txn_components.
  // Put one aborted transaction with an entry in txn_components to make sure we don't
  // accidentally clean it too.
  Table t = newTable("default", "ceat", false);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("ceat");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  conf.setIntVar(HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH, TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50);
  OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50, "user", "hostname"));
  txnHandler.abortTxns(new AbortTxnsRequest(resp.getTxn_ids()));
  GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1, openTxns.getOpen_txnsSize());
  startInitiator();
  openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(1, openTxns.getOpen_txnsSize());
}
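In the test above, the aborted batch still shows up in getOpenTxns() until the Initiator clears it; GetOpenTxnsResponse flags such entries in an aborted-bits bitmap, which the compaction tests further down read via BitSet.valueOf. A minimal sketch of how that bitmap pairs with the open-txn list; the OpenTxnsInspector class is hypothetical and uses only the thrift getters already seen in these tests:

import java.util.BitSet;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;

public final class OpenTxnsInspector {
  private OpenTxnsInspector() {}

  /** Counts entries of getOpen_txns() whose bit is set in getAbortedBits(). */
  public static int countAborted(GetOpenTxnsResponse resp) {
    BitSet aborted = BitSet.valueOf(resp.getAbortedBits());
    List<Long> openTxns = resp.getOpen_txns();
    int count = 0;
    for (int i = 0; i < openTxns.size(); i++) {
      if (aborted.get(i)) { // bit i corresponds to the i-th id in the open list
        count++;
      }
    }
    return count;
  }
}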
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
From class TestTxnHandler, method testValidTxnsEmpty.
@Test
public void testValidTxnsEmpty() throws Exception {
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(0L, txnsInfo.getTxn_high_water_mark());
  assertTrue(txnsInfo.getOpen_txns().isEmpty());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(0L, txns.getTxn_high_water_mark());
  assertTrue(txns.getOpen_txns().isEmpty());
}
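An empty response means every id up to the high-water mark has been resolved, so a reader snapshot built from it would treat all of them as visible. A minimal sketch of that reading; the SnapshotSketch class is hypothetical (Hive's own conversion lives in the metastore txn utilities, e.g. a createValidReadTxnList-style method whose exact location varies by version):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;

public final class SnapshotSketch {
  /** Returns ids <= HWM that are NOT in the open list, i.e. resolved txns. */
  public static Set<Long> resolvedTxns(GetOpenTxnsResponse resp) {
    Set<Long> open = new HashSet<>(resp.getOpen_txns());
    Set<Long> resolved = new HashSet<>();
    for (long id = 1; id <= resp.getTxn_high_water_mark(); id++) {
      if (!open.contains(id)) {
        resolved.add(id);
      }
    }
    return resolved; // empty response with HWM 0 yields an empty set
  }
}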
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
From class TestTxnHandler, method testAbortTxn.
@Test
public void testAbortTxn() throws Exception {
  OpenTxnsResponse openedTxns = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  List<Long> txnList = openedTxns.getTxn_ids();
  long first = txnList.get(0);
  assertEquals(1L, first);
  long second = txnList.get(1);
  assertEquals(2L, second);
  txnHandler.abortTxn(new AbortTxnRequest(1));
  List<String> parts = new ArrayList<String>();
  parts.add("p=1");
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "T");
  rqst.setTxnIds(Collections.singletonList(3L));
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(3, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);
  AddDynamicPartitions adp = new AddDynamicPartitions(3, writeId, "default", "T", parts);
  adp.setOperationType(DataOperationType.INSERT);
  txnHandler.addDynamicPartitions(adp);
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(3, txnsInfo.getTxn_high_water_mark());
  assertEquals(3, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  assertEquals(3, txnsInfo.getOpen_txns().get(2).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(2).getState());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(3, txns.getTxn_high_water_mark());
  assertEquals(3, txns.getOpen_txns().size());
  boolean[] saw = new boolean[4];
  for (int i = 0; i < saw.length; i++) saw[i] = false;
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  for (int i = 1; i < saw.length; i++) assertTrue(saw[i]);
  txnHandler.commitTxn(new CommitTxnRequest(2));
  // this succeeds as abortTxn is idempotent
  txnHandler.abortTxn(new AbortTxnRequest(1));
  boolean gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // this is the last committed txn, so it is still in the TXNS table
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(2) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  txnHandler.commitTxn(new CommitTxnRequest(3));
  try {
    txnHandler.abortTxn(new AbortTxnRequest(3));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // txn 3 is not an empty txn, so we get a better message
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(3) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);
  txnHandler.setOpenTxnTimeOutMillis(1);
  txnHandler.cleanEmptyAbortedAndCommittedTxns();
  txnHandler.setOpenTxnTimeOutMillis(1000);
  gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // now the second transaction is cleared and, since it was empty, we no longer recognize it
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(2), ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(4));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(4), ex.getMessage());
  }
  Assert.assertTrue(gotException);
}
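The tail of this test pins down the abort contract: re-aborting an already-aborted txn succeeds, while a committed or already-cleaned txn surfaces NoSuchTxnException, with a message that depends on whether the txn is still present in the TXNS table. A minimal caller-side sketch of that contract; the tryAbort helper is hypothetical:

import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public final class AbortHelper {
  /** Returns true if the txn ended up aborted, false if it no longer exists as abortable. */
  public static boolean tryAbort(TxnStore txnHandler, long txnId) throws Exception {
    try {
      txnHandler.abortTxn(new AbortTxnRequest(txnId)); // idempotent for already-aborted txns
      return true;
    } catch (NoSuchTxnException e) {
      // committed txns ("... is already committed.") and cleaned/unknown ids
      // ("No such transaction ...") both land here, with different messages
      return false;
    }
  }
}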
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
From class TestTxnCommands3, method testCompactionAbort.
@Test
public void testCompactionAbort() throws Exception {
  MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID, true);
  dropTable(new String[] { "T" });
  // note: transaction names T1, T2, etc. below are logical; the actual txnids will differ
  runStatementOnDriver("create table T (a int, b int) stored as orc");
  // makes delta_1_1 in T1
  runStatementOnDriver("insert into T values(0,2)");
  // makes delta_2_2 in T2
  runStatementOnDriver("insert into T values(1,4)");
  // create a failed compaction attempt so that the compactor txn is aborted
  HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
  runStatementOnDriver("alter table T compact 'minor'");
  runWorker(hiveConf);
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
  Assert.assertEquals("Unexpected 0th compaction state", TxnStore.FAILED_RESPONSE, resp.getCompacts().get(0).getState());
  GetOpenTxnsResponse openResp = txnHandler.getOpenTxns();
  Assert.assertEquals(openResp.toString(), 1, openResp.getOpen_txnsSize());
  // check that the compactor txn is aborted
  Assert.assertTrue(openResp.toString(), BitSet.valueOf(openResp.getAbortedBits()).get(0));
  runCleaner(hiveConf);
  // we still have 1 aborted (compactor) txn
  Assert.assertTrue(openResp.toString(), BitSet.valueOf(openResp.getAbortedBits()).get(0));
  Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_COMPONENTS"));
  // this returns 1 row since we only have 1 compaction executed
  int highestCompactWriteId = TestTxnDbUtil.countQueryAgent(hiveConf, "select CC_HIGHEST_WRITE_ID from COMPLETED_COMPACTIONS");
  /**
   * See {@link org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler#updateCompactorState(CompactionInfo, long)}
   * for notes on why CC_HIGHEST_WRITE_ID=TC_WRITEID
   */
  Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_COMPONENTS where TC_WRITEID=" + highestCompactWriteId));
  // now make a successful compactor run so that the next Cleaner run actually cleans
  HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false);
  runStatementOnDriver("alter table T compact 'minor'");
  runWorker(hiveConf);
  resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 2, resp.getCompactsSize());
  // check both combinations - we don't know what order the db returns them in
  Assert.assertTrue("Unexpected compaction state",
      (TxnStore.FAILED_RESPONSE.equalsIgnoreCase(resp.getCompacts().get(0).getState())
          && TxnStore.CLEANING_RESPONSE.equalsIgnoreCase(resp.getCompacts().get(1).getState()))
      || (TxnStore.CLEANING_RESPONSE.equalsIgnoreCase(resp.getCompacts().get(0).getState())
          && TxnStore.FAILED_RESPONSE.equalsIgnoreCase(resp.getCompacts().get(1).getState())));
  // delete metadata about the aborted txn from txn_components and files (if any)
  runCleaner(hiveConf);
}
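The order-insensitive state check near the end is written out by hand because the db may return the two history rows in either order. A minimal sketch of the same check done by collecting states first and comparing as a set; the CompactionStates helper is hypothetical:

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;

public final class CompactionStates {
  /** Collects the compaction states from a SHOW COMPACTIONS response, ignoring row order. */
  public static Set<String> statesOf(ShowCompactResponse resp) {
    Set<String> states = new HashSet<>();
    for (ShowCompactResponseElement e : resp.getCompacts()) {
      states.add(e.getState().toLowerCase());
    }
    return states;
  }
}

With it, the long assertion collapses to checking that statesOf(resp) equals the set of the expected "failed" and "ready for cleaning" states, at the cost of losing the exactly-one-of-each guarantee the hand-rolled version keeps.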
Use of org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse in project hive by apache.
From class TestTxnCommands3, method testMinorCompactionAbortLeftoverFiles.
@Test
public void testMinorCompactionAbortLeftoverFiles() throws Exception {
  MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID, true);
  dropTable(new String[] { "T" });
  // note: transaction names T1, T2, etc. below are logical; the actual txnids will differ
  runStatementOnDriver("create table T (a int, b int) stored as orc");
  // makes delta_1_1 in T1
  runStatementOnDriver("insert into T values(0,2)");
  // makes delta_2_2 in T2
  runStatementOnDriver("insert into T values(1,4)");
  // makes delta/(delete_delta)_3_3 in T3
  runStatementOnDriver("update T set a=3 where b=2");
  runStatementOnDriver("alter table T compact 'minor'");
  // create a failed compaction attempt so that the compactor txn is aborted
  CompactorMR compactorMr = Mockito.spy(new CompactorMR());
  Mockito.doAnswer((Answer<Void>) invocationOnMock -> {
    invocationOnMock.callRealMethod();
    throw new RuntimeException("Will cause CompactorMR to fail all opening txn and creating directories for compaction.");
  }).when(compactorMr).run(any(), any(), any(), any(), any(), any(), any(), any(), any());
  Worker worker = Mockito.spy(new Worker());
  worker.setConf(hiveConf);
  worker.init(new AtomicBoolean(true));
  Mockito.doReturn(compactorMr).when(worker).getMrCompactor();
  worker.run();
  TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize());
  Assert.assertEquals("Unexpected 0th compaction state", TxnStore.FAILED_RESPONSE, resp.getCompacts().get(0).getState());
  GetOpenTxnsResponse openResp = txnHandler.getOpenTxns();
  Assert.assertEquals(openResp.toString(), 1, openResp.getOpen_txnsSize());
  // check that the compactor txn is aborted
  Assert.assertTrue(openResp.toString(), BitSet.valueOf(openResp.getAbortedBits()).get(0));
  Assert.assertEquals(0, TestTxnDbUtil.countQueryAgent(hiveConf, "SELECT count(*) FROM hive_locks WHERE hl_txnid=" + openResp.getOpen_txns().get(0)));
  FileSystem fs = FileSystem.get(hiveConf);
  Path warehousePath = new Path(getWarehouseDir());
  FileStatus[] actualList = fs.listStatus(new Path(warehousePath + "/t"), FileUtils.HIDDEN_FILES_PATH_FILTER);
  // we expect the directories created by the failed compaction to be removed,
  // leaving only the original delta/delete_delta directories
  String[] expectedList = new String[] {
      "/t/delta_0000001_0000001_0000",
      "/t/delta_0000002_0000002_0000",
      "/t/delete_delta_0000003_0000003_0000",
      "/t/delta_0000003_0000003_0000"
  };
  checkExpectedFiles(actualList, expectedList, warehousePath.toString());
  // delete metadata about the aborted txn from txn_components and files (if any)
  runCleaner(hiveConf);
}
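checkExpectedFiles is a helper of the test class and is not shown in this snippet. A plausible sketch of what it does, assuming simple set equality on paths relative to the warehouse root (a hypothetical reconstruction, not the actual Hive helper):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import org.junit.Assert;

final class ExpectedFilesCheck {
  static void checkExpectedFiles(FileStatus[] actual, String[] expected, String warehouseRoot) {
    Set<String> actualPaths = new HashSet<>();
    for (FileStatus stat : actual) {
      // strip the warehouse prefix so entries match the "/t/delta_..." form above
      actualPaths.add(stat.getPath().toUri().getPath().replace(warehouseRoot, ""));
    }
    Assert.assertEquals(new HashSet<>(Arrays.asList(expected)), actualPaths);
  }
}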