Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache: class TestInitiator, method noCompactWhenNoCompactSet. The test aborts more transactions against a table than the configured threshold allows and verifies that the NO_AUTO_COMPACTION table property keeps the Initiator from queueing a compaction.
@Test
public void noCompactWhenNoCompactSet() throws Exception {
  Map<String, String> parameters = new HashMap<String, String>(1);
  parameters.put("NO_AUTO_COMPACTION", "true");
  Table t = newTable("default", "ncwncs", false, parameters);
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  // Abort one more transaction against the table than the threshold allows.
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ncwncs");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  startInitiator();
  // NO_AUTO_COMPACTION wins over the exceeded threshold: nothing is queued.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(0, rsp.getCompactsSize());
}
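The open-a-transaction, lock, then abort sequence inside the loop above recurs throughout these tests. A minimal sketch of that pattern factored into a helper, assuming the same txnHandler and openTxn() fixtures the test classes provide (the helper name openLockAbort is hypothetical, and java.util.Collections is imported):

private long openLockAbort(String db, String table, DataOperationType op) throws Exception {
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, db);
  comp.setTablename(table);
  comp.setOperationType(op);
  LockRequest req = new LockRequest(Collections.singletonList(comp), "me", "localhost");
  req.setTxnid(txnid);
  txnHandler.lock(req);
  // Aborting leaves an entry in TXN_COMPONENTS for this table, which is
  // what the aborted-transaction threshold counts.
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  return txnid;
}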
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache: class TestInitiator, method cleanEmptyAbortedTxns. The test verifies that aborted transactions with no remaining TXN_COMPONENTS entries are cleaned up, while an aborted transaction that still has an entry is kept.
@Test
public void cleanEmptyAbortedTxns() throws Exception {
  // Test that we are cleaning aborted transactions with no components left in
  // TXN_COMPONENTS. Put one aborted transaction with an entry in
  // TXN_COMPONENTS to make sure we don't accidentally clean it too.
  Table t = newTable("default", "ceat", false);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("ceat");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  // Abort a large batch of empty transactions (no lock components).
  for (int i = 0; i < TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50; i++) {
    txnid = openTxn();
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  // getOpenTxns() reports aborted transactions as well as open ones.
  GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1, openTxns.getOpen_txnsSize());
  startInitiator();
  // Only the aborted transaction that still has a TXN_COMPONENTS entry survives.
  openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(1, openTxns.getOpen_txnsSize());
}
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache: class TestTxnHandler, method testAbortInvalidTxn. The test verifies that aborting a transaction id that was never opened raises NoSuchTxnException.
@Test
public void testAbortInvalidTxn() throws Exception {
  // Aborting a transaction id that was never opened must fail.
  boolean caught = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(195L));
  } catch (NoSuchTxnException e) {
    caught = true;
  }
  assertTrue(caught);
}
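On JUnit 4.13 or newer, the boolean-flag pattern above can be written more compactly with Assert.assertThrows; a minimal sketch, assuming that JUnit version is on the classpath:

// assertThrows fails the test unless the lambda throws NoSuchTxnException.
Assert.assertThrows(NoSuchTxnException.class,
    () -> txnHandler.abortTxn(new AbortTxnRequest(195L)));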
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache: class TestCompactionTxnHandler, method testMarkCleanedCleansTxnsAndTxnComponents. The test walks compactions through the queue and verifies that markCleaned and cleanEmptyAbortedTxns remove the right TXNS and TXN_COMPONENTS rows.
// TODO test changes to mark cleaned to clean txns and txn_components
@Test
public void testMarkCleanedCleansTxnsAndTxnComponents() throws Exception {
  // First aborted transaction: one component on mydb.mytable.
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setOperationType(DataOperationType.INSERT);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));

  // Second aborted transaction: one component on mydb.yourtable.
  txnid = openTxn();
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("yourtable");
  comp.setOperationType(DataOperationType.DELETE);
  components = new ArrayList<LockComponent>(1);
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));

  // Third aborted transaction: components on two partitions of mydb.foo.
  txnid = openTxn();
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("foo");
  comp.setPartitionname("bar");
  comp.setOperationType(DataOperationType.UPDATE);
  components = new ArrayList<LockComponent>(1);
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("foo");
  comp.setPartitionname("baz");
  comp.setOperationType(DataOperationType.UPDATE);
  components = new ArrayList<LockComponent>(1);
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));

  // Now clean them and check that they are removed from the count.
  // Compact and clean mydb.mytable: the first aborted transaction's
  // TXN_COMPONENTS entry goes away with markCleaned().
  CompactionRequest rqst = new CompactionRequest("mydb", "mytable", CompactionType.MAJOR);
  txnHandler.compact(rqst);
  assertEquals(0, txnHandler.findReadyToClean().size());
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  assertNotNull(ci);
  txnHandler.markCompacted(ci);
  List<CompactionInfo> toClean = txnHandler.findReadyToClean();
  assertEquals(1, toClean.size());
  txnHandler.markCleaned(ci);

  // Check that we are cleaning up the empty aborted transactions.
  GetOpenTxnsResponse txnList = txnHandler.getOpenTxns();
  assertEquals(3, txnList.getOpen_txnsSize());
  txnHandler.cleanEmptyAbortedTxns();
  txnList = txnHandler.getOpenTxns();
  assertEquals(2, txnList.getOpen_txnsSize());

  // Compact and clean only partition bar of mydb.foo: the third aborted
  // transaction still has a component for partition baz, so it must survive
  // cleanEmptyAbortedTxns().
  rqst = new CompactionRequest("mydb", "foo", CompactionType.MAJOR);
  rqst.setPartitionname("bar");
  txnHandler.compact(rqst);
  assertEquals(0, txnHandler.findReadyToClean().size());
  ci = txnHandler.findNextToCompact("fred");
  assertNotNull(ci);
  txnHandler.markCompacted(ci);
  toClean = txnHandler.findReadyToClean();
  assertEquals(1, toClean.size());
  txnHandler.markCleaned(ci);
  txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
  txnHandler.cleanEmptyAbortedTxns();
  // Two aborted transactions with remaining components plus the new open one.
  txnList = txnHandler.getOpenTxns();
  assertEquals(3, txnList.getOpen_txnsSize());
}
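In recent Hive versions markCleaned() does not delete the queue entry outright; it moves it to the completed-compactions history so SHOW COMPACTIONS can still report it. A hedged sketch of checking that, assuming a Hive version that keeps this history and the state-string constants defined on TxnStore (e.g. TxnStore.SUCCEEDED_RESPONSE), neither of which appears in the test above:

// The cleaned request should now surface as "succeeded" history.
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());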
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache: class TestTxnHandler, method testValidTxnsSomeOpen. The test opens three transactions, aborts one and commits one, and verifies the open-transaction snapshot reported by the TxnHandler.
@Test
public void testValidTxnsSomeOpen() throws Exception {
  // Open three transactions, abort the first, and commit the second.
  txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  txnHandler.abortTxn(new AbortTxnRequest(1));
  txnHandler.commitTxn(new CommitTxnRequest(2));
  // getOpenTxnsInfo() reports aborted as well as open transactions; only the
  // committed transaction drops out.
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(3L, txnsInfo.getTxn_high_water_mark());
  assertEquals(2, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(3L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(3L, txns.getTxn_high_water_mark());
  assertEquals(2, txns.getOpen_txns().size());
  // Java initializes boolean array elements to false, so no reset loop is needed.
  boolean[] saw = new boolean[4];
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  assertTrue(saw[1]);
  assertFalse(saw[2]);
  assertTrue(saw[3]);
}
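Downstream, this snapshot is what readers turn into a ValidTxnList to decide which writes are visible. A minimal sketch, assuming TxnUtils.createValidReadTxnList from org.apache.hadoop.hive.metastore.txn (signature as in recent Hive releases) and no current transaction, hence the 0:

// Aborted txn 1 and open txn 3 are invalid for readers; committed txn 2 is
// visible because it is below the high-water mark and not an exception.
ValidTxnList validTxns = TxnUtils.createValidReadTxnList(txns, 0);
assertFalse(validTxns.isTxnValid(1));
assertTrue(validTxns.isTxnValid(2));
assertFalse(validTxns.isTxnValid(3));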