Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache.
Class TestTxnHandler, method heartbeatTxnRangeOneAborted:
@Test
public void heartbeatTxnRangeOneAborted() throws Exception {
  long txnid = openTxn();
  assertEquals(1, txnid);
  txnid = openTxn();
  txnid = openTxn();
  // Abort the third transaction, then heartbeat the whole range 1..3.
  txnHandler.abortTxn(new AbortTxnRequest(3));
  HeartbeatTxnRangeResponse rsp = txnHandler.heartbeatTxnRange(new HeartbeatTxnRangeRequest(1, 3));
  // Exactly one txn in the range should report as aborted, and it should be txn 3.
  assertEquals(1, rsp.getAbortedSize());
  Long txn = rsp.getAborted().iterator().next();
  assertEquals(3L, (long) txn);
  assertEquals(0, rsp.getNosuch().size());
}
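These tests call an openTxn() helper that the snippets do not show. A minimal sketch of what such a helper likely looks like in TestTxnHandler, assuming the txnHandler field and the "me"/"localhost" caller identity used elsewhere in these tests:

// Hypothetical reconstruction of the openTxn() helper: open one txn and return its id.
private long openTxn() throws MetaException {
  List<Long> txns = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost")).getTxn_ids();
  return txns.get(0);
}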
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache.
Class TestTxnHandler, method testAbortTxn:
@Test
public void testAbortTxn() throws Exception {
  OpenTxnsResponse openedTxns = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  List<Long> txnList = openedTxns.getTxn_ids();
  long first = txnList.get(0);
  assertEquals(1L, first);
  long second = txnList.get(1);
  assertEquals(2L, second);
  txnHandler.abortTxn(new AbortTxnRequest(1));
  // Make txn 3 non-empty by adding a dynamic partition to it.
  List<String> parts = new ArrayList<String>();
  parts.add("p=1");
  AddDynamicPartitions adp = new AddDynamicPartitions(3, "default", "T", parts);
  adp.setOperationType(DataOperationType.INSERT);
  txnHandler.addDynamicPartitions(adp);
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(3, txnsInfo.getTxn_high_water_mark());
  assertEquals(3, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  assertEquals(3L, txnsInfo.getOpen_txns().get(2).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(2).getState());
  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(3, txns.getTxn_high_water_mark());
  assertEquals(3, txns.getOpen_txns().size());
  boolean[] saw = new boolean[4]; // elements default to false
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  for (int i = 1; i < saw.length; i++) {
    assertTrue(saw[i]);
  }
  txnHandler.commitTxn(new CommitTxnRequest(2));
  // This succeeds because abortTxn is idempotent for an already-aborted txn.
  txnHandler.abortTxn(new AbortTxnRequest(1));
  boolean gotException = false;
  try {
    // Txn 2 was committed, so aborting it must fail.
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // If this weren't an empty txn, we'd get a more specific message.
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(2), ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  txnHandler.commitTxn(new CommitTxnRequest(3));
  try {
    txnHandler.abortTxn(new AbortTxnRequest(3));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // Txn 3 is not an empty txn, so we get a more specific message.
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(3) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);
  gotException = false;
  try {
    // Txn 4 was never opened.
    txnHandler.abortTxn(new AbortTxnRequest(4));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(4), ex.getMessage());
  }
  Assert.assertTrue(gotException);
}
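Outside handler-level tests, client code typically reaches the same abort path through IMetaStoreClient rather than by building an AbortTxnRequest directly. A minimal sketch, assuming a metastore reachable through the HiveConf on the classpath:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public class AbortTxnSketch {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    long txnid = client.openTxn("me"); // open a single txn as user "me"
    client.rollbackTxn(txnid);         // aborts it; internally this sends an AbortTxnRequest
    client.close();
  }
}

Calling rollbackTxn on an already-committed txn raises NoSuchTxnException, mirroring the handler behavior asserted above.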
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache.
Class TestInitiator, method noCompactOnManyDifferentPartitionAborts:
@Test
public void noCompactOnManyDifferentPartitionAborts() throws Exception {
  Table t = newTable("default", "ncomdpa", true);
  for (int i = 0; i < 11; i++) {
    Partition p = newPartition(t, "day-" + i);
  }
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  // Abort 11 txns, but spread them across 11 different partitions.
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ncomdpa");
    comp.setPartitionname("ds=day-" + i);
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  startInitiator();
  // Each partition saw only one aborted txn, so no single partition crosses the
  // threshold of 10 and the Initiator schedules no compactions.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(0, rsp.getCompactsSize());
}
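In a deployment, the threshold this test sets programmatically corresponds to the hive.compactor.abortedtxn.threshold property (the name is taken from HiveConf; verify it against your Hive version). A sketch of the programmatic equivalent:

// Sketch: lower the per-table/partition aborted-txn threshold the Initiator checks.
HiveConf conf = new HiveConf();
conf.setIntVar(HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);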
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache.
Class TestInitiator, method majorCompactOnTableTooManyAborts:
@Test
public void majorCompactOnTableTooManyAborts() throws Exception {
  Table t = newTable("default", "mcottma", false);
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  // Abort 11 txns against the same table, one more than the threshold.
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("mcottma");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  startInitiator();
  // Crossing the threshold on a single table should trigger exactly one MAJOR compaction.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("mcottma", compacts.get(0).getTablename());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
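To inspect what the Initiator has queued from regular client code, IMetaStoreClient exposes showCompactions(). A minimal sketch (the connection setup is illustrative):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;

public class ShowCompactionsSketch {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    // Print every compaction the metastore knows about, with its state and type.
    for (ShowCompactResponseElement e : client.showCompactions().getCompacts()) {
      System.out.println(e.getDbname() + "." + e.getTablename()
          + " " + e.getState() + " " + e.getType());
    }
    client.close();
  }
}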
Use of org.apache.hadoop.hive.metastore.api.AbortTxnRequest in project hive by apache.
Class TestInitiator, method noCompactWhenCompactAlreadyScheduled:
@Test
public void noCompactWhenCompactAlreadyScheduled() throws Exception {
  Table t = newTable("default", "ncwcas", false);
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ncwcas");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  // Manually schedule a MAJOR compaction before the Initiator runs.
  CompactionRequest rqst = new CompactionRequest("default", "ncwcas", CompactionType.MAJOR);
  txnHandler.compact(rqst);
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("ncwcas", compacts.get(0).getTablename());
  startInitiator();
  // The Initiator must not queue a second compaction while one is already pending.
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("ncwcas", compacts.get(0).getTablename());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
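A compaction can also be requested for a single partition rather than a whole table: CompactionRequest carries an optional partition name. A sketch at the handler level, matching the test above (the partition name here is illustrative):

// Request a MAJOR compaction scoped to one partition instead of the whole table.
CompactionRequest rqst = new CompactionRequest("default", "ncwcas", CompactionType.MAJOR);
rqst.setPartitionname("ds=day-1"); // optional; omit to compact the whole table
txnHandler.compact(rqst);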