Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project Hive by Apache — class TestInitiator, method cleanEmptyAbortedTxns.
@Test
public void cleanEmptyAbortedTxns() throws Exception {
  // Verify the Initiator cleans aborted transactions that left no rows in
  // TXN_COMPONENTS, while an aborted txn that still has a component row
  // survives the cleanup.
  newTable("default", "ceat", false);

  // Aborted txn WITH a txn_components entry: must NOT be cleaned.
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("ceat");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  // Acquiring the lock records the component row; the response itself is unused.
  txnHandler.lock(req);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));

  // A batch of aborted txns with no components: all of these should be cleaned.
  conf.setIntVar(HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH, TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50);
  OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50, "user", "hostname"));
  txnHandler.abortTxns(new AbortTxnsRequest(resp.getTxn_ids()));
  GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1, openTxns.getOpen_txnsSize());

  startInitiator();

  // Only the aborted txn that still owns a component entry should remain.
  openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(1, openTxns.getOpen_txnsSize());
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project Hive by Apache — class TestTxnHandler, method testAbortTxn.
@Test
public void testAbortTxn() throws Exception {
  // Open three txns; abort #1, then verify state reporting, the idempotence of
  // abortTxn, and its error messages for committed, cleaned, and unknown txn ids.
  OpenTxnsResponse openedTxns = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  List<Long> txnList = openedTxns.getTxn_ids();
  long first = txnList.get(0);
  assertEquals(1L, first);
  long second = txnList.get(1);
  assertEquals(2L, second);
  txnHandler.abortTxn(new AbortTxnRequest(1));

  // Give txn 3 a write id and a dynamic partition so it is not an "empty" txn.
  List<String> parts = new ArrayList<>();
  parts.add("p=1");
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest("default", "T");
  rqst.setTxnIds(Collections.singletonList(3L));
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(3, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);
  AddDynamicPartitions adp = new AddDynamicPartitions(3, writeId, "default", "T", parts);
  adp.setOperationType(DataOperationType.INSERT);
  txnHandler.addDynamicPartitions(adp);

  // All three txns are still reported: 1 is ABORTED, 2 and 3 are OPEN.
  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(3, txnsInfo.getTxn_high_water_mark());
  assertEquals(3, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(2L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
  assertEquals(3, txnsInfo.getOpen_txns().get(2).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(2).getState());

  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(3, txns.getTxn_high_water_mark());
  assertEquals(3, txns.getOpen_txns().size());
  // Java boolean arrays are zero-initialized, so no explicit reset loop is needed.
  boolean[] saw = new boolean[4];
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  for (int i = 1; i < saw.length; i++) {
    assertTrue(saw[i]);
  }

  txnHandler.commitTxn(new CommitTxnRequest(2));
  // this succeeds as abortTxn is idempotent
  txnHandler.abortTxn(new AbortTxnRequest(1));

  boolean gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // this is the last committed, so it is still in the txns table
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(2) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);

  gotException = false;
  txnHandler.commitTxn(new CommitTxnRequest(3));
  try {
    txnHandler.abortTxn(new AbortTxnRequest(3));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // txn 3 is not an empty txn, so we get a better msg
    Assert.assertEquals("Transaction " + JavaUtils.txnIdToString(3) + " is already committed.", ex.getMessage());
  }
  Assert.assertTrue(gotException);

  // Force the cleaner to purge empty aborted/committed txns, then restore the timeout.
  txnHandler.setOpenTxnTimeOutMillis(1);
  txnHandler.cleanEmptyAbortedAndCommittedTxns();
  txnHandler.setOpenTxnTimeOutMillis(1000);

  gotException = false;
  try {
    txnHandler.abortTxn(new AbortTxnRequest(2));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    // now the second transaction is cleared and since it was empty, we do not recognize it anymore
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(2), ex.getMessage());
  }
  Assert.assertTrue(gotException);

  gotException = false;
  try {
    // A txn id that was never opened.
    txnHandler.abortTxn(new AbortTxnRequest(4));
  } catch (NoSuchTxnException ex) {
    gotException = true;
    Assert.assertEquals("No such transaction " + JavaUtils.txnIdToString(4), ex.getMessage());
  }
  Assert.assertTrue(gotException);
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project Hive by Apache — class TestTxnHandler, method testRecoverManyTimeouts.
@Test
public void testRecoverManyTimeouts() throws Exception {
  // With a 1 ms timeout, a large batch of open txns should all be aborted by
  // the timeout sweep. 503 txns also exercises batching inside performTimeOuts.
  long previousTimeout = txnHandler.setTimeout(1);
  try {
    txnHandler.openTxns(new OpenTxnRequest(503, "me", "localhost"));
    Thread.sleep(1000);
    txnHandler.performTimeOuts();

    // Every reported txn must now be ABORTED, and all 503 must be accounted for.
    GetOpenTxnsInfoResponse info = txnHandler.getOpenTxnsInfo();
    int abortedCount = 0;
    for (TxnInfo txn : info.getOpen_txns()) {
      assertEquals(TxnState.ABORTED, txn.getState());
      abortedCount++;
    }
    assertEquals(503, abortedCount);
  } finally {
    // Restore the original timeout so later tests are unaffected.
    txnHandler.setTimeout(previousTimeout);
  }
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project Hive by Apache — class TestTxnHandler, method testReplTimeouts.
@Test
public void testReplTimeouts() throws Exception {
  // Ordinary txns must be aborted by the timeout sweep, while REPL_CREATED
  // txns stay open — replication-created txns are exempt from timeouts.
  createDatabaseForReplTests("default", MetaStoreUtils.getDefaultCatalog(conf));
  long previousTimeout = txnHandler.setTimeout(1);
  try {
    OpenTxnRequest request = new OpenTxnRequest(3, "me", "localhost");
    OpenTxnsResponse normalTxns = txnHandler.openTxns(request);

    // Reuse the same request object, now flagged as replication-created.
    request.setReplPolicy("default.*");
    request.setReplSrcTxnIds(normalTxns.getTxn_ids());
    request.setTxn_type(TxnType.REPL_CREATED);
    OpenTxnsResponse replTxns = txnHandler.openTxns(request);

    Thread.sleep(1000);
    txnHandler.performTimeOuts();

    // Partition the reported txns by state and check each id landed in the
    // expected batch.
    GetOpenTxnsInfoResponse info = txnHandler.getOpenTxnsInfo();
    int abortedCount = 0;
    int openCount = 0;
    for (TxnInfo txn : info.getOpen_txns()) {
      if (txn.getState() == TxnState.ABORTED) {
        assertTrue(normalTxns.getTxn_ids().contains(txn.getId()));
        abortedCount++;
      } else if (txn.getState() == TxnState.OPEN) {
        assertTrue(replTxns.getTxn_ids().contains(txn.getId()));
        openCount++;
      }
    }
    assertEquals(3, abortedCount);
    assertEquals(3, openCount);
  } finally {
    // Restore the original timeout so later tests are unaffected.
    txnHandler.setTimeout(previousTimeout);
  }
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project Hive by Apache — class TestCompactionTxnHandler, method addDynamicPartitions.
@Test
public void addDynamicPartitions() throws Exception {
  // Verify that partitions registered via addDynamicPartitions() on a committed
  // txn each surface as a separate potential compaction target.
  String dbName = "default";
  String tableName = "adp_table";
  OpenTxnsResponse openTxns = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
  long txnId = openTxns.getTxn_ids().get(0);

  // Allocate a write id for the txn; the first allocation for a table is 1.
  AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(dbName, tableName);
  rqst.setTxnIds(openTxns.getTxn_ids());
  AllocateTableWriteIdsResponse writeIds = txnHandler.allocateTableWriteIds(rqst);
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(txnId, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);

  // lock a table, as in dynamic partitions
  LockComponent lc = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName);
  lc.setIsDynamicPartitionWrite(true);
  lc.setTablename(tableName);
  DataOperationType dop = DataOperationType.UPDATE;
  lc.setOperationType(dop);
  LockRequest lr = new LockRequest(Arrays.asList(lc), "me", "localhost");
  lr.setTxnid(txnId);
  LockResponse lock = txnHandler.lock(lr);
  assertEquals(LockState.ACQUIRED, lock.getState());

  AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, dbName, tableName, Arrays.asList("ds=yesterday", "ds=today"));
  adp.setOperationType(dop);
  txnHandler.addDynamicPartitions(adp);
  txnHandler.commitTxn(new CommitTxnRequest(txnId));

  // Each dynamic partition becomes its own compaction candidate.
  Set<CompactionInfo> potentials = txnHandler.findPotentialCompactions(1000, -1L);
  assertEquals(2, potentials.size());

  // Collect the candidates in CompactionInfo's natural order and assert the
  // partition list directly, instead of a fragile switch-on-counter.
  SortedSet<CompactionInfo> sorted = new TreeSet<>(potentials);
  List<String> partNames = new ArrayList<>();
  for (CompactionInfo ci : sorted) {
    assertEquals(dbName, ci.dbname);
    assertEquals(tableName, ci.tableName);
    partNames.add(ci.partName);
  }
  // CompactionInfo ordering places "ds=today" before "ds=yesterday".
  assertEquals(Arrays.asList("ds=today", "ds=yesterday"), partNames);
}
Aggregations