Use of org.apache.hadoop.hive.metastore.api.LockRequest in project hive by apache.
The class TestTxnHandler, method testCheckLockTxnAborted.
@Test
public void testCheckLockTxnAborted() throws Exception {
  // Test that once a transaction is aborted, checking its lock fails,
  // because aborting the transaction deletes the lock.
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.DELETE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  long lockid = res.getLockid();
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  try {
    // This will throw NoSuchLockException (even though the lock belonged to
    // the transaction we just aborted) because the abort will have deleted the lock.
    txnHandler.checkLock(new CheckLockRequest(lockid));
    fail("Allowed to check lock on aborted transaction.");
  } catch (NoSuchLockException e) {
  }
}
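The snippet above relies on NoSuchLockException to signal that the aborted transaction's lock is gone. Below is a minimal sketch of how a caller might wrap that check, assuming the same txnHandler fixture used in these tests; the helper name checkLockStillHeld is hypothetical.

private LockState checkLockStillHeld(long lockid) throws Exception {
  try {
    // checkLock returns the current state if the lock still exists ...
    return txnHandler.checkLock(new CheckLockRequest(lockid)).getState();
  } catch (NoSuchLockException e) {
    // ... and throws NoSuchLockException if the lock has been deleted,
    // e.g. because its transaction was aborted as in the test above.
    return null;
  }
}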
Use of org.apache.hadoop.hive.metastore.api.LockRequest in project hive by apache.
The class TestTxnHandler, method testCheckLockAcquireAfterWaiting.
@Test
public void testCheckLockAcquireAfterWaiting() throws Exception {
  // Test that a lock left in WAITING state is acquired on checkLock once the
  // transaction holding the blocking lock is aborted.
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.DELETE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  long txnId = openTxn();
  req.setTxnid(txnId);
  LockResponse res = txnHandler.lock(req);
  long lockid1 = res.getLockid();
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.UPDATE);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(openTxn());
  res = txnHandler.lock(req);
  long lockid2 = res.getLockid();
  assertTrue(res.getState() == LockState.WAITING);
  txnHandler.abortTxn(new AbortTxnRequest(txnId));
  res = txnHandler.checkLock(new CheckLockRequest(lockid2));
  assertTrue(res.getState() == LockState.ACQUIRED);
}
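In the test above the WAITING lock only becomes ACQUIRED after an explicit checkLock call. A caller would typically re-check in the same way; here is a minimal polling sketch assuming the same txnHandler fixture, with a hypothetical waitForLock helper and an arbitrary retry count and sleep interval.

private LockResponse waitForLock(long lockid, int maxAttempts) throws Exception {
  LockResponse res = txnHandler.checkLock(new CheckLockRequest(lockid));
  // Re-check the lock until it leaves the WAITING state or we give up.
  for (int i = 1; i < maxAttempts && res.getState() == LockState.WAITING; i++) {
    Thread.sleep(100);
    res = txnHandler.checkLock(new CheckLockRequest(lockid));
  }
  return res;
}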
Use of org.apache.hadoop.hive.metastore.api.LockRequest in project hive by apache.
The class TestTxnHandler, method testUnlockOnAbort.
@Test
public void testUnlockOnAbort() throws Exception {
  // Test that aborting a transaction releases its locks
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  assertEquals(0, txnHandler.numLocksInLockTable());
}
Use of org.apache.hadoop.hive.metastore.api.LockRequest in project hive by apache.
The class TestTxnHandler, method testLockEESR.
@Test
public void testLockEESR() throws Exception {
  // Test that exclusive blocks exclusive and read
  LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.NO_TXN);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  // A second EXCLUSIVE lock on the same partition must wait
  comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.NO_TXN);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.WAITING);
  // A SHARED_READ lock on the same partition must wait as well
  comp = new LockComponent(LockType.SHARED_READ, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.SELECT);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.WAITING);
}
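For contrast with the exclusive case above, the sketch below (same style and fixture, not one of the quoted tests) shows the expected behavior when both requests are SHARED_READ: read locks on the same partition are compatible, so the second one should be acquired immediately.

long txnid1 = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.DB, "mydb");
comp.setTablename("mytable");
comp.setPartitionname("mypartition");
comp.setOperationType(DataOperationType.SELECT);
List<LockComponent> components = new ArrayList<LockComponent>(1);
components.add(comp);
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid1);
LockResponse res = txnHandler.lock(req);
assertTrue(res.getState() == LockState.ACQUIRED);
// A second reader does not conflict with the first.
comp = new LockComponent(LockType.SHARED_READ, LockLevel.DB, "mydb");
comp.setTablename("mytable");
comp.setPartitionname("mypartition");
comp.setOperationType(DataOperationType.SELECT);
components.clear();
components.add(comp);
req = new LockRequest(components, "me", "localhost");
req.setTxnid(openTxn());
res = txnHandler.lock(req);
assertTrue(res.getState() == LockState.ACQUIRED);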
Use of org.apache.hadoop.hive.metastore.api.LockRequest in project hive by apache.
The class TestTxnHandler, method testUnlockOnCommit.
@Test
public void testUnlockOnCommit() throws Exception {
  // Test that committing unlocks
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setOperationType(DataOperationType.DELETE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  txnHandler.commitTxn(new CommitTxnRequest(txnid));
  assertEquals(0, txnHandler.numLocksInLockTable());
}
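testUnlockOnAbort and testUnlockOnCommit together show that a transaction's locks are released by abortTxn and commitTxn, with no separate unlock call. A minimal sketch of the resulting client pattern, assuming the same txnHandler fixture and openTxn() helper as the tests above:

long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
comp.setOperationType(DataOperationType.UPDATE);
List<LockComponent> components = new ArrayList<LockComponent>(1);
components.add(comp);
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
try {
  txnHandler.lock(req);
  // ... perform the transactional work here ...
  txnHandler.commitTxn(new CommitTxnRequest(txnid));  // releases the locks on success
} catch (Exception e) {
  txnHandler.abortTxn(new AbortTxnRequest(txnid));    // releases the locks on failure
  throw e;
}

A real caller would also inspect the LockState returned by lock (for example with the waitForLock sketch above) before doing any work, since the request may come back WAITING rather than ACQUIRED.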