Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestTxnHandler, method testLockSWSR.
@Test
public void testLockSWSR() throws Exception {
  // Test that read can acquire after write
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(openTxn());
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.SHARED_READ, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.SELECT);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
}
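The build-a-component, build-a-request, lock, assert pattern above repeats in every example below. A minimal sketch of a helper that factors it out, assuming only the constructors and setters already shown; the name buildRequest and its placement inside the test class are hypothetical, not part of TestTxnHandler:

// Hypothetical helper, not part of TestTxnHandler; it uses only the
// constructors and setters exercised by the examples on this page.
private LockRequest buildRequest(LockType type, DataOperationType op) {
  LockComponent comp = new LockComponent(type, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(op);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  return new LockRequest(components, "me", "localhost");
}

With such a helper, testLockSWSR reduces to two buildRequest calls (SHARED_WRITE/UPDATE, then SHARED_READ/SELECT) plus the two assertions.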
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestTxnHandler, method testLockSRE.
@Test
public void testLockSRE() throws Exception {
  // Test that read blocks exclusive
  LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.SELECT);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.NO_TXN);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.WAITING);
}
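A WAITING response is not a failure; the caller is expected to poll until the lock can be granted. A hedged sketch of that polling step, assuming the metastore API also offers CheckLockRequest and a txnHandler.checkLock method and that LockResponse.getLockid() returns the id to poll on; none of these appear in the examples on this page:

// Sketch only: CheckLockRequest and checkLock are assumed, not shown above.
long lockId = res.getLockid();
LockResponse polled = txnHandler.checkLock(new CheckLockRequest(lockId));
// While the shared_read lock is still held, the exclusive request keeps waiting;
// a poll made after that lock is released should report ACQUIRED instead.
assertTrue(polled.getState() == LockState.WAITING);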
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestTxnHandler, method testLockSWSWSW.
@Test
public void testLockSWSWSW() throws Exception {
  // Test that write blocks two writes
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.DELETE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(openTxn());
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.DELETE);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(openTxn());
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.WAITING);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.DELETE);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(openTxn());
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.WAITING);
}
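Both writers stay in WAITING until the first transaction gives up its shared_write lock. A hedged sketch of how the test could be extended to verify that, assuming the API also offers AbortTxnRequest and CheckLockRequest with matching TxnHandler methods, and assuming the first txnid and the second request's lock id were captured in local variables (firstTxn and secondLockId are illustrative names):

// Sketch only; firstTxn holds the first shared_write lock and secondLockId
// is the lock id returned to the first waiting writer.
txnHandler.abortTxn(new AbortTxnRequest(firstTxn));
LockResponse rechecked = txnHandler.checkLock(new CheckLockRequest(secondLockId));
// With the first writer gone, the next writer in the queue can acquire;
// the third writer would still wait behind it.
assertTrue(rechecked.getState() == LockState.ACQUIRED);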
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestTxnHandler, method testLockSRSW.
@Test
public void testLockSRSW() throws Exception {
  // Test that write can acquire after read
  LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.INSERT);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.DELETE);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  req.setTxnid(openTxn());
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
}
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache: class TestTxnHandler, method testLockSameDB.
@Test
public void testLockSameDB() throws Exception {
  // Test that two exclusive locks on the same database collide (the second must wait)
  LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setOperationType(DataOperationType.NO_TXN);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setOperationType(DataOperationType.NO_TXN);
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.WAITING);
}
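The comment originally attached to this test described the opposite case: locks taken on two different databases. A minimal sketch of that non-colliding variant, using the same fixtures; the database name "yourdb" is illustrative and this snippet is not one of the aggregated examples:

// Same request shape as above, but the second lock targets a different database.
LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
comp.setOperationType(DataOperationType.NO_TXN);
List<LockComponent> components = new ArrayList<LockComponent>(1);
components.add(comp);
LockResponse res = txnHandler.lock(new LockRequest(components, "me", "localhost"));
assertTrue(res.getState() == LockState.ACQUIRED);
comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "yourdb");
comp.setOperationType(DataOperationType.NO_TXN);
components.clear();
components.add(comp);
res = txnHandler.lock(new LockRequest(components, "me", "localhost"));
assertTrue(res.getState() == LockState.ACQUIRED);  // different databases do not collide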