Use of org.apache.hadoop.hive.metastore.api.CheckLockRequest in project presto by prestodb.
The class ThriftHiveMetastore, method lock:
@Override
public long lock(MetastoreContext metastoreContext, String databaseName, String tableName) {
    try {
        final LockComponent lockComponent = new LockComponent(EXCLUSIVE, LockLevel.TABLE, databaseName);
        lockComponent.setTablename(tableName);
        final LockRequest lockRequest = new LockRequest(Lists.newArrayList(lockComponent), metastoreContext.getUsername(), InetAddress.getLocalHost().getHostName());
        LockResponse lockResponse = stats.getLock().wrap(() -> getMetastoreClientThenCall(metastoreContext, client -> client.lock(lockRequest))).call();
        LockState state = lockResponse.getState();
        long lockId = lockResponse.getLockid();
        final AtomicBoolean acquired = new AtomicBoolean(state.equals(ACQUIRED));
        try {
            if (state.equals(WAITING)) {
                retry().maxAttempts(Integer.MAX_VALUE - 100)
                        .stopOnIllegalExceptions()
                        .exceptionMapper(e -> {
                            if (e instanceof WaitingForLockException) {
                                // only retry on waiting for lock exception
                                return e;
                            } else {
                                return new IllegalStateException(e.getMessage(), e);
                            }
                        })
                        .run("lock", stats.getLock().wrap(() -> getMetastoreClientThenCall(metastoreContext, client -> {
                            LockResponse response = client.checkLock(new CheckLockRequest(lockId));
                            LockState newState = response.getState();
                            if (newState.equals(WAITING)) {
                                throw new WaitingForLockException("Waiting for lock.");
                            } else if (newState.equals(ACQUIRED)) {
                                acquired.set(true);
                            } else {
                                throw new RuntimeException(String.format("Failed to acquire lock: %s", newState.name()));
                            }
                            return null;
                        })));
            }
        } finally {
            if (!acquired.get()) {
                unlock(metastoreContext, lockId);
            }
        }
        if (!acquired.get()) {
            throw new RuntimeException("Failed to acquire lock");
        }
        return lockId;
    } catch (TException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    } catch (Exception e) {
        throw propagate(e);
    }
}
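The returned lock id is only useful if the caller eventually releases it through the companion unlock method (the same method the finally block above falls back to). A minimal usage sketch, assuming a ThriftHiveMetastore instance named metastore and a MetastoreContext already in scope (both names are illustrative), might look like:

    // Acquire an exclusive table lock, do the work, and always release the lock.
    long lockId = metastore.lock(metastoreContext, "my_database", "my_table");
    try {
        // perform the metastore update that required the exclusive table lock
    } finally {
        metastore.unlock(metastoreContext, lockId);
    }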
Use of org.apache.hadoop.hive.metastore.api.CheckLockRequest in project hive by apache.
The class TestTxnHandler, method testCheckLockNoSuchLock:
@Test
public void testCheckLockNoSuchLock() throws Exception {
    try {
        txnHandler.checkLock(new CheckLockRequest(23L));
        fail("Allowed to check lock on non-existent lock");
    } catch (NoSuchLockException e) {
        // expected: lock id 23 was never issued
    }
}
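The same contract is expected to hold for unlock: releasing a lock id that was never issued should also fail with NoSuchLockException rather than succeed silently. A hedged companion check, reusing the txnHandler from the test above, might look like:

    // Hypothetical companion test: unlocking a lock id that was never issued.
    try {
        txnHandler.unlock(new UnlockRequest(23L));
        fail("Allowed to unlock a non-existent lock");
    } catch (NoSuchLockException e) {
        // expected
    }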
Use of org.apache.hadoop.hive.metastore.api.CheckLockRequest in project hive by apache.
The class TestTxnHandler, method testLockTimeout:
@Test
public void testLockTimeout() throws Exception {
    long timeout = txnHandler.setTimeout(1);
    try {
        LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
        comp.setTablename("mytable");
        comp.setPartitionname("mypartition=myvalue");
        comp.setOperationType(DataOperationType.NO_TXN);
        List<LockComponent> components = new ArrayList<LockComponent>(1);
        components.add(comp);
        LockRequest req = new LockRequest(components, "me", "localhost");
        LockResponse res = txnHandler.lock(req);
        assertTrue(res.getState() == LockState.ACQUIRED);
        Thread.sleep(1000);
        txnHandler.performTimeOuts();
        txnHandler.checkLock(new CheckLockRequest(res.getLockid()));
        fail("Told there was a lock, when it should have timed out.");
    } catch (NoSuchLockException e) {
        // expected: the un-heartbeated lock was reaped by performTimeOuts()
    } finally {
        txnHandler.setTimeout(timeout);
    }
}
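A lock taken outside a transaction only stays alive while it is heartbeated, which is why the un-heartbeated lock above disappears once performTimeOuts() runs. A minimal sketch of what a real client would do between lock and unlock, assuming the same txnHandler, could be:

    // Periodically refresh the lock so it is not reaped as timed out.
    HeartbeatRequest heartbeat = new HeartbeatRequest();
    heartbeat.setLockid(res.getLockid());
    txnHandler.heartbeat(heartbeat); // throws NoSuchLockException if the lock has already timed out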
Use of org.apache.hadoop.hive.metastore.api.CheckLockRequest in project hive by apache.
The class TestTxnHandler, method testMultipleLockWait:
@Test
public void testMultipleLockWait() throws Exception {
    // Acquire one exclusive lock covering two partitions, then verify that a second
    // exclusive request on one of those partitions waits until the first lock is released.
    LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition=myvalue");
    comp.setOperationType(DataOperationType.NO_TXN);
    List<LockComponent> components = new ArrayList<LockComponent>(2);
    components.add(comp);
    comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("anotherpartition=anothervalue");
    comp.setOperationType(DataOperationType.NO_TXN);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    LockResponse res = txnHandler.lock(req);
    long lockid1 = res.getLockid();
    assertTrue(res.getState() == LockState.ACQUIRED);
    comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition=myvalue");
    comp.setOperationType(DataOperationType.NO_TXN);
    components = new ArrayList<LockComponent>(1);
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    res = txnHandler.lock(req);
    long lockid2 = res.getLockid();
    assertTrue(res.getState() == LockState.WAITING);
    txnHandler.unlock(new UnlockRequest(lockid1));
    res = txnHandler.checkLock(new CheckLockRequest(lockid2));
    assertTrue(res.getState() == LockState.ACQUIRED);
}
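Outside a test, a WAITING response is normally handled by polling checkLock until the lock reaches a terminal state. A minimal sketch, with an arbitrary back-off interval, might look like:

    // Poll until the pending lock is granted or reaches a state other than WAITING.
    LockResponse check = txnHandler.checkLock(new CheckLockRequest(lockid2));
    while (check.getState() == LockState.WAITING) {
        Thread.sleep(500); // arbitrary back-off; a real client would bound the total wait
        check = txnHandler.checkLock(new CheckLockRequest(lockid2));
    }
    if (check.getState() != LockState.ACQUIRED) {
        throw new IllegalStateException("Lock " + lockid2 + " not acquired: " + check.getState());
    }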
Use of org.apache.hadoop.hive.metastore.api.CheckLockRequest in project hive by apache.
The class TestTxnHandler, method testCheckLockTxnAborted:
@Test
public void testCheckLockTxnAborted() throws Exception {
    // Test that once a transaction is aborted its locks are deleted, so a
    // subsequent checkLock on one of those locks fails with NoSuchLockException.
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition=myvalue");
    comp.setOperationType(DataOperationType.DELETE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    long lockid = res.getLockid();
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    try {
        // This will throw NoSuchLockException (even though it's the
        // transaction we've closed) because that will have deleted the lock.
        txnHandler.checkLock(new CheckLockRequest(lockid));
        fail("Allowed to check lock on aborted transaction.");
    } catch (NoSuchLockException e) {
        // expected
    }
}
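The comment inside the try block is the key detail: aborting the transaction deletes the lock row, so checkLock reports NoSuchLockException rather than an abort-specific error. Heartbeating the aborted transaction itself, by contrast, would typically surface the abort directly; a hedged sketch, assuming standard TxnHandler behavior, might be:

    // Hypothetical follow-up: heartbeat the aborted transaction rather than its lock.
    HeartbeatRequest heartbeat = new HeartbeatRequest();
    heartbeat.setTxnid(txnid);
    try {
        txnHandler.heartbeat(heartbeat);
        fail("Expected the heartbeat on an aborted transaction to fail");
    } catch (TxnAbortedException e) {
        // expected: the transaction was aborted above
    }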