Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
The class TestHiveMetaStoreTxns, method testLocksWithTxn.
@Test
public void testLocksWithTxn() throws Exception {
  long txnid = client.openTxn("me");
  // Request two semi-shared (write) locks and one shared (read) lock under the open transaction.
  LockRequestBuilder rqstBuilder = new LockRequestBuilder();
  rqstBuilder.setTransactionId(txnid)
      .addLockComponent(new LockComponentBuilder().setDbName("mydb").setTableName("mytable")
          .setPartitionName("mypartition").setSemiShared().setOperationType(DataOperationType.UPDATE).build())
      .addLockComponent(new LockComponentBuilder().setDbName("mydb").setTableName("yourtable")
          .setSemiShared().setOperationType(DataOperationType.UPDATE).build())
      .addLockComponent(new LockComponentBuilder().setDbName("yourdb").setShared()
          .setOperationType(DataOperationType.SELECT).build())
      .setUser("fred");
  LockResponse res = client.lock(rqstBuilder.build());
  Assert.assertEquals(1L, res.getLockid());
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  res = client.checkLock(1);
  Assert.assertEquals(1L, res.getLockid());
  Assert.assertEquals(LockState.ACQUIRED, res.getState());
  // Heartbeat keeps the transaction and its locks alive; committing the txn releases them.
  client.heartbeat(txnid, 1);
  client.commitTxn(txnid);
}
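The test above relies on commitTxn to release the locks it took under the transaction. As a point of comparison, the following is a minimal sketch (not taken from the Hive tests) of taking a shared read lock with no transaction, polling a WAITING response with checkLock, and releasing it explicitly with unlock; the one-second polling interval and the error handling are illustrative assumptions.

// Illustrative sketch only: a non-transactional shared read lock, using the same
// client and builder classes as the test above.
LockRequest readRequest = new LockRequestBuilder()
    .addLockComponent(new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("mytable")
        .setShared()
        .setOperationType(DataOperationType.SELECT)
        .build())
    .setUser("fred")
    .build();
LockResponse readResponse = client.lock(readRequest);
long lockId = readResponse.getLockid();
try {
  // checkLock re-evaluates a WAITING lock; poll until it is granted or refused
  while (readResponse.getState() == LockState.WAITING) {
    Thread.sleep(1000);
    readResponse = client.checkLock(lockId);
  }
  if (readResponse.getState() != LockState.ACQUIRED) {
    throw new IllegalStateException("Lock not acquired: " + readResponse.getState());
  }
  // ... read the table while the shared lock is held ...
} finally {
  // Locks taken outside a transaction are not released by commitTxn, so unlock explicitly.
  client.unlock(lockId);
}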
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
The class TestCompactionTxnHandler, method addDynamicPartitions.
@Test
public void addDynamicPartitions() throws Exception {
  String dbName = "default";
  String tableName = "adp_table";
  OpenTxnsResponse openTxns = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
  long txnId = openTxns.getTxn_ids().get(0);
  // allocate a write id for the table inside the open transaction
  AllocateTableWriteIdsResponse writeIds =
      txnHandler.allocateTableWriteIds(new AllocateTableWriteIdsRequest(openTxns.getTxn_ids(), dbName, tableName));
  long writeId = writeIds.getTxnToWriteIds().get(0).getWriteId();
  assertEquals(txnId, writeIds.getTxnToWriteIds().get(0).getTxnId());
  assertEquals(1, writeId);
  // lock a table, as in dynamic partitions
  LockComponent lc = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName);
  lc.setIsDynamicPartitionWrite(true);
  lc.setTablename(tableName);
  DataOperationType dop = DataOperationType.UPDATE;
  lc.setOperationType(dop);
  LockRequest lr = new LockRequest(Arrays.asList(lc), "me", "localhost");
  lr.setTxnid(txnId);
  LockResponse lock = txnHandler.lock(lr);
  assertEquals(LockState.ACQUIRED, lock.getState());
  // report the partitions that were actually written, then commit the transaction
  AddDynamicPartitions adp =
      new AddDynamicPartitions(txnId, writeId, dbName, tableName, Arrays.asList("ds=yesterday", "ds=today"));
  adp.setOperationType(dop);
  txnHandler.addDynamicPartitions(adp);
  txnHandler.commitTxn(new CommitTxnRequest(txnId));
  // each dynamically added partition becomes a potential compaction target
  Set<CompactionInfo> potentials = txnHandler.findPotentialCompactions(1000);
  assertEquals(2, potentials.size());
  SortedSet<CompactionInfo> sorted = new TreeSet<CompactionInfo>(potentials);
  int i = 0;
  for (CompactionInfo ci : sorted) {
    assertEquals(dbName, ci.dbname);
    assertEquals(tableName, ci.tableName);
    switch (i++) {
      case 0:
        assertEquals("ds=today", ci.partName);
        break;
      case 1:
        assertEquals("ds=yesterday", ci.partName);
        break;
      default:
        throw new RuntimeException("What?");
    }
  }
}
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
The class TestTxnHandler, method testWrongLockForOperation.
@Ignore("now that every op has a txn ctx, we don't produce the error expected here....")
@Test
public void testWrongLockForOperation() throws Exception {
  // NO_TXN is not a valid operation type for a lock taken inside an open transaction
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
  comp.setTablename("mytable");
  comp.setPartitionname("mypartition");
  comp.setOperationType(DataOperationType.NO_TXN);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(openTxn());
  Exception expectedError = null;
  try {
    LockResponse res = txnHandler.lock(req);
  } catch (Exception e) {
    expectedError = e;
  }
  Assert.assertTrue(expectedError != null && expectedError.getMessage().contains("Unexpected DataOperationType"));
}
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project presto by prestodb.
The class ThriftHiveMetastore, method lock.
@Override
public long lock(MetastoreContext metastoreContext, String databaseName, String tableName)
{
    try {
        // request an exclusive table-level lock on behalf of the current user
        final LockComponent lockComponent = new LockComponent(EXCLUSIVE, LockLevel.TABLE, databaseName);
        lockComponent.setTablename(tableName);
        final LockRequest lockRequest = new LockRequest(
                Lists.newArrayList(lockComponent),
                metastoreContext.getUsername(),
                InetAddress.getLocalHost().getHostName());
        LockResponse lockResponse = stats.getLock()
                .wrap(() -> getMetastoreClientThenCall(metastoreContext, client -> client.lock(lockRequest)))
                .call();
        LockState state = lockResponse.getState();
        long lockId = lockResponse.getLockid();
        final AtomicBoolean acquired = new AtomicBoolean(state.equals(ACQUIRED));
        try {
            if (state.equals(WAITING)) {
                // keep re-checking a WAITING lock until it is either acquired or refused
                retry().maxAttempts(Integer.MAX_VALUE - 100)
                        .stopOnIllegalExceptions()
                        .exceptionMapper(e -> {
                            if (e instanceof WaitingForLockException) {
                                // only retry on waiting for lock exception
                                return e;
                            }
                            else {
                                return new IllegalStateException(e.getMessage(), e);
                            }
                        })
                        .run("lock", stats.getLock().wrap(() -> getMetastoreClientThenCall(metastoreContext, client -> {
                            LockResponse response = client.checkLock(new CheckLockRequest(lockId));
                            LockState newState = response.getState();
                            if (newState.equals(WAITING)) {
                                throw new WaitingForLockException("Waiting for lock.");
                            }
                            else if (newState.equals(ACQUIRED)) {
                                acquired.set(true);
                            }
                            else {
                                throw new RuntimeException(String.format("Failed to acquire lock: %s", newState.name()));
                            }
                            return null;
                        })));
            }
        }
        finally {
            // never leave a half-acquired lock behind
            if (!acquired.get()) {
                unlock(metastoreContext, lockId);
            }
        }
        if (!acquired.get()) {
            throw new RuntimeException("Failed to acquire lock");
        }
        return lockId;
    }
    catch (TException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
    catch (Exception e) {
        throw propagate(e);
    }
}
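The finally block above hands the lock id to an unlock helper when acquisition fails. The actual Presto implementation of that helper is not shown here; the following is only a rough sketch of what it could look like, and the stats.getUnlock() counter and the client.unlock(new UnlockRequest(lockId)) call are assumptions made by analogy with the lock path above.

// Hypothetical sketch, not the Presto source: release a metastore lock by id.
// stats.getUnlock() and client.unlock(new UnlockRequest(lockId)) are assumed by
// analogy with stats.getLock() and client.lock(lockRequest) in the method above.
private void unlock(MetastoreContext metastoreContext, long lockId)
{
    try {
        stats.getUnlock()
                .wrap(() -> getMetastoreClientThenCall(metastoreContext, client -> {
                    client.unlock(new UnlockRequest(lockId));
                    return null;
                }))
                .call();
    }
    catch (TException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
    catch (Exception e) {
        throw propagate(e);
    }
}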
Use of org.apache.hadoop.hive.metastore.api.LockResponse in project hive by apache.
The class TestInitiator, method majorCompactOnPartitionTooManyAborts.
@Test
public void majorCompactOnPartitionTooManyAborts() throws Exception {
  Table t = newTable("default", "mcoptma", true);
  Partition p = newPartition(t, "today");
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  // abort more transactions against the partition than the configured threshold allows
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("mcoptma");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.DELETE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  // the Initiator should queue a major compaction for the over-aborted partition
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("initiated", compacts.get(0).getState());
  Assert.assertEquals("mcoptma", compacts.get(0).getTablename());
  Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
  Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}