Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
From the class TestInitiator, method enoughDeltasNoBase:
@Test
public void enoughDeltasNoBase() throws Exception {
    Table t = newTable("default", "ednb", true);
    Partition p = newPartition(t, "today");
    // One wide delta plus ten single-transaction deltas, but no base file.
    addDeltaFile(t, p, 1L, 201L, 200);
    addDeltaFile(t, p, 202L, 202L, 1);
    addDeltaFile(t, p, 203L, 203L, 1);
    addDeltaFile(t, p, 204L, 204L, 1);
    addDeltaFile(t, p, 205L, 205L, 1);
    addDeltaFile(t, p, 206L, 206L, 1);
    addDeltaFile(t, p, 207L, 207L, 1);
    addDeltaFile(t, p, 208L, 208L, 1);
    addDeltaFile(t, p, 209L, 209L, 1);
    addDeltaFile(t, p, 210L, 210L, 1);
    addDeltaFile(t, p, 211L, 211L, 1);
    burnThroughTransactions(210);
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("ednb");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.DELETE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
    startInitiator();
    // With this many deltas and no base, the Initiator should queue a MAJOR compaction.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("ednb", compacts.get(0).getTablename());
    Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
    Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
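For context, a minimal standalone sketch of the same lock construction outside the test harness, assuming the Hive metastore API classes are on the classpath; the class and method names here are made up for illustration:

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockType;

public class PartitionDeleteLockExample {

    // Builds the same SHARED_WRITE partition-level lock the test submits before committing.
    static LockRequest buildDeleteLock(long txnId) {
        LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
        comp.setTablename("ednb");           // table and partition taken from the test above
        comp.setPartitionname("ds=today");
        comp.setOperationType(DataOperationType.DELETE);
        LockRequest req = new LockRequest(Collections.singletonList(comp), "me", "localhost");
        req.setTxnid(txnId);                 // tie the lock to the open transaction
        return req;
    }
}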
Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
From the class Lock, method buildLockRequest:
private LockRequest buildLockRequest(Long transactionId) {
    if (transactionId == null && !sinks.isEmpty()) {
        throw new IllegalArgumentException("Cannot sink to tables outside of a transaction: sinks=" + asStrings(sinks));
    }
    LockRequestBuilder requestBuilder = new LockRequestBuilder();
    for (Table table : tables) {
        LockComponentBuilder componentBuilder = new LockComponentBuilder().setDbName(table.getDbName()).setTableName(table.getTableName());
        // The DataOperationType is set conservatively here; ideally we would distinguish
        // update/delete from insert/select, and whether the resource being written to is ACID or not.
        if (sinks.contains(table)) {
            componentBuilder.setSemiShared().setOperationType(DataOperationType.UPDATE).setIsAcid(true);
        } else {
            componentBuilder.setShared().setOperationType(DataOperationType.INSERT).setIsAcid(true);
        }
        LockComponent component = componentBuilder.build();
        requestBuilder.addLockComponent(component);
    }
    if (transactionId != null) {
        requestBuilder.setTransactionId(transactionId);
    }
    LockRequest request = requestBuilder.setUser(user).build();
    return request;
}
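A short usage sketch of the builder path above, assuming org.apache.hadoop.hive.metastore.LockComponentBuilder and LockRequestBuilder (the classes this method already uses); the database and table names and the transaction id are illustrative:

import org.apache.hadoop.hive.metastore.LockComponentBuilder;
import org.apache.hadoop.hive.metastore.LockRequestBuilder;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockRequest;

public class BuildLockRequestExample {

    public static void main(String[] args) {
        // Shared (read) component for a table that is only read from.
        LockComponent source = new LockComponentBuilder()
            .setDbName("db").setTableName("source")
            .setShared().setOperationType(DataOperationType.INSERT).setIsAcid(true)
            .build();
        // Semi-shared (write) component for a table that is written to.
        LockComponent sink = new LockComponentBuilder()
            .setDbName("db").setTableName("sink")
            .setSemiShared().setOperationType(DataOperationType.UPDATE).setIsAcid(true)
            .build();
        LockRequest request = new LockRequestBuilder()
            .addLockComponent(source)
            .addLockComponent(sink)
            .setTransactionId(42L)   // required whenever a sink is present, per the guard above
            .setUser("me")
            .build();
        System.out.println(request);
    }
}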
Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
From the class TestLock, method testAcquireTxnLockCheckLocks:
@Test
public void testAcquireTxnLockCheckLocks() throws Exception {
    writeLock.acquire(TRANSACTION_ID);
    verify(mockMetaStoreClient).lock(requestCaptor.capture());
    LockRequest request = requestCaptor.getValue();
    assertEquals(TRANSACTION_ID, request.getTxnid());
    assertEquals(USER, request.getUser());
    assertEquals(InetAddress.getLocalHost().getHostName(), request.getHostname());
    List<LockComponent> components = request.getComponent();
    assertEquals(3, components.size());
    LockComponent expected1 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    expected1.setTablename("SOURCE_1");
    expected1.setOperationType(DataOperationType.INSERT);
    expected1.setIsAcid(true);
    assertTrue(components.contains(expected1));
    LockComponent expected2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    expected2.setTablename("SOURCE_2");
    expected2.setOperationType(DataOperationType.INSERT);
    expected2.setIsAcid(true);
    assertTrue(components.contains(expected2));
    LockComponent expected3 = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "DB");
    expected3.setTablename("SINK");
    expected3.setOperationType(DataOperationType.UPDATE);
    expected3.setIsAcid(true);
    assertTrue(components.contains(expected3));
}
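The mock and captor used above are not part of this excerpt; a plausible JUnit/Mockito fixture for them, with assumed constant values, might look like this:

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.junit.Before;
import org.mockito.ArgumentCaptor;
import org.mockito.Captor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

public class TestLockFixtureSketch {

    // Hypothetical values; the real test class defines its own constants.
    private static final long TRANSACTION_ID = 1L;
    private static final String USER = "user";

    @Mock
    private IMetaStoreClient mockMetaStoreClient;   // the client whose lock(...) argument is captured

    @Captor
    private ArgumentCaptor<LockRequest> requestCaptor;

    @Before
    public void initMocks() {
        MockitoAnnotations.initMocks(this);
    }
}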
Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
From the class TestLock, method testAcquireReadLockCheckLocks:
@Test
public void testAcquireReadLockCheckLocks() throws Exception {
    readLock.acquire();
    verify(mockMetaStoreClient).lock(requestCaptor.capture());
    LockRequest request = requestCaptor.getValue();
    assertEquals(0, request.getTxnid());
    assertEquals(USER, request.getUser());
    assertEquals(InetAddress.getLocalHost().getHostName(), request.getHostname());
    List<LockComponent> components = request.getComponent();
    assertEquals(2, components.size());
    LockComponent expected1 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    expected1.setTablename("SOURCE_1");
    expected1.setOperationType(DataOperationType.INSERT);
    expected1.setIsAcid(true);
    assertTrue(components.contains(expected1));
    LockComponent expected2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    expected2.setTablename("SOURCE_2");
    expected2.setOperationType(DataOperationType.INSERT);
    expected2.setIsAcid(true);
    assertTrue(components.contains(expected2));
}
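For comparison with the transactional case, a sketch of how the read-only request asserted here could be produced with the same builders used in buildLockRequest above: no transaction id is set, so getTxnid() stays at the Thrift default of 0, and both components are SHARED_READ. The names follow the test; the wrapper class is illustrative:

import org.apache.hadoop.hive.metastore.LockComponentBuilder;
import org.apache.hadoop.hive.metastore.LockRequestBuilder;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockRequest;

public class ReadLockRequestExample {

    public static void main(String[] args) {
        LockRequest request = new LockRequestBuilder()
            .addLockComponent(new LockComponentBuilder()
                .setDbName("DB").setTableName("SOURCE_1")
                .setShared()                                  // becomes LockType.SHARED_READ
                .setOperationType(DataOperationType.INSERT).setIsAcid(true)
                .build())
            .addLockComponent(new LockComponentBuilder()
                .setDbName("DB").setTableName("SOURCE_2")
                .setShared()
                .setOperationType(DataOperationType.INSERT).setIsAcid(true)
                .build())
            // no setTransactionId(...): read locks run outside a transaction
            .setUser("user")
            .build();
        System.out.println(request.getTxnid());   // prints 0, matching the assertion above
    }
}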