Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
The class TestTxnHandler, method testLockDifferentTables.
@Test
public void testLockDifferentTables() throws Exception {
  // Test that two different tables don't collide on their locks
  LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setOperationType(DataOperationType.NO_TXN);
  comp.setTablename("mytable");
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
  comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setOperationType(DataOperationType.NO_TXN);
  comp.setTablename("yourtable");
  components.clear();
  components.add(comp);
  req = new LockRequest(components, "me", "localhost");
  res = txnHandler.lock(req);
  assertTrue(res.getState() == LockState.ACQUIRED);
}
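For contrast, here is a minimal sketch of the conflicting case, following the same pattern as above: two EXCLUSIVE locks on the same table. Like the surrounding snippets, it assumes the test class's txnHandler fixture; the WAITING assertion is our assumption about how TxnHandler resolves the conflict, not code copied from the Hive source.

@Test
public void testLockSameTableSketch() throws Exception {
  // Hypothetical companion check: a second EXCLUSIVE lock on the SAME table
  // should not be granted while the first one is still held.
  LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
  comp.setOperationType(DataOperationType.NO_TXN);
  comp.setTablename("mytable");
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockResponse first = txnHandler.lock(new LockRequest(components, "me", "localhost"));
  assertTrue(first.getState() == LockState.ACQUIRED);
  // Same table again: we expect the lock manager to queue this request.
  LockResponse second = txnHandler.lock(new LockRequest(components, "me", "localhost"));
  assertTrue(second.getState() == LockState.WAITING);
}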
Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
The class TestCleaner, method notBlockedBySubsequentLock.
@Test
public void notBlockedBySubsequentLock() throws Exception {
  Table t = newTable("default", "bblt", false);
  // Set the run frequency low on this test so it doesn't take long
  conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL, 100, TimeUnit.MILLISECONDS);
  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  addDeltaFile(t, null, 21L, 24L, 4);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  txnHandler.markCompacted(ci);
  txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
  LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "default");
  comp.setTablename("bblt");
  comp.setOperationType(DataOperationType.INSERT);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  AtomicBoolean looped = new AtomicBoolean();
  looped.set(false);
  startCleaner(looped);
  // Make sure the cleaner has a chance to run once
  while (!looped.get()) {
    Thread.sleep(100);
  }
  // There should still be one request, as the lock is still held.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  // Obtain a second lock. This shouldn't block the cleaner, as it was acquired after the
  // initial clean request.
  LockComponent comp2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "default");
  comp2.setTablename("bblt");
  comp2.setOperationType(DataOperationType.SELECT);
  List<LockComponent> components2 = new ArrayList<LockComponent>(1);
  components2.add(comp2);
  LockRequest req2 = new LockRequest(components2, "me", "localhost");
  LockResponse res2 = txnHandler.lock(req2);
  // Unlock the previous lock
  txnHandler.unlock(new UnlockRequest(res.getLockid()));
  looped.set(false);
  while (!looped.get()) {
    Thread.sleep(100);
  }
  stopThread();
  Thread.sleep(200);
  // The request should now be cleaned up and marked as succeeded.
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
}
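One detail worth noting: the second lock (res2) is never released before the test ends. A minimal teardown sketch, reusing the UnlockRequest call already shown above; adding it is our suggestion, not part of the original test:

  // Hypothetical teardown: release the second lock so it cannot leak into later tests.
  txnHandler.unlock(new UnlockRequest(res2.getLockid()));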
Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
The class TestCleaner, method partitionNotBlockedBySubsequentLock.
@Test
public void partitionNotBlockedBySubsequentLock() throws Exception {
  Table t = newTable("default", "bblt", true);
  Partition p = newPartition(t, "today");
  // Set the run frequency low on this test so it doesn't take long
  conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL, 100, TimeUnit.MILLISECONDS);
  addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 22L, 2);
  addDeltaFile(t, p, 23L, 24L, 2);
  addDeltaFile(t, p, 21L, 24L, 4);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  txnHandler.markCompacted(ci);
  txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
  LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.PARTITION, "default");
  comp.setTablename("bblt");
  comp.setPartitionname("ds=today");
  comp.setOperationType(DataOperationType.INSERT);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  AtomicBoolean looped = new AtomicBoolean();
  looped.set(false);
  startCleaner(looped);
  // Make sure the cleaner has a chance to run once
  while (!looped.get()) {
    Thread.sleep(100);
  }
  // There should still be one request, as the lock is still held.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  // Obtain a second lock. This shouldn't block the cleaner, as it was acquired after the
  // initial clean request.
  LockComponent comp2 = new LockComponent(LockType.SHARED_READ, LockLevel.PARTITION, "default");
  comp2.setTablename("bblt");
  comp2.setPartitionname("ds=today");
  comp2.setOperationType(DataOperationType.SELECT);
  List<LockComponent> components2 = new ArrayList<LockComponent>(1);
  components2.add(comp2);
  LockRequest req2 = new LockRequest(components2, "me", "localhost");
  LockResponse res2 = txnHandler.lock(req2);
  // Unlock the previous lock
  txnHandler.unlock(new UnlockRequest(res.getLockid()));
  looped.set(false);
  while (!looped.get()) {
    Thread.sleep(100);
  }
  stopThread();
  Thread.sleep(200);
  // The request should now be cleaned up and marked as succeeded.
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
}
Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
The class TestCleaner, method blockedByLockPartition.
@Test
public void blockedByLockPartition() throws Exception {
  Table t = newTable("default", "bblp", true);
  Partition p = newPartition(t, "today");
  addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 22L, 2);
  addDeltaFile(t, p, 23L, 24L, 2);
  addDeltaFile(t, p, 21L, 24L, 4);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "bblp", CompactionType.MINOR);
  rqst.setPartitionname("ds=today");
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  txnHandler.markCompacted(ci);
  txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
  comp.setTablename("bblp");
  comp.setPartitionname("ds=today");
  comp.setOperationType(DataOperationType.DELETE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(1, "Dracula", "Transylvania"));
  req.setTxnid(resp.getTxn_ids().get(0));
  LockResponse res = txnHandler.lock(req);
  startCleaner();
  // The request should still be there, blocked in the "ready for cleaning" state.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
  Assert.assertEquals("bblp", compacts.get(0).getTablename());
  Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
  Assert.assertEquals(CompactionType.MINOR, compacts.get(0).getType());
}
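The request stays blocked because the SHARED_WRITE lock belongs to a still-open transaction. As a plausible continuation, committing that transaction and giving the cleaner another pass should let the request complete. CommitTxnRequest and commitTxn are real metastore txn APIs, but this follow-up is a sketch that assumes the harness permits a second startCleaner() call; it is not part of the original test:

  // Sketch: commit the blocking transaction, run the cleaner again, and the
  // request should move to the succeeded state.
  txnHandler.commitTxn(new CommitTxnRequest(resp.getTxn_ids().get(0)));
  startCleaner();
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());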
Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.
The class TestCleaner, method blockedByLockTable.
@Test
public void blockedByLockTable() throws Exception {
  Table t = newTable("default", "bblt", false);
  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 2);
  addDeltaFile(t, null, 21L, 24L, 4);
  burnThroughTransactions(25);
  CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
  txnHandler.compact(rqst);
  CompactionInfo ci = txnHandler.findNextToCompact("fred");
  txnHandler.markCompacted(ci);
  txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
  LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "default");
  comp.setTablename("bblt");
  comp.setOperationType(DataOperationType.SELECT);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  LockResponse res = txnHandler.lock(req);
  startCleaner();
  // The request should still be there, blocked in the "ready for cleaning" state.
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals(1, compacts.size());
  Assert.assertEquals("ready for cleaning", compacts.get(0).getState());
  Assert.assertEquals("bblt", compacts.get(0).getTablename());
  Assert.assertEquals(CompactionType.MINOR, compacts.get(0).getType());
}
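The table-level case can be unblocked the same way: release the SHARED_READ lock and let the cleaner run again. As above, this continuation is a sketch (it assumes the harness allows a second startCleaner() pass), not part of the original test:

  // Sketch: release the blocking read lock, rerun the cleaner, and the request
  // should be cleaned up and marked succeeded.
  txnHandler.unlock(new UnlockRequest(res.getLockid()));
  startCleaner();
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());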