
Example 21 with LockComponent

Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.

From the class TestInitiator, method noCompactWhenCompactAlreadyScheduled.

@Test
public void noCompactWhenCompactAlreadyScheduled() throws Exception {
    Table t = newTable("default", "ncwcas", false);
    HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
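    // Abort more transactions than the aborted-txn threshold (set to 10 above) so the table becomes a compaction candidate.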
    for (int i = 0; i < 11; i++) {
        long txnid = openTxn();
        LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
        comp.setTablename("ncwcas");
        comp.setOperationType(DataOperationType.UPDATE);
        List<LockComponent> components = new ArrayList<LockComponent>(1);
        components.add(comp);
        LockRequest req = new LockRequest(components, "me", "localhost");
        req.setTxnid(txnid);
        LockResponse res = txnHandler.lock(req);
        txnHandler.abortTxn(new AbortTxnRequest(txnid));
    }
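    // Manually schedule a major compaction before the initiator runs.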
    CompactionRequest rqst = new CompactionRequest("default", "ncwcas", CompactionType.MAJOR);
    txnHandler.compact(rqst);
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("ncwcas", compacts.get(0).getTablename());
    startInitiator();
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("ncwcas", compacts.get(0).getTablename());
    Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
Also used: Table(org.apache.hadoop.hive.metastore.api.Table), LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent), ArrayList(java.util.ArrayList), AbortTxnRequest(org.apache.hadoop.hive.metastore.api.AbortTxnRequest), LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest), CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest), LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest), ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement), Test(org.junit.Test)

Example 22 with LockComponent

Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.

From the class TestInitiator, method compactPartitionHighDeltaPct.

@Test
public void compactPartitionHighDeltaPct() throws Exception {
    Table t = newTable("default", "cphdp", true);
    Partition p = newPartition(t, "today");
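    // Base and delta files sized so the delta-to-base percentage exceeds the initiator's threshold.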
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    burnThroughTransactions(23);
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
    comp.setTablename("cphdp");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
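    // With the transaction committed, the initiator should pick the partition for a major compaction.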
    startInitiator();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals("initiated", compacts.get(0).getState());
    Assert.assertEquals("cphdp", compacts.get(0).getTablename());
    Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
    Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
Also used: CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Partition(org.apache.hadoop.hive.metastore.api.Partition), Table(org.apache.hadoop.hive.metastore.api.Table), LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent), ArrayList(java.util.ArrayList), LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest), ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement), Test(org.junit.Test)

Example 23 with LockComponent

Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.

From the class TestInitiator, method noCompactWhenNoCompactSet.

@Test
public void noCompactWhenNoCompactSet() throws Exception {
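    // Tables marked NO_AUTO_COMPACTION=true must be skipped by the initiator.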
    Map<String, String> parameters = new HashMap<String, String>(1);
    parameters.put("NO_AUTO_COMPACTION", "true");
    Table t = newTable("default", "ncwncs", false, parameters);
    HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
    for (int i = 0; i < 11; i++) {
        long txnid = openTxn();
        LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
        comp.setTablename("ncwncs");
        comp.setOperationType(DataOperationType.UPDATE);
        List<LockComponent> components = new ArrayList<LockComponent>(1);
        components.add(comp);
        LockRequest req = new LockRequest(components, "me", "localhost");
        req.setTxnid(txnid);
        LockResponse res = txnHandler.lock(req);
        txnHandler.abortTxn(new AbortTxnRequest(txnid));
    }
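    // Even though the aborted-txn threshold was exceeded, no compaction may be initiated for this table.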
    startInitiator();
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals(0, rsp.getCompactsSize());
}
Also used: Table(org.apache.hadoop.hive.metastore.api.Table), LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent), HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), AbortTxnRequest(org.apache.hadoop.hive.metastore.api.AbortTxnRequest), LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest), Test(org.junit.Test)

Example 24 with LockComponent

Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.

From the class TestInitiator, method cleanEmptyAbortedTxns.

@Test
public void cleanEmptyAbortedTxns() throws Exception {
    // Test that we are cleaning aborted transactions with no components left in txn_components.
    // Put one aborted transaction with an entry in txn_components to make sure we don't
    // accidentally clean it too.
    Table t = newTable("default", "ceat", false);
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ceat");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
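    // Abort a large batch of transactions that never acquired locks, so they leave no txn_components entries.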
    for (int i = 0; i < TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50; i++) {
        txnid = openTxn();
        txnHandler.abortTxn(new AbortTxnRequest(txnid));
    }
    GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
    Assert.assertEquals(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1, openTxns.getOpen_txnsSize());
    startInitiator();
    openTxns = txnHandler.getOpenTxns();
    Assert.assertEquals(1, openTxns.getOpen_txnsSize());
}
Also used: Table(org.apache.hadoop.hive.metastore.api.Table), LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent), LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse), ArrayList(java.util.ArrayList), GetOpenTxnsResponse(org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse), AbortTxnRequest(org.apache.hadoop.hive.metastore.api.AbortTxnRequest), LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest), Test(org.junit.Test)

Example 25 with LockComponent

Use of org.apache.hadoop.hive.metastore.api.LockComponent in project hive by apache.

From the class TestTxnHandler, method testLockESRSW.

@Test
public void testLockESRSW() throws Exception {
    // Test that exclusive lock blocks read and write
    LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition");
    comp.setOperationType(DataOperationType.NO_TXN);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    LockResponse res = txnHandler.lock(req);
    assertTrue(res.getState() == LockState.ACQUIRED);
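    // A shared read on the same partition must wait behind the exclusive lock.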
    comp = new LockComponent(LockType.SHARED_READ, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition");
    comp.setOperationType(DataOperationType.SELECT);
    components.clear();
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    res = txnHandler.lock(req);
    assertTrue(res.getState() == LockState.WAITING);
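    // A shared write on the same partition must also wait.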
    comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition");
    comp.setOperationType(DataOperationType.UPDATE);
    components.clear();
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    req.setTxnid(openTxn());
    res = txnHandler.lock(req);
    assertTrue(res.getState() == LockState.WAITING);
}
Also used: LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent), LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse), ArrayList(java.util.ArrayList), LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest), CheckLockRequest(org.apache.hadoop.hive.metastore.api.CheckLockRequest), Test(org.junit.Test)

Aggregations

LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 94 uses
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 94 uses
Test (org.junit.Test): 93 uses
LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse): 58 uses
ArrayList (java.util.ArrayList): 57 uses
CheckLockRequest (org.apache.hadoop.hive.metastore.api.CheckLockRequest): 32 uses
Table (org.apache.hadoop.hive.metastore.api.Table): 24 uses
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 22 uses
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 22 uses
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 17 uses
CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest): 15 uses
AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest): 11 uses
Partition (org.apache.hadoop.hive.metastore.api.Partition): 10 uses
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 6 uses
UnlockRequest (org.apache.hadoop.hive.metastore.api.UnlockRequest): 5 uses
NoSuchLockException (org.apache.hadoop.hive.metastore.api.NoSuchLockException): 4 uses
CompactionInfo (org.apache.hadoop.hive.metastore.txn.CompactionInfo): 4 uses
GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse): 3 uses
OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest): 3 uses
HashMap (java.util.HashMap): 2 uses
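
Across these usages, LockComponent and LockRequest almost always appear together in the same short sequence: build one or more components, wrap them in a LockRequest, attach a transaction id, and hand the request to the transaction handler. The snippet below is a minimal sketch of that recurring pattern, distilled from the tests above; it assumes, as those tests do, an already-configured transaction handler named txnHandler and a helper openTxn() that returns an open transaction id, and the table and partition names are placeholders.

long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
// Placeholder table and partition names.
comp.setTablename("mytable");
comp.setPartitionname("ds=today");
// Every example above sets the operation type on the component.
comp.setOperationType(DataOperationType.UPDATE);
List<LockComponent> components = new ArrayList<LockComponent>(1);
components.add(comp);
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
if (res.getState() == LockState.ACQUIRED) {
    // ... perform the write, then commit the transaction ...
    txnHandler.commitTxn(new CommitTxnRequest(txnid));
} else {
    // WAITING: another holder blocks this lock; callers typically re-check via CheckLockRequest
    // or give up and abort the transaction.
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
}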