
Example 1 with UnlockRequest

Use of org.apache.hadoop.hive.metastore.api.UnlockRequest in project hive by apache.

From class TestTxnHandler, method testUnlockWithTxn.

@Test
public void testUnlockWithTxn() throws Exception {
    LOG.debug("Starting testUnlockWithTxn");
    // Test that attempting to unlock locks associated with a transaction
    // generates an error
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition=myvalue");
    comp.setOperationType(DataOperationType.DELETE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    long lockid = res.getLockid();
    try {
        txnHandler.unlock(new UnlockRequest(lockid));
        fail("Allowed to unlock lock associated with transaction.");
    } catch (TxnOpenException e) {
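        // expected: a lock bound to an open transaction can only be released
        // by committing or aborting that transaction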
    }
}
Also used : LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) ArrayList(java.util.ArrayList) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) CheckLockRequest(org.apache.hadoop.hive.metastore.api.CheckLockRequest) UnlockRequest(org.apache.hadoop.hive.metastore.api.UnlockRequest) TxnOpenException(org.apache.hadoop.hive.metastore.api.TxnOpenException) Test(org.junit.Test)
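As the test demonstrates, a lock taken under a transaction cannot be released with UnlockRequest; it is released when the transaction ends. A minimal sketch of the intended pattern, assuming the same txnHandler fixture as above and the metastore's CommitTxnRequest/AbortTxnRequest types (both in org.apache.hadoop.hive.metastore.api):

// Sketch: locks bound to a transaction are released implicitly when the
// transaction ends, never via unlock().
long txnid = openTxn();
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
try {
    // ... transactional work ...
    txnHandler.commitTxn(new CommitTxnRequest(txnid));   // releases the lock
} catch (Exception e) {
    txnHandler.abortTxn(new AbortTxnRequest(txnid));     // also releases the lock
    throw e;
}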

Example 2 with UnlockRequest

Use of org.apache.hadoop.hive.metastore.api.UnlockRequest in project hive by apache.

From class TestCleaner, method notBlockedBySubsequentLock.

@Test
public void notBlockedBySubsequentLock() throws Exception {
    Table t = newTable("default", "bblt", false);
    // Set the run frequency low on this test so it doesn't take long
    conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL, 100, TimeUnit.MILLISECONDS);
    addBaseFile(t, null, 20L, 20);
    addDeltaFile(t, null, 21L, 22L, 2);
    addDeltaFile(t, null, 23L, 24L, 2);
    addDeltaFile(t, null, 21L, 24L, 4);
    burnThroughTransactions("default", "bblt", 25);
    CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
    txnHandler.compact(rqst);
    CompactionInfo ci = txnHandler.findNextToCompact("fred");
    txnHandler.markCompacted(ci);
    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
    LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "default");
    comp.setTablename("bblt");
    comp.setOperationType(DataOperationType.INSERT);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    LockResponse res = txnHandler.lock(req);
    AtomicBoolean looped = new AtomicBoolean();
    looped.set(false);
    startCleaner(looped);
    // Make sure the cleaner has a chance to run once
    while (!looped.get()) {
        Thread.sleep(100);
    }
    // There should still be one request, as the lock is still held.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    // Obtain a second lock.  This shouldn't block the cleaner, as it was acquired after the
    // initial clean request.
    LockComponent comp2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "default");
    comp2.setTablename("bblt");
    comp2.setOperationType(DataOperationType.SELECT);
    List<LockComponent> components2 = new ArrayList<LockComponent>(1);
    components2.add(comp2);
    LockRequest req2 = new LockRequest(components2, "me", "localhost");
    LockResponse res2 = txnHandler.lock(req2);
    // Unlock the previous lock
    txnHandler.unlock(new UnlockRequest(res.getLockid()));
    looped.set(false);
    while (!looped.get()) {
        Thread.sleep(100);
    }
    stopThread();
    Thread.sleep(200);
    // Check that the compaction request has moved to the succeeded state.
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
}
Also used : Table(org.apache.hadoop.hive.metastore.api.Table) LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) ArrayList(java.util.ArrayList) UnlockRequest(org.apache.hadoop.hive.metastore.api.UnlockRequest) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) CompactionInfo(org.apache.hadoop.hive.metastore.txn.CompactionInfo) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
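The looped flag and busy-wait above are test plumbing; outside a test harness one would poll the compaction queue itself. A hedged sketch of such a helper, built only from the API shown in this example (the name waitForCleaner is illustrative, not part of Hive, and the unbounded wait is for brevity):

// Illustrative helper: poll until the first request in the compaction queue
// reaches the succeeded state.  Real code should bound the wait and match on
// the specific db/table/partition rather than taking the first element.
private void waitForCleaner() throws Exception {
    ShowCompactResponse rsp;
    do {
        Thread.sleep(100);
        rsp = txnHandler.showCompact(new ShowCompactRequest());
    } while (!TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState()));
}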

Example 3 with UnlockRequest

Use of org.apache.hadoop.hive.metastore.api.UnlockRequest in project hive by apache.

From class TestCleaner, method partitionNotBlockedBySubsequentLock.

@Test
public void partitionNotBlockedBySubsequentLock() throws Exception {
    Table t = newTable("default", "bblt", true);
    Partition p = newPartition(t, "today");
    // Set the run frequency low on this test so it doesn't take long
    conf.setTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL, 100, TimeUnit.MILLISECONDS);
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 2);
    addDeltaFile(t, p, 21L, 24L, 4);
    burnThroughTransactions("default", "bblt", 25);
    CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
    rqst.setPartitionname("ds=today");
    txnHandler.compact(rqst);
    CompactionInfo ci = txnHandler.findNextToCompact("fred");
    txnHandler.markCompacted(ci);
    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
    LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.PARTITION, "default");
    comp.setTablename("bblt");
    comp.setPartitionname("ds=today");
    comp.setOperationType(DataOperationType.INSERT);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    LockResponse res = txnHandler.lock(req);
    AtomicBoolean looped = new AtomicBoolean();
    looped.set(false);
    startCleaner(looped);
    // Make sure the cleaner has a chance to run once
    while (!looped.get()) {
        Thread.sleep(100);
    }
    // There should still be one request, as the lock is still held.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    // Obtain a second lock.  This shouldn't block the cleaner, as it was acquired after the
    // initial clean request.
    LockComponent comp2 = new LockComponent(LockType.SHARED_READ, LockLevel.PARTITION, "default");
    comp2.setTablename("bblt");
    comp2.setPartitionname("ds=today");
    comp2.setOperationType(DataOperationType.SELECT);
    List<LockComponent> components2 = new ArrayList<LockComponent>(1);
    components2.add(comp2);
    LockRequest req2 = new LockRequest(components2, "me", "localhost");
    LockResponse res2 = txnHandler.lock(req2);
    // Unlock the previous lock
    txnHandler.unlock(new UnlockRequest(res.getLockid()));
    looped.set(false);
    while (!looped.get()) {
        Thread.sleep(100);
    }
    stopThread();
    Thread.sleep(200);
    // Check that the compaction request has moved to the succeeded state.
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    compacts = rsp.getCompacts();
    Assert.assertEquals(1, compacts.size());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) ArrayList(java.util.ArrayList) UnlockRequest(org.apache.hadoop.hive.metastore.api.UnlockRequest) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) CompactionInfo(org.apache.hadoop.hive.metastore.txn.CompactionInfo) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)

Example 4 with UnlockRequest

Use of org.apache.hadoop.hive.metastore.api.UnlockRequest in project hive by apache.

From class Cleaner, method removeFiles.

private boolean removeFiles(String location, long minOpenTxnGLB, CompactionInfo ci, boolean dropPartition) throws MetaException, IOException, NoSuchObjectException, NoSuchTxnException {
    if (dropPartition) {
        LockRequest lockRequest = createLockRequest(ci, 0, LockType.EXCL_WRITE, DataOperationType.DELETE);
        LockResponse res = null;
        try {
            res = txnHandler.lock(lockRequest);
            if (res.getState() == LockState.ACQUIRED) {
                // check if partition wasn't recreated
                if (resolvePartition(ci) == null) {
                    return removeFiles(location, ci);
                }
            }
        } catch (NoSuchTxnException | TxnAbortedException e) {
            LOG.error(e.getMessage());
        } finally {
            if (res != null && res.getState() != LockState.NOT_ACQUIRED) {
                try {
                    txnHandler.unlock(new UnlockRequest(res.getLockid()));
                } catch (NoSuchLockException | TxnOpenException e) {
                    LOG.error(e.getMessage());
                }
            }
        }
    }
    ValidTxnList validTxnList = TxnUtils.createValidTxnListForCleaner(txnHandler.getOpenTxns(), minOpenTxnGLB);
    // save it so that getAcidState() sees it
    conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnList.writeToString());
    /**
     * {@code validTxnList} is capped by minOpenTxnGLB so if
     * {@link AcidUtils#getAcidState(Path, Configuration, ValidWriteIdList)} sees a base/delta
     * produced by a compactor, that means every reader that could be active right now sees it
     * as well.  That means if this base/delta shadows some earlier base/delta, then it will be
     * used in favor of any files that it shadows.  Thus the shadowed files are safe to delete.
     *
     * The metadata about aborted writeIds (and consequently aborted txn IDs) cannot be deleted
     * above COMPACTION_QUEUE.CQ_HIGHEST_WRITE_ID.
     * See {@link TxnStore#markCleaned(CompactionInfo)} for details.
     * For example given partition P1, txnid:150 starts and sees txnid:149 as open.
     * Say compactor runs in txnid:160, but 149 is still open and P1 has the largest resolved
     * writeId:17.  Compactor will produce base_17_c160.
     * Suppose txnid:149 writes delta_18_18
     * to P1 and aborts.  Compactor can only remove TXN_COMPONENTS entries
     * up to (inclusive) writeId:17 since delta_18_18 may be on disk (and perhaps corrupted) but
     * not visible based on 'validTxnList' capped at minOpenTxn, so it will not be cleaned by
     * {@link #removeFiles(String, ValidWriteIdList, CompactionInfo)} and so we must keep the
     * metadata that says that 18 is aborted.
     * In a slightly different case, whatever txn created delta_18 (and all other txn) may have
     * committed by the time cleaner runs and so cleaner will indeed see delta_18_18 and remove
     * it (since it has nothing but aborted data).  But we can't tell which actually happened
     * in markCleaned(), so make sure it doesn't delete metadata above CQ_HIGHEST_WRITE_ID.
     *
     * We could perhaps merge the cleaning of aborted and obsolete files and remove all aborted
     * files up to the current Min Open Write Id; this way aborted TXN_COMPONENTS metadata can be
     * removed as well up to that point, which may be higher than CQ_HIGHEST_WRITE_ID.  This could
     * be useful if there is suddenly a flood of aborted txns.  (For another day.)
     */
    // Creating 'reader' list since we are interested in the set of 'obsolete' files
    ValidReaderWriteIdList validWriteIdList = getValidCleanerWriteIdList(ci, validTxnList);
    LOG.debug("Cleaning based on writeIdList: {}", validWriteIdList);
    return removeFiles(location, validWriteIdList, ci);
}
Also used : NoSuchLockException(org.apache.hadoop.hive.metastore.api.NoSuchLockException) TxnAbortedException(org.apache.hadoop.hive.metastore.api.TxnAbortedException) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) NoSuchTxnException(org.apache.hadoop.hive.metastore.api.NoSuchTxnException) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ValidReaderWriteIdList(org.apache.hadoop.hive.common.ValidReaderWriteIdList) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) UnlockRequest(org.apache.hadoop.hive.metastore.api.UnlockRequest) TxnOpenException(org.apache.hadoop.hive.metastore.api.TxnOpenException)
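The finally block above is the canonical guard around UnlockRequest: release only what was actually acquired, and tolerate the lock having already vanished or belonging to an open transaction. Extracted as a standalone helper for illustration (the name unlockQuietly and the LOG field are ours, not Hive's):

// Hypothetical helper capturing the Cleaner's unlock guard.
private void unlockQuietly(TxnStore txnHandler, LockResponse res) {
    if (res == null || res.getState() == LockState.NOT_ACQUIRED) {
        // nothing was acquired, so there is nothing to release
        return;
    }
    try {
        txnHandler.unlock(new UnlockRequest(res.getLockid()));
    } catch (NoSuchLockException | TxnOpenException | MetaException e) {
        LOG.error(e.getMessage());
    }
}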

Example 5 with UnlockRequest

Use of org.apache.hadoop.hive.metastore.api.UnlockRequest in project hive by apache.

From class TestTxnHandler, method testMultipleLockWait.

@Test
public void testMultipleLockWait() throws Exception {
    // Test that a second exclusive lock on the same partition waits until the first lock is released
    LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition=myvalue");
    comp.setOperationType(DataOperationType.NO_TXN);
    List<LockComponent> components = new ArrayList<LockComponent>(2);
    components.add(comp);
    comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("anotherpartition=anothervalue");
    comp.setOperationType(DataOperationType.NO_TXN);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    LockResponse res = txnHandler.lock(req);
    long lockid1 = res.getLockid();
    assertTrue(res.getState() == LockState.ACQUIRED);
    comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
    comp.setTablename("mytable");
    comp.setPartitionname("mypartition=myvalue");
    comp.setOperationType(DataOperationType.NO_TXN);
    components = new ArrayList<LockComponent>(1);
    components.add(comp);
    req = new LockRequest(components, "me", "localhost");
    res = txnHandler.lock(req);
    long lockid2 = res.getLockid();
    assertTrue(res.getState() == LockState.WAITING);
    txnHandler.unlock(new UnlockRequest(lockid1));
    res = txnHandler.checkLock(new CheckLockRequest(lockid2));
    assertTrue(res.getState() == LockState.ACQUIRED);
}
Also used : LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) ArrayList(java.util.ArrayList) CheckLockRequest(org.apache.hadoop.hive.metastore.api.CheckLockRequest) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) CheckLockRequest(org.apache.hadoop.hive.metastore.api.CheckLockRequest) UnlockRequest(org.apache.hadoop.hive.metastore.api.UnlockRequest) Test(org.junit.Test)
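A WAITING response is not an error: the caller re-polls with CheckLockRequest until the state flips to ACQUIRED, exactly as the last two lines of the test do. A minimal polling sketch over the same API (the fixed 100 ms cadence is illustrative; real callers should back off and time out):

// Illustrative: poll a WAITING lock until it is granted.
LockResponse res = txnHandler.lock(req);
long lockid = res.getLockid();
while (res.getState() == LockState.WAITING) {
    Thread.sleep(100);
    res = txnHandler.checkLock(new CheckLockRequest(lockid));
}
assertTrue(res.getState() == LockState.ACQUIRED);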

Aggregations

LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest)6 LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse)6 UnlockRequest (org.apache.hadoop.hive.metastore.api.UnlockRequest)6 ArrayList (java.util.ArrayList)5 LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent)5 Test (org.junit.Test)5 CheckLockRequest (org.apache.hadoop.hive.metastore.api.CheckLockRequest)3 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)2 CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest)2 ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest)2 ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse)2 ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement)2 Table (org.apache.hadoop.hive.metastore.api.Table)2 TxnOpenException (org.apache.hadoop.hive.metastore.api.TxnOpenException)2 CompactionInfo (org.apache.hadoop.hive.metastore.txn.CompactionInfo)2 ValidReaderWriteIdList (org.apache.hadoop.hive.common.ValidReaderWriteIdList)1 ValidTxnList (org.apache.hadoop.hive.common.ValidTxnList)1 NoSuchLockException (org.apache.hadoop.hive.metastore.api.NoSuchLockException)1 NoSuchTxnException (org.apache.hadoop.hive.metastore.api.NoSuchTxnException)1 Partition (org.apache.hadoop.hive.metastore.api.Partition)1