Search in sources :

Example 11 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

the class TestValidCompactorTxnList method exceptionsAbveHighWaterMark.

@Test
public void exceptionsAbveHighWaterMark() {
    // Compactor snapshot: high watermark 15, exception txns {8, 11, 17, 29}.
    final ValidTxnList compactorTxns =
            new ValidCompactorTxnList(new long[] { 8, 11, 17, 29 }, 15);
    // Only the exceptions at or below the watermark (8, 11) remain invalid;
    // 17 and 29 lie above it and are expected to be dropped.
    Assert.assertArrayEquals("", new long[] { 8, 11 }, compactorTxns.getInvalidTransactions());
    // Range [7, 9] sits entirely below the watermark -> ALL valid for compaction.
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL, compactorTxns.isTxnRangeValid(7, 9));
    // Range [12, 16] crosses the watermark -> NONE valid.
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, compactorTxns.isTxnRangeValid(12, 16));
}
Also used : ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ValidCompactorTxnList(org.apache.hadoop.hive.common.ValidCompactorTxnList) Test(org.junit.Test)

Example 12 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

the class Driver method recordValidTxns.

/**
 * Writes the current set of valid transactions into the conf (under
 * {@code ValidTxnList.VALID_TXNS_KEY}) so that it can be read by the input format.
 */
private void recordValidTxns() throws LockException {
    final HiveTxnManager txnManager = SessionState.get().getTxnMgr();
    final ValidTxnList validTxns = txnManager.getValidTxns();
    final String validTxnsString = validTxns.toString();
    conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnsString);
    if (plan.getFetchTask() != null) {
        // Needed for the HIVEFETCHTASKCONVERSION optimization, which initializes JobConf in
        // FetchOperator before recordValidTxns() runs; the list must still be set after locks
        // are acquired to avoid ACID race conditions, so push it to the fetch task here.
        plan.getFetchTask().setValidTxnList(validTxnsString);
    }
    LOG.debug("Encoding valid txns info " + validTxnsString + " txnid:" + txnManager.getCurrentTxnId());
}
Also used : ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager)

Example 13 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

the class Driver method recordValidTxns.

/**
 * Writes the current set of valid transactions into the conf (under
 * {@link ValidTxnList#VALID_TXNS_KEY}) so that it can be read by the input format.
 *
 * @param txnMgr transaction manager that supplies the valid-transaction snapshot
 * @throws LockException declared for {@code txnMgr.getValidTxns()} — NOTE(review): presumably
 *         that call may fail while acquiring the snapshot; confirm against HiveTxnManager
 * @throws IllegalStateException if a valid-txn list has already been recorded in this conf,
 *         since recording twice would silently clobber the snapshot taken under this query's locks
 */
private void recordValidTxns(HiveTxnManager txnMgr) throws LockException {
    String oldTxnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
    if (oldTxnString != null && !oldTxnString.isEmpty()) {
        // Bug fix: the message previously referred to "recordValidTxn()", a method that
        // does not exist; use the actual method name so the error is actionable.
        throw new IllegalStateException("calling recordValidTxns() more than once in the same " + JavaUtils.txnIdToString(txnMgr.getCurrentTxnId()));
    }
    ValidTxnList txnList = txnMgr.getValidTxns();
    String txnStr = txnList.toString();
    conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr);
    LOG.debug("Encoding valid txns info " + txnStr + " txnid:" + txnMgr.getCurrentTxnId());
}
Also used : ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList)

Example 14 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

the class MaterializationsInvalidationCache method addMaterializedView.

/**
 * Adds the materialized view to the invalidation cache.
 *
 * <p>For {@code CREATE}/{@code ALTER} the view entry is unconditionally (re)stored. For any
 * other op type (e.g. loading a possibly outdated view), the method first records, per table
 * used by the view, the first transaction committed after the view's snapshot, and only then
 * adds the view entry if one is not already present.
 *
 * @param dbName database containing the materialized view
 * @param tableName name of the materialized view
 * @param tablesUsed tables used by the materialized view — assumed to be qualified as
 *          "db.table", since they are split on '.' below; TODO confirm against callers
 * @param validTxnList serialized transaction snapshot the view was created with; may be
 *          {@code null} for views over non-transactional tables, in which case nothing is cached
 * @param opType operation that triggered this call
 */
private void addMaterializedView(String dbName, String tableName, Set<String> tablesUsed, String validTxnList, OpType opType) {
    if (disable) {
        // Nothing to do: the invalidation cache is switched off.
        return;
    }
    // We are going to create the map for each view in the given database.
    // putIfAbsent keeps this race-free: if another thread installed a map first, reuse it.
    ConcurrentMap<String, MaterializationInvalidationInfo> cq = new ConcurrentHashMap<String, MaterializationInvalidationInfo>();
    final ConcurrentMap<String, MaterializationInvalidationInfo> prevCq = materializations.putIfAbsent(dbName, cq);
    if (prevCq != null) {
        cq = prevCq;
    }
    // important information in the registry to account for rewriting invalidation
    if (validTxnList == null) {
        // This can happen when the materialized view was created on non-transactional tables
        return;
    }
    if (opType == OpType.CREATE || opType == OpType.ALTER) {
        // You store the materialized view (overwriting any previous entry).
        cq.put(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList));
    } else {
        ValidTxnList txnList = new ValidReadTxnList(validTxnList);
        for (String qNameTableUsed : tablesUsed) {
            // First we insert a new tree set to keep table modifications, unless it already exists
            // (same putIfAbsent pattern as above, keyed by the qualified table name).
            ConcurrentSkipListMap<Long, Long> modificationsTree = new ConcurrentSkipListMap<Long, Long>();
            final ConcurrentSkipListMap<Long, Long> prevModificationsTree = tableModifications.putIfAbsent(qNameTableUsed, modificationsTree);
            if (prevModificationsTree != null) {
                modificationsTree = prevModificationsTree;
            }
            // check if the MV is still valid.
            try {
                // Split "db.table" into its two components for the metastore lookup.
                String[] names = qNameTableUsed.split("\\.");
                BasicTxnInfo e = handler.getTxnHandler().getFirstCompletedTransactionForTableAfterCommit(names[0], names[1], txnList);
                if (!e.isIsnull()) {
                    // Record txnid -> commit time so getInvalidationTime() can observe
                    modificationsTree.put(e.getTxnid(), e.getTime());
                    // a modification event that was in the metastore.
                    continue;
                }
            } catch (MetaException ex) {
                // Best-effort: on a metastore error the view is simply not cached.
                LOG.debug("Materialized view " + Warehouse.getQualifiedName(dbName, tableName) + " ignored; error loading view into invalidation cache", ex);
                return;
            }
        }
        // For LOAD, you only add it if it does exist as you might be loading an outdated MV
        cq.putIfAbsent(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Cached materialized view for rewriting in invalidation cache: " + Warehouse.getQualifiedName(dbName, tableName));
    }
}
Also used : ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) BasicTxnInfo(org.apache.hadoop.hive.metastore.api.BasicTxnInfo) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ValidReadTxnList(org.apache.hadoop.hive.common.ValidReadTxnList) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)

Example 15 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

the class MaterializationsInvalidationCache method getInvalidationTime.

/**
 * Computes the invalidation time of a materialization: the earliest recorded commit time of a
 * modification, on any table the view uses, that is not covered by the view's transaction
 * snapshot. Returns 0 if no invalidating modification was found, and {@link Long#MIN_VALUE}
 * if the materialization carries no snapshot (created over non-transactional tables).
 */
private long getInvalidationTime(MaterializationInvalidationInfo materialization) {
    String txnListString = materialization.getValidTxnList();
    if (txnListString == null) {
        // This can happen when the materialization was created on non-transactional tables
        return Long.MIN_VALUE;
    }
    // We will obtain the modification time as follows.
    // First, we obtain the first element after high watermark (if any)
    // Then, we iterate through the elements from min open txn till high
    // watermark, updating the modification time after creation if needed
    ValidTxnList txnList = new ValidReadTxnList(txnListString);
    // 0 acts as the "nothing found yet" sentinel and is the value returned in that case.
    long firstModificationTimeAfterCreation = 0L;
    for (String qNameTableUsed : materialization.getTablesUsed()) {
        // NOTE(review): assumes tableModifications already has an entry for every table the
        // view uses (populated by addMaterializedView) — a missing entry would NPE; confirm.
        // Any txn strictly above the high watermark is by definition not in the snapshot.
        final Entry<Long, Long> tn = tableModifications.get(qNameTableUsed).higherEntry(txnList.getHighWatermark());
        if (tn != null) {
            if (firstModificationTimeAfterCreation == 0L || tn.getValue() < firstModificationTimeAfterCreation) {
                firstModificationTimeAfterCreation = tn.getValue();
            }
        }
        // when this transaction was being executed
        if (txnList.getMinOpenTxn() != null) {
            // Invalid transaction list is sorted, so we can scan it with a single
            // forward-only cursor (pos) while walking the modification entries in key order.
            int pos = 0;
            // subMap is [minOpenTxn, highWatermark): candidates in the snapshot window.
            for (Map.Entry<Long, Long> t : tableModifications.get(qNameTableUsed).subMap(txnList.getMinOpenTxn(), txnList.getHighWatermark()).entrySet()) {
                // Advance the cursor until it lines up with this modification's txn id.
                while (pos < txnList.getInvalidTransactions().length && txnList.getInvalidTransactions()[pos] != t.getKey()) {
                    pos++;
                }
                if (pos >= txnList.getInvalidTransactions().length) {
                    // Cursor exhausted: no later modification can match an invalid txn.
                    break;
                }
                // t's txn was open/aborted at snapshot time -> it invalidates the view.
                if (firstModificationTimeAfterCreation == 0L || t.getValue() < firstModificationTimeAfterCreation) {
                    firstModificationTimeAfterCreation = t.getValue();
                }
            }
        }
    }
    return firstModificationTimeAfterCreation;
}
Also used : ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) ValidReadTxnList(org.apache.hadoop.hive.common.ValidReadTxnList)

Aggregations

ValidTxnList (org.apache.hadoop.hive.common.ValidTxnList)18 Test (org.junit.Test)12 ValidCompactorTxnList (org.apache.hadoop.hive.common.ValidCompactorTxnList)8 ValidReadTxnList (org.apache.hadoop.hive.common.ValidReadTxnList)6 MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)4 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)3 ConcurrentSkipListMap (java.util.concurrent.ConcurrentSkipListMap)3 ImmutableMap (com.google.common.collect.ImmutableMap)2 Map (java.util.Map)2 ConcurrentMap (java.util.concurrent.ConcurrentMap)2 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)2 Connection (java.sql.Connection)1 SQLException (java.sql.SQLException)1 Statement (java.sql.Statement)1 ArrayList (java.util.ArrayList)1 Entry (java.util.Map.Entry)1 BasicTxnInfo (org.apache.hadoop.hive.metastore.api.BasicTxnInfo)1 GetValidWriteIdsResponse (org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse)1 HeartbeatTxnRangeResponse (org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse)1 TableValidWriteIds (org.apache.hadoop.hive.metastore.api.TableValidWriteIds)1