Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From class TestValidCompactorTxnList, method exceptionsAbveHighWaterMark.
@Test
public void exceptionsAbveHighWaterMark() {
  // Open/aborted txns 8 and 11 sit below the high watermark (15); 17 and 29 lie
  // above it and so must be excluded from the compactor's invalid set.
  final ValidTxnList txns = new ValidCompactorTxnList(new long[] { 8, 11, 17, 29 }, 15);
  Assert.assertArrayEquals("", new long[] { 8, 11 }, txns.getInvalidTransactions());
  // Range entirely at or below the watermark: fully valid for compaction.
  Assert.assertEquals(ValidTxnList.RangeResponse.ALL, txns.isTxnRangeValid(7, 9));
  // Range straddling the watermark: not valid at all.
  Assert.assertEquals(ValidTxnList.RangeResponse.NONE, txns.isTxnRangeValid(12, 16));
}
Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From class Driver, method recordValidTxns (no-argument variant).
// Write the current set of valid transactions into the conf file so that it can be read by
// the input format.
// Write the current set of valid transactions into the conf file so that it can be read by
// the input format.
private void recordValidTxns() throws LockException {
  final HiveTxnManager mgr = SessionState.get().getTxnMgr();
  final String validTxnsString = mgr.getValidTxns().toString();
  conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnsString);
  if (plan.getFetchTask() != null) {
    /**
     * This is needed for {@link HiveConf.ConfVars.HIVEFETCHTASKCONVERSION} optimization which
     * initializes JobConf in FetchOperator before recordValidTxns() but this has to be done
     * after locks are acquired to avoid race conditions in ACID.
     */
    plan.getFetchTask().setValidTxnList(validTxnsString);
  }
  LOG.debug("Encoding valid txns info " + validTxnsString + " txnid:" + mgr.getCurrentTxnId());
}
Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From class Driver, method recordValidTxns (HiveTxnManager variant).
// Write the current set of valid transactions into the conf file
// Write the current set of valid transactions into the conf file so that it can be read by
// the input format. Must be called exactly once per transaction, after locks are acquired.
private void recordValidTxns(HiveTxnManager txnMgr) throws LockException {
  // Guard against a second call within the same transaction: the snapshot is captured
  // once and must not be silently overwritten.
  String oldTxnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
  if (oldTxnString != null && !oldTxnString.isEmpty()) {
    // Message previously referred to a non-existent "recordValidTxn()".
    throw new IllegalStateException("calling recordValidTxns() more than once in the same "
        + JavaUtils.txnIdToString(txnMgr.getCurrentTxnId()));
  }
  ValidTxnList txnList = txnMgr.getValidTxns();
  String txnStr = txnList.toString();
  conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr);
  LOG.debug("Encoding valid txns info " + txnStr + " txnid:" + txnMgr.getCurrentTxnId());
}
Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From class MaterializationsInvalidationCache, method addMaterializedView.
/**
 * Adds the materialized view to the cache.
 *
 * @param dbName database the materialized view belongs to
 * @param tableName name of the materialized view
 * @param tablesUsed tables used by the materialized view (qualified "db.table" names)
 * @param validTxnList transaction snapshot string captured for the view; null when the
 *        view was created over non-transactional tables
 * @param opType operation that triggered the call (CREATE, ALTER, or load)
 */
private void addMaterializedView(String dbName, String tableName, Set<String> tablesUsed, String validTxnList, OpType opType) {
  if (disable) {
    // Nothing to do
    return;
  }
  // We are going to create the map for each view in the given database.
  // putIfAbsent keeps whichever map won the race; fall back to the existing one.
  ConcurrentMap<String, MaterializationInvalidationInfo> cq = new ConcurrentHashMap<String, MaterializationInvalidationInfo>();
  final ConcurrentMap<String, MaterializationInvalidationInfo> prevCq = materializations.putIfAbsent(dbName, cq);
  if (prevCq != null) {
    cq = prevCq;
  }
  // important information in the registry to account for rewriting invalidation
  if (validTxnList == null) {
    // This can happen when the materialized view was created on non-transactional tables
    return;
  }
  if (opType == OpType.CREATE || opType == OpType.ALTER) {
    // You store the materialized view
    cq.put(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList));
  } else {
    // Load path: replay modification events for every table the view depends on,
    // so the invalidation state reflects commits that happened while we were away.
    ValidTxnList txnList = new ValidReadTxnList(validTxnList);
    for (String qNameTableUsed : tablesUsed) {
      // First we insert a new tree set to keep table modifications, unless it already exists
      ConcurrentSkipListMap<Long, Long> modificationsTree = new ConcurrentSkipListMap<Long, Long>();
      final ConcurrentSkipListMap<Long, Long> prevModificationsTree = tableModifications.putIfAbsent(qNameTableUsed, modificationsTree);
      if (prevModificationsTree != null) {
        modificationsTree = prevModificationsTree;
      }
      // check if the MV is still valid.
      try {
        // qNameTableUsed is "db.table"; split into its two components for the txn lookup.
        String[] names = qNameTableUsed.split("\\.");
        BasicTxnInfo e = handler.getTxnHandler().getFirstCompletedTransactionForTableAfterCommit(names[0], names[1], txnList);
        if (!e.isIsnull()) {
          // Record the txn id -> commit time of the first commit after the snapshot:
          modificationsTree.put(e.getTxnid(), e.getTime());
          // a modification event that was in the metastore.
          continue;
        }
      } catch (MetaException ex) {
        // Best-effort cache load: on metastore failure, skip caching this view entirely.
        LOG.debug("Materialized view " + Warehouse.getQualifiedName(dbName, tableName) + " ignored; error loading view into invalidation cache", ex);
        return;
      }
    }
    // For LOAD, you only add it if it does exist as you might be loading an outdated MV
    cq.putIfAbsent(tableName, new MaterializationInvalidationInfo(tablesUsed, validTxnList));
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Cached materialized view for rewriting in invalidation cache: " + Warehouse.getQualifiedName(dbName, tableName));
  }
}
Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From class MaterializationsInvalidationCache, method getInvalidationTime.
/**
 * Computes the earliest modification time that invalidates the given materialization,
 * or 0L if no invalidating modification is recorded, or Long.MIN_VALUE when the
 * materialization has no transaction snapshot (non-transactional tables).
 */
private long getInvalidationTime(MaterializationInvalidationInfo materialization) {
  String txnListString = materialization.getValidTxnList();
  if (txnListString == null) {
    // This can happen when the materialization was created on non-transactional tables
    return Long.MIN_VALUE;
  }
  // We will obtain the modification time as follows.
  // First, we obtain the first element after high watermark (if any)
  // Then, we iterate through the elements from min open txn till high
  // watermark, updating the modification time after creation if needed
  ValidTxnList txnList = new ValidReadTxnList(txnListString);
  long firstModificationTimeAfterCreation = 0L;
  for (String qNameTableUsed : materialization.getTablesUsed()) {
    // NOTE(review): tableModifications.get(...) is dereferenced without a null check —
    // presumably every used table is registered before this runs; verify against callers.
    final Entry<Long, Long> tn = tableModifications.get(qNameTableUsed).higherEntry(txnList.getHighWatermark());
    if (tn != null) {
      // Keep the smallest modification time strictly above the high watermark.
      if (firstModificationTimeAfterCreation == 0L || tn.getValue() < firstModificationTimeAfterCreation) {
        firstModificationTimeAfterCreation = tn.getValue();
      }
    }
    // when this transaction was being executed
    if (txnList.getMinOpenTxn() != null) {
      // Invalid transaction list is sorted
      int pos = 0;
      // Scan modifications in [minOpenTxn, highWatermark); only those whose txn id
      // appears in the (sorted) invalid list count as invalidating events.
      for (Map.Entry<Long, Long> t : tableModifications.get(qNameTableUsed).subMap(txnList.getMinOpenTxn(), txnList.getHighWatermark()).entrySet()) {
        while (pos < txnList.getInvalidTransactions().length && txnList.getInvalidTransactions()[pos] != t.getKey()) {
          pos++;
        }
        if (pos >= txnList.getInvalidTransactions().length) {
          // Exhausted the invalid list; no further matches possible for this table.
          break;
        }
        if (firstModificationTimeAfterCreation == 0L || t.getValue() < firstModificationTimeAfterCreation) {
          firstModificationTimeAfterCreation = t.getValue();
        }
      }
    }
  }
  return firstModificationTimeAfterCreation;
}
Aggregations