Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From the class MaterializationsInvalidationCache, method cleanup().
/**
 * Removes transaction events that are not relevant anymore.
 * @param minTime events generated before this time (ms) can be deleted from the cache
 * @return number of events that were deleted from the cache
 */
public long cleanup(long minTime) {
  // Nothing to clean if the cache is disabled or has not been initialized yet.
  if (disable || !initialized) {
    // Bail out
    return 0L;
  }
  // We execute the cleanup in two steps.
  // First we gather all the transactions that need to be kept, i.e. the first
  // invalidating transaction for each materialized view (MV). Any other recorded
  // transaction event older than minTime can then be deleted in the second step.
  final Multimap<String, Long> keepTxnInfos = HashMultimap.create();
  for (Map.Entry<String, ConcurrentMap<String, MaterializationInvalidationInfo>> e : materializations.entrySet()) {
    for (MaterializationInvalidationInfo m : e.getValue().values()) {
      // Transaction snapshot taken when the MV was created.
      ValidTxnList txnList = new ValidReadTxnList(m.getValidTxnList());
      // State for the earliest-in-time invalidating transaction found so far for
      // this MV. canBeDeleted is true when the current candidate was added to
      // keepTxnInfos by this MV itself (and thus may be removed again if a
      // better candidate supersedes it).
      boolean canBeDeleted = false;
      String currentTableForInvalidatingTxn = null;
      long currentInvalidatingTxnId = 0L;
      long currentInvalidatingTxnTime = 0L;
      for (String qNameTableUsed : m.getTablesUsed()) {
        // Case A: first transaction on this table committed above the snapshot's
        // high watermark; if present, it invalidates the MV.
        final Entry<Long, Long> tn = tableModifications.get(qNameTableUsed).higherEntry(txnList.getHighWatermark());
        if (tn != null) {
          if (currentInvalidatingTxnTime == 0L || tn.getValue() < currentInvalidatingTxnTime) {
            // 1.- We drop the previous candidate when this MV added it and it is
            // old enough to be eligible for deletion; otherwise it still needs
            // to be kept (if needed).
            if (canBeDeleted && currentInvalidatingTxnTime < minTime) {
              keepTxnInfos.remove(currentTableForInvalidatingTxn, currentInvalidatingTxnId);
            }
            // 2.- We add this transaction to the transactions that should be kept.
            canBeDeleted = !keepTxnInfos.get(qNameTableUsed).contains(tn.getKey());
            keepTxnInfos.put(qNameTableUsed, tn.getKey());
            // 3.- We record this transaction as the current invalidating transaction.
            currentTableForInvalidatingTxn = qNameTableUsed;
            currentInvalidatingTxnId = tn.getKey();
            currentInvalidatingTxnTime = tn.getValue();
          }
        }
        // Case B: transactions on this table that were still open (invalid) in
        // the snapshot but lie below the high watermark also invalidate the MV.
        if (txnList.getMinOpenTxn() != null) {
          // Invalid transaction list is sorted, so a single advancing cursor can
          // intersect it with the (sorted) submap of recorded modifications.
          int pos = 0;
          for (Entry<Long, Long> t : tableModifications.get(qNameTableUsed).subMap(txnList.getMinOpenTxn(), txnList.getHighWatermark()).entrySet()) {
            // Advance the cursor until it reaches this txn id (or runs out).
            while (pos < txnList.getInvalidTransactions().length && txnList.getInvalidTransactions()[pos] != t.getKey()) {
              pos++;
            }
            if (pos >= txnList.getInvalidTransactions().length) {
              break;
            }
            if (currentInvalidatingTxnTime == 0L || t.getValue() < currentInvalidatingTxnTime) {
              // 1.- Drop the superseded candidate under the same conditions as
              // case A; otherwise it remains among the transactions to be kept
              // (if needed).
              if (canBeDeleted && currentInvalidatingTxnTime < minTime) {
                keepTxnInfos.remove(currentTableForInvalidatingTxn, currentInvalidatingTxnId);
              }
              // 2.- We add this transaction to the transactions that should be kept.
              canBeDeleted = !keepTxnInfos.get(qNameTableUsed).contains(t.getKey());
              keepTxnInfos.put(qNameTableUsed, t.getKey());
              // 3.- We record this transaction as the current invalidating transaction.
              currentTableForInvalidatingTxn = qNameTableUsed;
              currentInvalidatingTxnId = t.getKey();
              currentInvalidatingTxnTime = t.getValue();
            }
          }
        }
      }
    }
  }
  // Second, we remove the transactions that are older than minTime and are not
  // needed to compute the invalidation state of any of the materialized views.
  long removed = 0L;
  for (Entry<String, ConcurrentSkipListMap<Long, Long>> e : tableModifications.entrySet()) {
    Collection<Long> c = keepTxnInfos.get(e.getKey());
    for (Iterator<Entry<Long, Long>> it = e.getValue().entrySet().iterator(); it.hasNext(); ) {
      Entry<Long, Long> v = it.next();
      if (v.getValue() < minTime && (c.isEmpty() || !c.contains(v.getKey()))) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Transaction removed from cache for table {} -> id: {}, time: {}", e.getKey(), v.getKey(), v.getValue());
        }
        it.remove();
        removed++;
      }
    }
  }
  return removed;
}
Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From the class TestHiveMetaStoreTxns, method testTxnRange().
/**
 * Verifies {@link ValidTxnList#isTxnRangeValid} against a live metastore
 * (open/commit/rollback a batch of transactions) and against a
 * {@link ValidReadTxnList} parsed from a string with open transactions.
 */
@Test
public void testTxnRange() throws Exception {
  ValidTxnList validTxns = client.getValidTxns();
  // Before anything is opened, no txn in [1, 3] can be valid.
  Assert.assertEquals(ValidTxnList.RangeResponse.NONE, validTxns.isTxnRangeValid(1L, 3L));
  List<Long> openedIds = client.openTxns("me", 5).getTxn_ids();
  HeartbeatTxnRangeResponse heartbeat = client.heartbeatTxnRange(1, 5);
  Assert.assertEquals(0, heartbeat.getNosuch().size());
  Assert.assertEquals(0, heartbeat.getAborted().size());
  // Abort txn 1, commit txns 2..4; txn 5 stays open.
  client.rollbackTxn(1L);
  for (long txnId = 2L; txnId <= 4L; txnId++) {
    client.commitTxn(txnId);
  }
  validTxns = client.getValidTxns();
  System.out.println("validTxns = " + validTxns);
  // Ranges fully inside the committed set.
  long[][] fullyValid = { { 2L, 2L }, { 2L, 3L }, { 2L, 4L }, { 3L, 4L } };
  for (long[] range : fullyValid) {
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL, validTxns.isTxnRangeValid(range[0], range[1]));
  }
  // Ranges overlapping the aborted (1) or still-open (5+) txns.
  long[][] partiallyValid = { { 1L, 4L }, { 2L, 5L }, { 1L, 2L }, { 4L, 5L } };
  for (long[] range : partiallyValid) {
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(range[0], range[1]));
  }
  // Ranges containing no committed txn at all.
  long[][] notValid = { { 1L, 1L }, { 5L, 10L } };
  for (long[] range : notValid) {
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, validTxns.isTxnRangeValid(range[0], range[1]));
  }
  // Parsed list: high watermark 10, min open 5, open txns 4, 5 and 6.
  validTxns = new ValidReadTxnList("10:5:4,5,6:");
  Assert.assertEquals(ValidTxnList.RangeResponse.NONE, validTxns.isTxnRangeValid(4, 6));
  Assert.assertEquals(ValidTxnList.RangeResponse.ALL, validTxns.isTxnRangeValid(7, 10));
  Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(7, 11));
  Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(3, 6));
  Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(4, 7));
  Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(1, 12));
  Assert.assertEquals(ValidTxnList.RangeResponse.ALL, validTxns.isTxnRangeValid(1, 3));
}
Use of org.apache.hadoop.hive.common.ValidTxnList in the Apache Hive project.
From the class TestHiveMetaStoreTxns, method stringifyValidTxns().
/**
 * Round-trips a {@link ValidReadTxnList} through its string form and checks
 * that high watermark and invalid-transaction set survive serialization.
 */
@Test
public void stringifyValidTxns() throws Exception {
  // Test with just high water mark
  final String hwmOnly = "1:" + Long.MAX_VALUE + "::";
  ValidTxnList validTxns = new ValidReadTxnList(hwmOnly);
  String serialized = validTxns.toString();
  Assert.assertEquals(hwmOnly, serialized);
  // First round-trip: parse what we serialized and check the fields.
  validTxns = new ValidReadTxnList(serialized);
  Assert.assertEquals(1, validTxns.getHighWatermark());
  Assert.assertNotNull(validTxns.getInvalidTransactions());
  Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
  serialized = validTxns.toString();
  Assert.assertEquals(hwmOnly, serialized);
  // Second round-trip: the string form must be stable.
  validTxns = new ValidReadTxnList(serialized);
  Assert.assertEquals(1, validTxns.getHighWatermark());
  Assert.assertNotNull(validTxns.getInvalidTransactions());
  Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
  // Test with open transactions
  validTxns = new ValidReadTxnList("10:3:5:3");
  serialized = validTxns.toString();
  // The two invalid txns may be serialized in either order.
  boolean knownForm = serialized.equals("10:3:3:5") || serialized.equals("10:3:5:3");
  if (!knownForm) {
    Assert.fail("Unexpected string value " + serialized);
  }
  validTxns = new ValidReadTxnList(serialized);
  Assert.assertEquals(10, validTxns.getHighWatermark());
  Assert.assertNotNull(validTxns.getInvalidTransactions());
  Assert.assertEquals(2, validTxns.getInvalidTransactions().length);
  // The invalid set must be exactly {3, 5}, in any order.
  boolean foundThree = false;
  boolean foundFive = false;
  for (long tid : validTxns.getInvalidTransactions()) {
    if (tid == 3) {
      foundThree = true;
    } else if (tid == 5) {
      foundFive = true;
    } else {
      Assert.fail("Unexpected value " + tid);
    }
  }
  Assert.assertTrue(foundThree);
  Assert.assertTrue(foundFive);
}
Aggregations