Search in sources :

Example 16 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

From the class MaterializationsInvalidationCache, the method cleanup.

/**
 * Removes transaction events that are not relevant anymore.
 * @param minTime events generated before this time (ms) can be deleted from the cache
 * @return number of events that were deleted from the cache
 */
public long cleanup(long minTime) {
    // Nothing to clean if the cache is disabled or has not been initialized yet.
    if (disable || !initialized) {
        // Bail out
        return 0L;
    }
    // We execute the cleanup in two steps.
    // First we gather, per qualified table name, the ids of the transactions that
    // must be kept because some materialized view still needs them to determine
    // its validity.
    final Multimap<String, Long> keepTxnInfos = HashMultimap.create();
    for (Map.Entry<String, ConcurrentMap<String, MaterializationInvalidationInfo>> e : materializations.entrySet()) {
        for (MaterializationInvalidationInfo m : e.getValue().values()) {
            // Snapshot of valid transactions recorded for this materialized view.
            ValidTxnList txnList = new ValidReadTxnList(m.getValidTxnList());
            // True when the currently recorded invalidating txn was added to
            // keepTxnInfos on behalf of this MV only, so it may be dropped again
            // if an older (by commit time) invalidating txn supersedes it.
            boolean canBeDeleted = false;
            String currentTableForInvalidatingTxn = null;
            long currentInvalidatingTxnId = 0L;
            long currentInvalidatingTxnTime = 0L;
            for (String qNameTableUsed : m.getTablesUsed()) {
                // First modification of this table with a txn id above the MV's
                // high watermark, i.e. a candidate invalidating transaction.
                final Entry<Long, Long> tn = tableModifications.get(qNameTableUsed).higherEntry(txnList.getHighWatermark());
                if (tn != null) {
                    if (currentInvalidatingTxnTime == 0L || tn.getValue() < currentInvalidatingTxnTime) {
                        // 1.- If the previously recorded invalidating transaction was
                        // only kept for this MV and is old enough to be evicted, it
                        // no longer needs to be kept (if needed).
                        if (canBeDeleted && currentInvalidatingTxnTime < minTime) {
                            keepTxnInfos.remove(currentTableForInvalidatingTxn, currentInvalidatingTxnId);
                        }
                        // 2.- We add this transaction to the transactions that should be kept.
                        canBeDeleted = !keepTxnInfos.get(qNameTableUsed).contains(tn.getKey());
                        keepTxnInfos.put(qNameTableUsed, tn.getKey());
                        // 3.- We record this transaction as the current invalidating transaction.
                        currentTableForInvalidatingTxn = qNameTableUsed;
                        currentInvalidatingTxnId = tn.getKey();
                        currentInvalidatingTxnTime = tn.getValue();
                    }
                }
                // Modifications made by transactions that were open when the MV
                // snapshot was taken can also invalidate it.
                if (txnList.getMinOpenTxn() != null) {
                    // Invalid transaction list is sorted, so we walk it with a
                    // single advancing cursor while scanning the sorted submap.
                    int pos = 0;
                    for (Entry<Long, Long> t : tableModifications.get(qNameTableUsed).subMap(txnList.getMinOpenTxn(), txnList.getHighWatermark()).entrySet()) {
                        // Advance the cursor until it points at this modification's txn id.
                        while (pos < txnList.getInvalidTransactions().length && txnList.getInvalidTransactions()[pos] != t.getKey()) {
                            pos++;
                        }
                        // No more invalid transactions to match against.
                        if (pos >= txnList.getInvalidTransactions().length) {
                            break;
                        }
                        if (currentInvalidatingTxnTime == 0L || t.getValue() < currentInvalidatingTxnTime) {
                            // 1.- The previously recorded invalidating transaction no
                            // longer needs to be kept (if needed).
                            if (canBeDeleted && currentInvalidatingTxnTime < minTime) {
                                keepTxnInfos.remove(currentTableForInvalidatingTxn, currentInvalidatingTxnId);
                            }
                            // 2.- We add this transaction to the transactions that should be kept.
                            canBeDeleted = !keepTxnInfos.get(qNameTableUsed).contains(t.getKey());
                            keepTxnInfos.put(qNameTableUsed, t.getKey());
                            // 3.- We record this transaction as the current invalidating transaction.
                            currentTableForInvalidatingTxn = qNameTableUsed;
                            currentInvalidatingTxnId = t.getKey();
                            currentInvalidatingTxnTime = t.getValue();
                        }
                    }
                }
            }
        }
    }
    // Second, we remove the transactions that are older than minTime and are not
    // referenced by any materialized view gathered above.
    long removed = 0L;
    for (Entry<String, ConcurrentSkipListMap<Long, Long>> e : tableModifications.entrySet()) {
        Collection<Long> c = keepTxnInfos.get(e.getKey());
        for (Iterator<Entry<Long, Long>> it = e.getValue().entrySet().iterator(); it.hasNext(); ) {
            Entry<Long, Long> v = it.next();
            // Delete only events old enough AND not needed by any materialized
            // views.
            if (v.getValue() < minTime && (c.isEmpty() || !c.contains(v.getKey()))) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Transaction removed from cache for table {} -> id: {}, time: {}", e.getKey(), v.getKey(), v.getValue());
                }
                it.remove();
                removed++;
            }
        }
    }
    return removed;
}
Also used : ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ValidReadTxnList(org.apache.hadoop.hive.common.ValidReadTxnList) Entry(java.util.Map.Entry) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap)

Example 17 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

From the class TestHiveMetaStoreTxns, the method testTxnRange.

/**
 * Verifies isTxnRangeValid over ranges spanning an aborted transaction (1),
 * committed transactions (2-4) and an open transaction (5), both against the
 * live metastore snapshot and against a hand-built transaction list.
 */
@Test
public void testTxnRange() throws Exception {
    ValidTxnList validTxns = client.getValidTxns();
    // Before any transaction is opened, nothing in the range is valid.
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, validTxns.isTxnRangeValid(1L, 3L));
    List<Long> tids = client.openTxns("me", 5).getTxn_ids();
    HeartbeatTxnRangeResponse rsp = client.heartbeatTxnRange(1, 5);
    Assert.assertEquals(0, rsp.getNosuch().size());
    Assert.assertEquals(0, rsp.getAborted().size());
    // Abort txn 1, commit txns 2-4; txn 5 stays open.
    client.rollbackTxn(1L);
    for (long txnId = 2L; txnId <= 4L; txnId++) {
        client.commitTxn(txnId);
    }
    validTxns = client.getValidTxns();
    System.out.println("validTxns = " + validTxns);
    // Ranges fully inside the committed block [2, 4] are ALL valid.
    long[][] allValidRanges = { { 2L, 2L }, { 2L, 3L }, { 2L, 4L }, { 3L, 4L } };
    for (long[] range : allValidRanges) {
        Assert.assertEquals(ValidTxnList.RangeResponse.ALL, validTxns.isTxnRangeValid(range[0], range[1]));
    }
    // Ranges touching the aborted txn 1 or the open txn 5 are only SOME valid.
    long[][] partlyValidRanges = { { 1L, 4L }, { 2L, 5L }, { 1L, 2L }, { 4L, 5L } };
    for (long[] range : partlyValidRanges) {
        Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(range[0], range[1]));
    }
    // Ranges containing no committed txn at all are NONE.
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, validTxns.isTxnRangeValid(1L, 1L));
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, validTxns.isTxnRangeValid(5L, 10L));
    // Hand-built list: high watermark 10, min open txn 5, invalid txns {4, 5, 6}.
    validTxns = new ValidReadTxnList("10:5:4,5,6:");
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE, validTxns.isTxnRangeValid(4, 6));
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL, validTxns.isTxnRangeValid(7, 10));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(7, 11));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(3, 6));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(4, 7));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME, validTxns.isTxnRangeValid(1, 12));
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL, validTxns.isTxnRangeValid(1, 3));
}
Also used : HeartbeatTxnRangeResponse(org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ValidReadTxnList(org.apache.hadoop.hive.common.ValidReadTxnList) Test(org.junit.Test) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)

Example 18 with ValidTxnList

use of org.apache.hadoop.hive.common.ValidTxnList in project hive by apache.

From the class TestHiveMetaStoreTxns, the method stringifyValidTxns.

/**
 * Round-trips ValidReadTxnList through its string representation and checks
 * that the parsed fields survive serialization, first for a list with only a
 * high water mark and then for one carrying open/invalid transactions.
 */
@Test
public void stringifyValidTxns() throws Exception {
    // A list holding only a high water mark: no open or invalid transactions.
    final String hwmOnly = "1:" + Long.MAX_VALUE + "::";
    ValidTxnList txnList = new ValidReadTxnList(hwmOnly);
    String serialized = txnList.toString();
    Assert.assertEquals(hwmOnly, serialized);
    // First round trip: re-parse the string and verify each field.
    txnList = new ValidReadTxnList(serialized);
    Assert.assertEquals(1, txnList.getHighWatermark());
    Assert.assertNotNull(txnList.getInvalidTransactions());
    Assert.assertEquals(0, txnList.getInvalidTransactions().length);
    serialized = txnList.toString();
    Assert.assertEquals(hwmOnly, serialized);
    // Second round trip must be just as stable.
    txnList = new ValidReadTxnList(serialized);
    Assert.assertEquals(1, txnList.getHighWatermark());
    Assert.assertNotNull(txnList.getInvalidTransactions());
    Assert.assertEquals(0, txnList.getInvalidTransactions().length);
    // Now a list with invalid transactions 3 and 5 and min open txn 3.
    txnList = new ValidReadTxnList("10:3:5:3");
    serialized = txnList.toString();
    // The two invalid ids may serialize in either order.
    if (!serialized.equals("10:3:3:5") && !serialized.equals("10:3:5:3")) {
        Assert.fail("Unexpected string value " + serialized);
    }
    txnList = new ValidReadTxnList(serialized);
    Assert.assertEquals(10, txnList.getHighWatermark());
    Assert.assertNotNull(txnList.getInvalidTransactions());
    Assert.assertEquals(2, txnList.getInvalidTransactions().length);
    // Both ids must be present, and nothing else.
    boolean found3 = false;
    boolean found5 = false;
    for (long txnId : txnList.getInvalidTransactions()) {
        if (txnId == 3) {
            found3 = true;
        } else if (txnId == 5) {
            found5 = true;
        } else {
            Assert.fail("Unexpected value " + txnId);
        }
    }
    Assert.assertTrue(found3);
    Assert.assertTrue(found5);
}
Also used : ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ValidReadTxnList(org.apache.hadoop.hive.common.ValidReadTxnList) Test(org.junit.Test) MetastoreUnitTest(org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)

Aggregations

ValidTxnList (org.apache.hadoop.hive.common.ValidTxnList)18 Test (org.junit.Test)12 ValidCompactorTxnList (org.apache.hadoop.hive.common.ValidCompactorTxnList)8 ValidReadTxnList (org.apache.hadoop.hive.common.ValidReadTxnList)6 MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)4 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)3 ConcurrentSkipListMap (java.util.concurrent.ConcurrentSkipListMap)3 ImmutableMap (com.google.common.collect.ImmutableMap)2 Map (java.util.Map)2 ConcurrentMap (java.util.concurrent.ConcurrentMap)2 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)2 Connection (java.sql.Connection)1 SQLException (java.sql.SQLException)1 Statement (java.sql.Statement)1 ArrayList (java.util.ArrayList)1 Entry (java.util.Map.Entry)1 BasicTxnInfo (org.apache.hadoop.hive.metastore.api.BasicTxnInfo)1 GetValidWriteIdsResponse (org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse)1 HeartbeatTxnRangeResponse (org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse)1 TableValidWriteIds (org.apache.hadoop.hive.metastore.api.TableValidWriteIds)1