Search in sources :

Example 1 with Initiator

Use of org.apache.hadoop.hive.ql.txn.compactor.Initiator in project hive by apache.

The class TestTxnCommandsForMmTable, method testSnapshotIsolationWithAbortedTxnOnMmTable.

@Test
public void testSnapshotIsolationWithAbortedTxnOnMmTable() throws Exception {
    // Insert two rows into the table.
    runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(1,2)");
    runStatementOnDriver("insert into " + TableExtended.MMTBL + "(a,b) values(3,4)");
    // There should be 2 delta directories
    verifyDirAndResult(2);
    // Initiate a minor compaction request on the table.
    runStatementOnDriver("alter table " + TableExtended.MMTBL + " compact 'MINOR'");
    // Run Compaction Worker to do compaction.
    // But we do not compact a MM table but only transit the compaction request to
    // "ready for cleaning" state in this case.
    runWorker(hiveConf);
    verifyDirAndResult(2);
    // Start an INSERT statement transaction and roll back this transaction.
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true);
    runStatementOnDriver("insert into " + TableExtended.MMTBL + " values (5, 6)");
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);
    // There should be 3 delta directories. The new one is the aborted one.
    verifyDirAndResult(3);
    // Execute SELECT statement and verify the result set (should be two rows).
    int[][] expected = new int[][] { { 1, 2 }, { 3, 4 } };
    List<String> rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
    Assert.assertEquals(stringifyValues(expected), rs);
    // Run Cleaner.
    // This run doesn't do anything for the above aborted transaction since
    // the current compaction request entry in the compaction queue is updated
    // to have highest_write_id when the worker is run before the aborted
    // transaction. Specifically the id is 2 for the entry but the aborted
    // transaction has 3 as writeId. This run does transition the entry
    // "successful".
    runCleaner(hiveConf);
    verifyDirAndResult(3);
    // Execute SELECT and verify that aborted operation is not counted for MM table.
    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
    Assert.assertEquals(stringifyValues(expected), rs);
    // Run initiator to execute CompactionTxnHandler.cleanEmptyAbortedTxns()
    Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXNS"), 1, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXNS"));
    Initiator i = new Initiator();
    i.setThreadId((int) i.getId());
    i.setConf(hiveConf);
    AtomicBoolean stop = new AtomicBoolean(true);
    i.init(stop, new AtomicBoolean());
    i.run();
    // This run of Initiator doesn't add any compaction_queue entry
    // since we only have one MM table with data - we don't compact MM tables.
    verifyDirAndResult(3);
    Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXNS"), 1, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXNS"));
    // Execute SELECT statement and verify that aborted INSERT statement is not counted.
    rs = runStatementOnDriver("select a,b from " + TableExtended.MMTBL + " order by a,b");
    Assert.assertEquals(stringifyValues(expected), rs);
    // Initiate a minor compaction request on the table.
    runStatementOnDriver("alter table " + TableExtended.MMTBL + " compact 'MINOR'");
    // Run worker to delete aborted transaction's delta directory.
    runWorker(hiveConf);
    Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXNS"), 1, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXNS"));
    Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXN_COMPONENTS"), 1, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_COMPONENTS"));
    verifyDirAndResult(2);
    // Run Cleaner to delete rows for the aborted transaction
    // from TXN_COMPONENTS.
    runCleaner(hiveConf);
    // Run initiator to clean the row fro the aborted transaction from TXNS.
    i.run();
    Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXNS"), 0, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXNS"));
    Assert.assertEquals(TxnDbUtil.queryToString(hiveConf, "select * from TXN_COMPONENTS"), 0, TxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_COMPONENTS"));
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Initiator(org.apache.hadoop.hive.ql.txn.compactor.Initiator) Test(org.junit.Test)

Example 2 with Initiator

Use of org.apache.hadoop.hive.ql.txn.compactor.Initiator in project hive by apache.

The class TxnCommandsBaseForTests, method runCompactorThread.

/**
 * Builds the compactor thread matching {@code type}, wires it to the given
 * configuration, and runs it once (the stop flag is pre-set to true so the
 * thread performs a single cycle rather than looping).
 *
 * @param hiveConf configuration handed to the compactor thread
 * @param type which compactor component to run
 * @throws IllegalArgumentException if {@code type} is not a known component
 */
private static void runCompactorThread(HiveConf hiveConf, CompactorThreadType type) throws Exception {
    final CompactorThread compactor;
    if (type == CompactorThreadType.INITIATOR) {
        compactor = new Initiator();
    } else if (type == CompactorThreadType.WORKER) {
        compactor = new Worker();
    } else if (type == CompactorThreadType.CLEANER) {
        compactor = new Cleaner();
    } else {
        throw new IllegalArgumentException("Unknown type: " + type);
    }
    compactor.setThreadId((int) compactor.getId());
    compactor.setConf(hiveConf);
    compactor.init(new AtomicBoolean(true));
    compactor.run();
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Initiator(org.apache.hadoop.hive.ql.txn.compactor.Initiator) CompactorThread(org.apache.hadoop.hive.ql.txn.compactor.CompactorThread) Worker(org.apache.hadoop.hive.ql.txn.compactor.Worker) Cleaner(org.apache.hadoop.hive.ql.txn.compactor.Cleaner)

Aggregations

AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)2 Initiator (org.apache.hadoop.hive.ql.txn.compactor.Initiator)2 Cleaner (org.apache.hadoop.hive.ql.txn.compactor.Cleaner)1 CompactorThread (org.apache.hadoop.hive.ql.txn.compactor.CompactorThread)1 Worker (org.apache.hadoop.hive.ql.txn.compactor.Worker)1 Test (org.junit.Test)1