Example 6 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

From the class TestDbTxnManager2, the method testValidTxnList:

@Test
public void testValidTxnList() throws Exception {
    long readTxnId = txnMgr.openTxn(ctx, "u0", TxnType.READ_ONLY);
    HiveTxnManager txnManager1 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    txnManager1.openTxn(ctx, "u0");
    // getValidTxns() excludes open read-only txns by default
    ValidTxnList validTxns = txnManager1.getValidTxns();
    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
    // Exclude only open repl-created txns; the open read-only txn is now reported
    validTxns = txnManager1.getValidTxns(Arrays.asList(TxnType.REPL_CREATED));
    Assert.assertEquals(1, validTxns.getInvalidTransactions().length);
    Assert.assertEquals(readTxnId, validTxns.getInvalidTransactions()[0]);
    txnManager1.commitTxn();
    txnMgr.commitTxn();
    long replTxnId = txnMgr.replOpenTxn("default.*", Arrays.asList(1L), "u0").get(0);
    txnManager1 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    txnManager1.openTxn(ctx, "u0");
    // Excludes open read-only txns by default; the open repl-created txn is reported
    validTxns = txnManager1.getValidTxns();
    Assert.assertEquals(1, validTxns.getInvalidTransactions().length);
    Assert.assertEquals(replTxnId, validTxns.getInvalidTransactions()[0]);
    // Exclude open repl-created txns; nothing is reported
    validTxns = txnManager1.getValidTxns(Arrays.asList(TxnType.REPL_CREATED));
    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
    // Exclude open read-only txns; the repl-created txn is still reported
    validTxns = txnManager1.getValidTxns(Arrays.asList(TxnType.READ_ONLY));
    Assert.assertEquals(1, validTxns.getInvalidTransactions().length);
    Assert.assertEquals(replTxnId, validTxns.getInvalidTransactions()[0]);
    CommitTxnRequest commitTxnRequest = new CommitTxnRequest(1L);
    commitTxnRequest.setReplPolicy("default.*");
    commitTxnRequest.setTxn_type(TxnType.REPL_CREATED);
    txnMgr.replCommitTxn(commitTxnRequest);
    // The repl txn is committed, so no txn is left open
    validTxns = txnManager1.getValidTxns();
    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
    // Exclude open read-only txns; still nothing is reported
    validTxns = txnManager1.getValidTxns(Arrays.asList(TxnType.READ_ONLY));
    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
    txnManager1.commitTxn();
}
Also used : CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) Test(org.junit.Test)
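
The repl-commit step above is the core CommitTxnRequest usage in this test: when a replication policy is set on the request, the id passed to the constructor is the source cluster's txn id (1L here, matching the replOpenTxn call), which the metastore maps back to the txn it opened on the target. A minimal sketch of just that step, assuming txnMgr and the ids are set up as in the test:

CommitTxnRequest commitTxnRequest = new CommitTxnRequest(1L); // source-side txn id from replOpenTxn
commitTxnRequest.setReplPolicy("default.*");                  // same policy the txn was opened under
commitTxnRequest.setTxn_type(TxnType.REPL_CREATED);           // mark it as a replication-created txn
txnMgr.replCommitTxn(commitTxnRequest);                       // commits the mapped target txn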

Example 7 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

From the class TestDbTxnManager, the method testHeartbeaterReplicationTxn:

/**
 * Same as testHeartbeater, but tests cleanup of replication txns (TxnType.REPL_CREATED).
 * Note: in TestDbTxnManager, metastore.repl.txn.timeout is set to 30s for testing purposes.
 */
@Test
public void testHeartbeaterReplicationTxn() throws Exception {
    MetastoreConf.ConfVars timeThresholdConfVar = MetastoreConf.ConfVars.REPL_TXN_TIMEOUT;
    Assert.assertTrue(txnMgr instanceof DbTxnManager);
    addTableInput();
    LockException exception = null;
    String replPolicy = "default.*";
    // Case 1: no delay before the reaper runs, so the txn should still be able to commit
    txnMgr.replOpenTxn(replPolicy, Arrays.asList(1L), "fred");
    runReaper();
    try {
        CommitTxnRequest commitTxnRequest = new CommitTxnRequest(1);
        commitTxnRequest.setReplPolicy(replPolicy);
        commitTxnRequest.setTxn_type(TxnType.REPL_CREATED);
        txnMgr.replCommitTxn(commitTxnRequest);
    } catch (LockException e) {
        exception = e;
    }
    Assert.assertNull("Txn commit should be successful", exception);
    txnMgr.replOpenTxn(replPolicy, Arrays.asList(1L), "jerry");
    Thread.sleep(MetastoreConf.getTimeVar(conf, timeThresholdConfVar, TimeUnit.MILLISECONDS));
    runReaper();
    try {
        CommitTxnRequest commitTxnRequest = new CommitTxnRequest(1);
        commitTxnRequest.setReplPolicy(replPolicy);
        commitTxnRequest.setTxn_type(TxnType.REPL_CREATED);
        txnMgr.replCommitTxn(commitTxnRequest);
    } catch (LockException e) {
        exception = e;
    }
    Assert.assertNull("This CommitTxnRequest is no op since transaction is already aborted by reaper.", exception);
    try {
        txnMgr.replRollbackTxn(replPolicy, 1L);
    } catch (LockException e) {
        exception = e;
    }
    Assert.assertNull("This AbortTxnRequest is no op since transaction is already aborted by reaper.", exception);
    Assert.assertEquals(1, Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_TIMED_OUT_TXNS).getCount());
}
Also used : CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest) MetastoreConf(org.apache.hadoop.hive.metastore.conf.MetastoreConf) Test(org.junit.Test)
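
The Javadoc above mentions that metastore.repl.txn.timeout is set to 30s for this test, but the setup code is not shown on this page. A plausible sketch of that setup (an assumption, not taken from the test source):

// In the test's setup, before any repl txn is opened:
MetastoreConf.setTimeVar(conf, MetastoreConf.ConfVars.REPL_TXN_TIMEOUT, 30, TimeUnit.SECONDS);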

Example 8 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

From the class TestDbTxnManagerIsolationProperties, the method multipleGapOpenTxnsNoDirtyRead:

@Test
public void multipleGapOpenTxnsNoDirtyRead() throws Exception {
    driver.run(("drop table if exists gap"));
    driver.run("create table gap (a int, b int) " + "stored as orc TBLPROPERTIES ('transactional'='true')");
    // Create some txns to delete later
    OpenTxnsResponse openTxns = txnHandler.openTxns(new OpenTxnRequest(10, "user", "local"));
    openTxns.getTxn_ids().stream().forEach(txnId -> {
        silentCommitTxn(new CommitTxnRequest(txnId));
    });
    long first = openTxns.getTxn_ids().get(0);
    long last = openTxns.getTxn_ids().get(9);
    // The next txn is the one we use for the low water mark
    driver.run("select * from gap");
    DbTxnManager txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    swapTxnManager(txnMgr2);
    // Make sure that the time window is large enough to consider the gap open
    txnHandler.setOpenTxnTimeOutMillis(30000);
    // Create a gap
    deleteTransactionId(first, last);
    CommandProcessorResponse resp = driver2.compileAndRespond("select * from gap");
    long next = txnMgr2.getCurrentTxnId();
    Assert.assertTrue("Sequence number goes onward", next > last);
    ValidTxnList validTxns = txnMgr2.getValidTxns();
    Assert.assertEquals("Expect to see the gap as open", first, (long) validTxns.getMinOpenTxn());
    txnHandler.setOpenTxnTimeOutMillis(1000);
    // Now we cheat and create a transaction with the first sequence id again, imitating a very slow openTxns call
    setBackSequence(first);
    swapTxnManager(txnMgr);
    driver.compileAndRespond("insert into gap values(1,2)");
    next = txnMgr.getCurrentTxnId();
    Assert.assertEquals(first, next);
    driver.run();
    // Now we run our read query; it should not see the write results of the insert
    swapTxnManager(txnMgr2);
    driver2.run();
    FetchTask fetchTask = driver2.getFetchTask();
    List res = new ArrayList();
    fetchTask.fetch(res);
    Assert.assertEquals("No dirty read", 0, res.size());
}
Also used : CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ArrayList(java.util.ArrayList) OpenTxnRequest(org.apache.hadoop.hive.metastore.api.OpenTxnRequest) ArrayList(java.util.ArrayList) List(java.util.List) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) OpenTxnsResponse(org.apache.hadoop.hive.metastore.api.OpenTxnsResponse) FetchTask(org.apache.hadoop.hive.ql.exec.FetchTask) Test(org.junit.Test)
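
silentCommitTxn near the top of this example is a private helper of TestDbTxnManagerIsolationProperties that this page omits. Since TxnStore.commitTxn throws checked exceptions that a forEach lambda cannot propagate, the helper plausibly just wraps and swallows them; a hedged sketch (an assumption, not the actual source):

private void silentCommitTxn(CommitTxnRequest commitTxnRequest) {
    try {
        txnHandler.commitTxn(commitTxnRequest);
    } catch (Exception e) {
        // ignore: the test only needs these txns committed and out of the way
    }
}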

Example 9 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

From the class TestDeltaFilesMetrics, the method testDeltaFileMetricMultiPartitionedTable:

@Test
public void testDeltaFileMetricMultiPartitionedTable() throws Exception {
    String dbName = "default";
    String tblName = "dp";
    String part1Name = "ds=part1";
    String part2Name = "ds=part2";
    String part3Name = "ds=part3";
    Table t = newTable(dbName, tblName, true);
    List<LockComponent> components = new ArrayList<>();
    Partition p1 = newPartition(t, "part1");
    addDeltaFile(t, p1, 1L, 2L, 2);
    addDeltaFile(t, p1, 3L, 4L, 4);
    Partition p2 = newPartition(t, "part2");
    addBaseFile(t, p2, 5L, 20);
    addDeltaFile(t, p2, 6L, 7L, 2);
    addDeltaFile(t, p2, 8L, 9L, 3);
    addDeltaFile(t, p2, 10L, 11L, 1);
    Partition p3 = newPartition(t, "part3");
    addDeltaFile(t, p3, 12L, 13L, 3);
    addDeltaFile(t, p3, 14L, 15L, 20);
    addDeltaFile(t, p3, 16L, 17L, 50);
    addDeltaFile(t, p3, 18L, 19L, 2);
    components.add(createLockComponent(dbName, tblName, part1Name));
    components.add(createLockComponent(dbName, tblName, part2Name));
    components.add(createLockComponent(dbName, tblName, part3Name));
    burnThroughTransactions(dbName, tblName, 19);
    long txnId = openTxn();
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnId);
    LockResponse res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    allocateWriteId(dbName, tblName, txnId);
    txnHandler.commitTxn(new CommitTxnRequest(txnId));
    HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 2);
    HiveConf.setFloatVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_PCT_THRESHOLD, 0.4f);
    startInitiator();
    TimeUnit.SECONDS.sleep(2);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + part1Name, 2, dbName + "." + tblName + Path.SEPARATOR + part2Name, 3, dbName + "." + tblName + Path.SEPARATOR + part3Name, 4), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + part2Name, 2), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    ShowCompactResponse showCompactResponse = txnHandler.showCompact(new ShowCompactRequest());
    List<ShowCompactResponseElement> compacts = showCompactResponse.getCompacts();
    Assert.assertEquals(2, compacts.size());
    // Need to run two worker sessions to compact all resources in the compaction queue
    startWorker();
    startWorker();
    TimeUnit.SECONDS.sleep(2);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + part1Name, 2, dbName + "." + tblName + Path.SEPARATOR + part2Name, 1), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + part2Name, 3, dbName + "." + tblName + Path.SEPARATOR + part3Name, 4), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    startCleaner();
    startCleaner();
    TimeUnit.SECONDS.sleep(2);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + part1Name, 2, dbName + "." + tblName + Path.SEPARATOR + part2Name, 1), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
}
Also used : CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) ArrayList(java.util.ArrayList) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) ShowCompactResponseElement(org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement) Test(org.junit.Test)
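
createLockComponent is another helper of the test class that this page does not show. Only the db/table/partition wiring is implied by the calls above; the lock type and operation type below are assumptions for illustration:

private LockComponent createLockComponent(String dbName, String tblName, String partName) {
    // A shared-write partition lock is the kind of write that makes the
    // initiator consider the partition for compaction (assumed lock/op types).
    LockComponent component = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, dbName);
    component.setTablename(tblName);
    component.setPartitionname(partName);
    component.setOperationType(DataOperationType.UPDATE);
    return component;
}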

Example 10 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

From the class TestDeltaFilesMetrics, the method testDeltaFileMetricPartitionedTable:

@Test
public void testDeltaFileMetricPartitionedTable() throws Exception {
    String dbName = "default";
    String tblName = "dp";
    String partName = "ds=part1";
    Table t = newTable(dbName, tblName, true);
    List<LockComponent> components = new ArrayList<>();
    Partition p = newPartition(t, "part1");
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 22L, 2);
    addDeltaFile(t, p, 23L, 24L, 20);
    components.add(createLockComponent(dbName, tblName, partName));
    burnThroughTransactions(dbName, tblName, 23);
    long txnId = openTxn();
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnId);
    LockResponse res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    long writeId = allocateWriteId(dbName, tblName, txnId);
    Assert.assertEquals(24, writeId);
    txnHandler.commitTxn(new CommitTxnRequest(txnId));
    startInitiator();
    TimeUnit.SECONDS.sleep(2);
    // 2 active deltas
    // 1 small delta
    // 0 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 2), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 1), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    startWorker();
    TimeUnit.SECONDS.sleep(2);
    // 0 active deltas
    // 0 small deltas
    // 2 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 2), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    addDeltaFile(t, p, 25L, 26L, 2);
    addDeltaFile(t, p, 27L, 28L, 20);
    addDeltaFile(t, p, 29L, 30L, 2);
    burnThroughTransactions(dbName, tblName, 30);
    txnId = openTxn();
    req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnId);
    res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    writeId = allocateWriteId(dbName, tblName, txnId);
    Assert.assertEquals(55, writeId);
    txnHandler.commitTxn(new CommitTxnRequest(txnId));
    // Change these params to initiate MINOR compaction
    HiveConf.setFloatVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_PCT_THRESHOLD, 1.8f);
    HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 2);
    startInitiator();
    TimeUnit.SECONDS.sleep(2);
    // 3 active deltas
    // 2 small deltas
    // 2 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 3), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 2), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 2), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    startCleaner();
    TimeUnit.SECONDS.sleep(2);
    // 3 active deltas
    // 2 small deltas
    // 0 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 3), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 2), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    startWorker();
    TimeUnit.SECONDS.sleep(2);
    // 1 active delta
    // 0 small deltas
    // 3 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 1), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 3), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    startCleaner();
    TimeUnit.SECONDS.sleep(2);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 1), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
}
Also used : CommitTxnRequest(org.apache.hadoop.hive.metastore.api.CommitTxnRequest) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) LockComponent(org.apache.hadoop.hive.metastore.api.LockComponent) LockResponse(org.apache.hadoop.hive.metastore.api.LockResponse) ArrayList(java.util.ArrayList) LockRequest(org.apache.hadoop.hive.metastore.api.LockRequest) Test(org.junit.Test)
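
The open txn, acquire lock, allocate write id, commit sequence appears twice in this example. A sketch that consolidates it into one helper, reusing only calls already shown above (the helper name itself is invented):

private long lockAllocateAndCommit(String dbName, String tblName, List<LockComponent> components) throws Exception {
    long txnId = openTxn();
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnId);
    Assert.assertEquals(LockState.ACQUIRED, txnHandler.lock(req).getState());
    // The returned write id is what the assertions above compare against (24, then 55)
    long writeId = allocateWriteId(dbName, tblName, txnId);
    txnHandler.commitTxn(new CommitTxnRequest(txnId)); // plain commit: no repl policy involved here
    return writeId;
}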

Aggregations

CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest): 46 uses
Test (org.junit.Test): 41 uses
ArrayList (java.util.ArrayList): 27 uses
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 27 uses
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 27 uses
LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse): 27 uses
Table (org.apache.hadoop.hive.metastore.api.Table): 26 uses
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 22 uses
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 22 uses
Partition (org.apache.hadoop.hive.metastore.api.Partition): 16 uses
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 16 uses
OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest): 10 uses
GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse): 7 uses
OpenTxnsResponse (org.apache.hadoop.hive.metastore.api.OpenTxnsResponse): 6 uses
AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest): 5 uses
ValidTxnList (org.apache.hadoop.hive.common.ValidTxnList): 4 uses
AllocateTableWriteIdsRequest (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest): 4 uses
AllocateTableWriteIdsResponse (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse): 4 uses
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 3 uses
GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse): 3 uses