
Example 26 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

Class TestCleaner, method testReadyForCleaningPileup.

@Test
public void testReadyForCleaningPileup() throws Exception {
    String dbName = "default";
    String tblName = "trfcp";
    String partName = "ds=today";
    Table t = newTable(dbName, tblName, true);
    Partition p = newPartition(t, "today");
    // block cleaner with an open txn
    long blockingTxn = openTxn();
    // minor compaction
    addBaseFile(t, p, 20L, 20);
    addDeltaFile(t, p, 21L, 21L, 1);
    addDeltaFile(t, p, 22L, 22L, 1);
    burnThroughTransactions(dbName, tblName, 22);
    CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MINOR);
    rqst.setPartitionname(partName);
    compactInTxn(rqst);
    addDeltaFile(t, p, 21, 22, 2);
    startCleaner();
    // make sure cleaner didn't remove anything, and cleaning is still queued
    List<Path> paths = getDirectories(conf, t, p);
    Assert.assertEquals("Expected 4 files after minor compaction, instead these files were present " + paths, 4, paths.size());
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals("Expected 1 compaction in queue, got: " + rsp.getCompacts(), 1, rsp.getCompactsSize());
    Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState());
    Assert.assertEquals(CompactionType.MINOR, rsp.getCompacts().get(0).getType());
    // major compaction
    addDeltaFile(t, p, 23L, 23L, 1);
    addDeltaFile(t, p, 24L, 24L, 1);
    burnThroughTransactions(dbName, tblName, 2);
    rqst = new CompactionRequest(dbName, tblName, CompactionType.MAJOR);
    rqst.setPartitionname(partName);
    long compactTxn = compactInTxn(rqst);
    addBaseFile(t, p, 24, 24, compactTxn);
    startCleaner();
    // make sure cleaner didn't remove anything, and both cleanings are still queued
    paths = getDirectories(conf, t, p);
    Assert.assertEquals("Expected 7 files after minor compaction, instead these files were present " + paths, 7, paths.size());
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals("Expected 2 compactions in queue, got: " + rsp.getCompacts(), 2, rsp.getCompactsSize());
    Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState());
    Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(1).getState());
    // unblock the cleaner and run again
    txnHandler.commitTxn(new CommitTxnRequest(blockingTxn));
    startCleaner();
    startCleaner();
    // make sure cleaner removed everything below base_24, and both compactions are successful
    paths = getDirectories(conf, t, p);
    Assert.assertEquals(1, paths.size());
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals("Expected 2 compactions in queue, got: " + rsp.getCompacts(), 2, rsp.getCompactsSize());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(1).getState());
}
Also used: Path (org.apache.hadoop.fs.Path), CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Partition (org.apache.hadoop.hive.metastore.api.Partition), Table (org.apache.hadoop.hive.metastore.api.Table), ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest), CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest), Test (org.junit.Test)
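
The scenario above boils down to a small pattern: an open transaction that started before the compaction keeps the Cleaner from deleting obsolete files, and committing that transaction with a CommitTxnRequest lets the next Cleaner cycle proceed. A minimal sketch follows, assuming txnHandler is an initialized TxnStore as in the compactor test setup; the surrounding test helpers are omitted and the txn id handling is illustrative.

// Minimal sketch: commit the blocking transaction so the Cleaner can make progress.
// Assumptions: txnHandler is a TxnStore set up as in the test base class; a compaction
// has already been queued and is sitting in the "ready for cleaning" state.
OpenTxnsResponse open = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
long blockingTxn = open.getTxn_ids().get(0);
// ... the Worker finishes the compaction, but the Cleaner must wait while blockingTxn is open ...
txnHandler.commitTxn(new CommitTxnRequest(blockingTxn));
// ... the next Cleaner run can now remove everything made obsolete by the compaction ...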

Example 27 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

Class TestDeltaFilesMetrics, method testDeltaFileMetricUnpartitionedTable.

@Test
public void testDeltaFileMetricUnpartitionedTable() throws Exception {
    String dbName = "default";
    String tblName = "dp";
    Table t = newTable(dbName, tblName, false);
    List<LockComponent> components = new ArrayList<>();
    addBaseFile(t, null, 20L, 20);
    addDeltaFile(t, null, 21L, 22L, 2);
    addDeltaFile(t, null, 23L, 24L, 20);
    components.add(createLockComponent(dbName, tblName, null));
    burnThroughTransactions(dbName, tblName, 24);
    long txnId = openTxn();
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnId);
    LockResponse res = txnHandler.lock(req);
    Assert.assertEquals(LockState.ACQUIRED, res.getState());
    long writeId = allocateWriteId(dbName, tblName, txnId);
    Assert.assertEquals(25, writeId);
    txnHandler.commitTxn(new CommitTxnRequest(txnId));
    startInitiator();
    TimeUnit.SECONDS.sleep(2);
    // 2 active deltas
    // 1 small delta
    // 0 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 2), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 1), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    startWorker();
    TimeUnit.SECONDS.sleep(2);
    // 0 active deltas
    // 0 small deltas
    // 2 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 2), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
    startCleaner();
    TimeUnit.SECONDS.sleep(2);
    // 0 active deltas
    // 0 small deltas
    // 0 obsolete deltas
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
    verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ArrayList (java.util.ArrayList), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), Test (org.junit.Test)
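
The createLockComponent helper used above is not shown in this snippet; as a hedged illustration, a table-level component built directly against the thrift classes might look like the sketch below. The SHARED_READ lock type and INSERT operation type are assumptions for the example, not necessarily what the test helper picks.

// Hedged sketch: building a table-level LockComponent and LockRequest by hand.
// Assumptions: the lock type and operation type are illustrative choices; dbName,
// tblName, and txnId come from the surrounding test.
LockComponent comp = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, dbName);
comp.setTablename(tblName);
comp.setOperationType(DataOperationType.INSERT);
comp.setIsTransactional(true);
List<LockComponent> components = new ArrayList<>();
components.add(comp);
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnId);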

Example 28 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

Class DbTxnManager, method commitTxn.

@Override
public void commitTxn() throws LockException {
    if (!isTxnOpen()) {
        throw new RuntimeException("Attempt to commit before opening a transaction");
    }
    try {
        // do all state clearing in clearLocksAndHB() so that the same code is shared with the replCommitTxn flow.
        clearLocksAndHB();
        LOG.debug("Committing txn " + JavaUtils.txnIdToString(txnId));
        CommitTxnRequest commitTxnRequest = new CommitTxnRequest(txnId);
        commitTxnRequest.setExclWriteEnabled(conf.getBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK));
        if (replPolicy != null) {
            commitTxnRequest.setReplPolicy(replPolicy);
            commitTxnRequest.setTxn_type(TxnType.DEFAULT);
        }
        getMS().commitTxn(commitTxnRequest);
    } catch (NoSuchTxnException e) {
        LOG.error("Metastore could not find " + JavaUtils.txnIdToString(txnId));
        throw new LockException(e, ErrorMsg.TXN_NO_SUCH_TRANSACTION, JavaUtils.txnIdToString(txnId));
    } catch (TxnAbortedException e) {
        LockException le = new LockException(e, ErrorMsg.TXN_ABORTED, JavaUtils.txnIdToString(txnId), e.getMessage());
        LOG.error(le.getMessage());
        throw le;
    } catch (TException e) {
        throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e);
    } finally {
        // do all state reset in resetTxnInfo() so that the same code is shared with the replCommitTxn flow.
        resetTxnInfo();
    }
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), TException (org.apache.thrift.TException), TxnAbortedException (org.apache.hadoop.hive.metastore.api.TxnAbortedException), NoSuchLockException (org.apache.hadoop.hive.metastore.api.NoSuchLockException), NoSuchTxnException (org.apache.hadoop.hive.metastore.api.NoSuchTxnException)
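
Outside DbTxnManager the same request can be assembled and sent directly. The sketch below is a hedged distillation of the method above, assuming client is a metastore client handle exposing the same commitTxn(CommitTxnRequest) overload that getMS() returns here, and that conf, replPolicy, and txnId are already in scope.

// Hedged sketch: a CommitTxnRequest with the optional fields used above.
// Assumptions: client, conf, replPolicy, and txnId are placeholders supplied by the caller.
CommitTxnRequest rqst = new CommitTxnRequest(txnId);
rqst.setExclWriteEnabled(conf.getBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK));
if (replPolicy != null) {
    // replication flows also tag the transaction type, mirroring DbTxnManager.commitTxn
    rqst.setReplPolicy(replPolicy);
    rqst.setTxn_type(TxnType.DEFAULT);
}
client.commitTxn(rqst);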

Example 29 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

Class TestTxnHandler, method testValidTxnsNoneOpen.

@Test
public void testValidTxnsNoneOpen() throws Exception {
    txnHandler.openTxns(new OpenTxnRequest(2, "me", "localhost"));
    txnHandler.commitTxn(new CommitTxnRequest(1));
    txnHandler.commitTxn(new CommitTxnRequest(2));
    GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
    assertEquals(2L, txnsInfo.getTxn_high_water_mark());
    assertEquals(0, txnsInfo.getOpen_txns().size());
    GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
    assertEquals(2L, txns.getTxn_high_water_mark());
    assertEquals(0, txns.getOpen_txns().size());
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse), OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest), GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse), Test (org.junit.Test)

Example 30 with CommitTxnRequest

Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.

Class TestTxnHandler, method testValidTxnsSomeOpen.

@Test
public void testValidTxnsSomeOpen() throws Exception {
    txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
    txnHandler.abortTxn(new AbortTxnRequest(1));
    txnHandler.commitTxn(new CommitTxnRequest(2));
    GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
    assertEquals(3L, txnsInfo.getTxn_high_water_mark());
    assertEquals(2, txnsInfo.getOpen_txns().size());
    assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
    assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
    assertEquals(3L, txnsInfo.getOpen_txns().get(1).getId());
    assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());
    GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
    assertEquals(3L, txns.getTxn_high_water_mark());
    assertEquals(2, txns.getOpen_txns().size());
    boolean[] saw = new boolean[4];
    for (int i = 0; i < saw.length; i++) saw[i] = false;
    for (Long tid : txns.getOpen_txns()) {
        saw[tid.intValue()] = true;
    }
    assertTrue(saw[1]);
    assertFalse(saw[2]);
    assertTrue(saw[3]);
}
Also used: CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest), GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse), OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest), AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest), GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse), Test (org.junit.Test)
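
Both tests above are snapshot checks: after the commits and aborts, GetOpenTxnsResponse carries the high-water mark plus every txn id a reader must still exclude. As a hedged sketch, that response can be turned into a ValidTxnList with the TxnCommonUtils helper available in recent Hive metastore versions (an assumption; passing 0 as the second argument means the caller has no transaction of its own).

// Hedged sketch: derive a reader snapshot from the open-transaction report.
// Assumption: TxnCommonUtils.createValidReadTxnList is available, as in recent Hive releases.
GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
ValidTxnList validTxns = TxnCommonUtils.createValidReadTxnList(openTxns, 0);
// writeToString() produces the serialized snapshot that readers consume
String snapshot = validTxns.writeToString();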

Aggregations

CommitTxnRequest (org.apache.hadoop.hive.metastore.api.CommitTxnRequest): 46 usages
Test (org.junit.Test): 41 usages
ArrayList (java.util.ArrayList): 27 usages
LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent): 27 usages
LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest): 27 usages
LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse): 27 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 26 usages
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 22 usages
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 22 usages
Partition (org.apache.hadoop.hive.metastore.api.Partition): 16 usages
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 16 usages
OpenTxnRequest (org.apache.hadoop.hive.metastore.api.OpenTxnRequest): 10 usages
GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse): 7 usages
OpenTxnsResponse (org.apache.hadoop.hive.metastore.api.OpenTxnsResponse): 6 usages
AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest): 5 usages
ValidTxnList (org.apache.hadoop.hive.common.ValidTxnList): 4 usages
AllocateTableWriteIdsRequest (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest): 4 usages
AllocateTableWriteIdsResponse (org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse): 4 usages
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 3 usages
GetOpenTxnsInfoResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse): 3 usages