Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
From the class TestCleaner, method testReadyForCleaningPileup:
@Test
public void testReadyForCleaningPileup() throws Exception {
  String dbName = "default";
  String tblName = "trfcp";
  String partName = "ds=today";
  Table t = newTable(dbName, tblName, true);
  Partition p = newPartition(t, "today");

  // Block the cleaner with an open txn
  long blockingTxn = openTxn();

  // Minor compaction
  addBaseFile(t, p, 20L, 20);
  addDeltaFile(t, p, 21L, 21L, 1);
  addDeltaFile(t, p, 22L, 22L, 1);
  burnThroughTransactions(dbName, tblName, 22);
  CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MINOR);
  rqst.setPartitionname(partName);
  compactInTxn(rqst);
  addDeltaFile(t, p, 21L, 22L, 2);
  startCleaner();

  // Make sure the cleaner didn't remove anything and cleaning is still queued
  List<Path> paths = getDirectories(conf, t, p);
  Assert.assertEquals("Expected 4 files after minor compaction, instead these files were present " + paths,
      4, paths.size());
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Expected 1 compaction in queue, got: " + rsp.getCompacts(), 1, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState());
  Assert.assertEquals(CompactionType.MINOR, rsp.getCompacts().get(0).getType());

  // Major compaction
  addDeltaFile(t, p, 23L, 23L, 1);
  addDeltaFile(t, p, 24L, 24L, 1);
  burnThroughTransactions(dbName, tblName, 2);
  rqst = new CompactionRequest(dbName, tblName, CompactionType.MAJOR);
  rqst.setPartitionname(partName);
  long compactTxn = compactInTxn(rqst);
  addBaseFile(t, p, 24L, 24, compactTxn);
  startCleaner();

  // Make sure the cleaner didn't remove anything and both cleanings are still queued
  paths = getDirectories(conf, t, p);
  Assert.assertEquals("Expected 7 files after major compaction, instead these files were present " + paths,
      7, paths.size());
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Expected 2 compactions in queue, got: " + rsp.getCompacts(), 2, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(0).getState());
  Assert.assertEquals(TxnStore.CLEANING_RESPONSE, rsp.getCompacts().get(1).getState());

  // Unblock the cleaner and run again
  txnHandler.commitTxn(new CommitTxnRequest(blockingTxn));
  startCleaner();
  startCleaner();

  // Make sure the cleaner removed everything below base_24 and both compactions succeeded
  paths = getDirectories(conf, t, p);
  Assert.assertEquals(1, paths.size());
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals("Expected 2 compactions in queue, got: " + rsp.getCompacts(), 2, rsp.getCompactsSize());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
  Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(1).getState());
}
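The three clusters of queue assertions above follow one pattern: check the queue size, then check every entry's state. A small helper like the following (hypothetical, not part of the Hive test, assuming the same txnHandler fixture) condenses it:

import java.util.List;
import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.junit.Assert;

// Hypothetical helper: assert that exactly expectedCount compactions are queued
// and that every one of them is in expectedState.
private void assertQueuedStates(String expectedState, int expectedCount) throws Exception {
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  Assert.assertEquals("Unexpected queue: " + compacts, expectedCount, rsp.getCompactsSize());
  for (ShowCompactResponseElement element : compacts) {
    Assert.assertEquals(expectedState, element.getState());
  }
}

With it, the three checkpoints reduce to assertQueuedStates(TxnStore.CLEANING_RESPONSE, 1), assertQueuedStates(TxnStore.CLEANING_RESPONSE, 2), and assertQueuedStates(TxnStore.SUCCEEDED_RESPONSE, 2).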
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
From the class TestDeltaFilesMetrics, method testDeltaFileMetricUnpartitionedTable:
@Test
public void testDeltaFileMetricUnpartitionedTable() throws Exception {
  String dbName = "default";
  String tblName = "dp";
  Table t = newTable(dbName, tblName, false);
  List<LockComponent> components = new ArrayList<>();

  addBaseFile(t, null, 20L, 20);
  addDeltaFile(t, null, 21L, 22L, 2);
  addDeltaFile(t, null, 23L, 24L, 20);

  components.add(createLockComponent(dbName, tblName, null));
  burnThroughTransactions(dbName, tblName, 24);
  long txnId = openTxn();

  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnId);
  LockResponse res = txnHandler.lock(req);
  Assert.assertEquals(LockState.ACQUIRED, res.getState());

  long writeId = allocateWriteId(dbName, tblName, txnId);
  Assert.assertEquals(25, writeId);
  txnHandler.commitTxn(new CommitTxnRequest(txnId));

  startInitiator();
  TimeUnit.SECONDS.sleep(2);
  // 2 active deltas, 1 small delta, 0 obsolete deltas
  verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 2), MetricsConstants.COMPACTION_NUM_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 1), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);

  startWorker();
  TimeUnit.SECONDS.sleep(2);
  // 0 active deltas, 0 small deltas, 2 obsolete deltas
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(dbName + "." + tblName, 2), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);

  startCleaner();
  TimeUnit.SECONDS.sleep(2);
  // 0 active deltas, 0 small deltas, 0 obsolete deltas
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_SMALL_DELTAS);
  verifyDeltaMetricsMatch(ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS);
}
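The createLockComponent helper is not shown in this excerpt. A plausible sketch using the metastore's LockComponentBuilder (an assumption: the actual helper in TestDeltaFilesMetrics may construct the LockComponent differently) would be:

import org.apache.hadoop.hive.metastore.LockComponentBuilder;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockComponent;

// Sketch of a lock-component factory; a shared-read lock with an INSERT
// operation type is enough to register a transactional write for the table.
static LockComponent createLockComponent(String db, String table, String partition) {
  LockComponentBuilder builder = new LockComponentBuilder()
      .setDbName(db)
      .setTableName(table)
      .setShared()
      .setOperationType(DataOperationType.INSERT);
  if (partition != null) {
    builder.setPartitionName(partition); // null means the whole (unpartitioned) table
  }
  return builder.build();
}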
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
From the class DbTxnManager, method commitTxn:
@Override
public void commitTxn() throws LockException {
  if (!isTxnOpen()) {
    throw new RuntimeException("Attempt to commit before opening a transaction");
  }
  try {
    // Release locks and stop the heartbeat in clearLocksAndHB so the same code
    // path is shared with the replCommitTxn flow.
    clearLocksAndHB();
    LOG.debug("Committing txn " + JavaUtils.txnIdToString(txnId));
    CommitTxnRequest commitTxnRequest = new CommitTxnRequest(txnId);
    commitTxnRequest.setExclWriteEnabled(conf.getBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK));
    if (replPolicy != null) {
      commitTxnRequest.setReplPolicy(replPolicy);
      commitTxnRequest.setTxn_type(TxnType.DEFAULT);
    }
    getMS().commitTxn(commitTxnRequest);
  } catch (NoSuchTxnException e) {
    LOG.error("Metastore could not find " + JavaUtils.txnIdToString(txnId));
    throw new LockException(e, ErrorMsg.TXN_NO_SUCH_TRANSACTION, JavaUtils.txnIdToString(txnId));
  } catch (TxnAbortedException e) {
    LockException le = new LockException(e, ErrorMsg.TXN_ABORTED, JavaUtils.txnIdToString(txnId), e.getMessage());
    LOG.error(le.getMessage());
    throw le;
  } catch (TException e) {
    throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e);
  } finally {
    // Reset transaction state in resetTxnInfo so the same code path is shared
    // with the replCommitTxn flow.
    resetTxnInfo();
  }
}
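For comparison, the same commit path looks like this when driven directly through IMetaStoreClient, which builds the CommitTxnRequest internally. This is a minimal sketch, not Hive source, with simplified error handling:

import java.util.Collections;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public class CommitSketch {
  static void openAndCommit(IMetaStoreClient client) throws Exception {
    long txnId = client.openTxn("me"); // open a txn for user "me"
    try {
      // ... perform transactional writes under txnId ...
      client.commitTxn(txnId);         // wraps new CommitTxnRequest(txnId)
    } catch (Exception e) {
      client.abortTxns(Collections.singletonList(txnId)); // roll back on failure
      throw e;
    }
  }
}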
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
From the class TestTxnHandler, method testValidTxnsNoneOpen:
@Test
public void testValidTxnsNoneOpen() throws Exception {
  txnHandler.openTxns(new OpenTxnRequest(2, "me", "localhost"));
  txnHandler.commitTxn(new CommitTxnRequest(1));
  txnHandler.commitTxn(new CommitTxnRequest(2));

  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(2L, txnsInfo.getTxn_high_water_mark());
  assertEquals(0, txnsInfo.getOpen_txns().size());

  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(2L, txns.getTxn_high_water_mark());
  assertEquals(0, txns.getOpen_txns().size());
}
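A natural follow-on (a sketch, assuming the same txnHandler fixture and TxnCommonUtils from the metastore txn package in recent Hive versions) is to turn that response into a ValidTxnList: with no open transactions, every txn up to the high-water mark is readable.

import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils;

// Passing 0 as the current txn means the caller is not inside a transaction.
ValidTxnList validTxns = TxnCommonUtils.createValidReadTxnList(txnHandler.getOpenTxns(), 0);
assertTrue(validTxns.isTxnValid(1)); // committed txn 1 is readable
assertTrue(validTxns.isTxnValid(2)); // committed txn 2 is readable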
Use of org.apache.hadoop.hive.metastore.api.CommitTxnRequest in project hive by apache.
From the class TestTxnHandler, method testValidTxnsSomeOpen:
@Test
public void testValidTxnsSomeOpen() throws Exception {
  txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost"));
  txnHandler.abortTxn(new AbortTxnRequest(1));
  txnHandler.commitTxn(new CommitTxnRequest(2));

  GetOpenTxnsInfoResponse txnsInfo = txnHandler.getOpenTxnsInfo();
  assertEquals(3L, txnsInfo.getTxn_high_water_mark());
  assertEquals(2, txnsInfo.getOpen_txns().size());
  assertEquals(1L, txnsInfo.getOpen_txns().get(0).getId());
  assertEquals(TxnState.ABORTED, txnsInfo.getOpen_txns().get(0).getState());
  assertEquals(3L, txnsInfo.getOpen_txns().get(1).getId());
  assertEquals(TxnState.OPEN, txnsInfo.getOpen_txns().get(1).getState());

  GetOpenTxnsResponse txns = txnHandler.getOpenTxns();
  assertEquals(3L, txns.getTxn_high_water_mark());
  assertEquals(2, txns.getOpen_txns().size());

  // Mark which txn ids appear in the open list (Java initializes the array to false)
  boolean[] saw = new boolean[4];
  for (Long tid : txns.getOpen_txns()) {
    saw[tid.intValue()] = true;
  }
  assertTrue(saw[1]);   // the aborted txn is still reported (not readable)
  assertFalse(saw[2]);  // the committed txn is no longer listed
  assertTrue(saw[3]);   // the third txn is still open
}
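The fixed-size boolean array works but is easy to misindex; an equivalent set-based check (a sketch under the same fixture assumptions as the test above) reads more directly:

import java.util.HashSet;
import java.util.Set;

Set<Long> openOrAborted = new HashSet<>(txns.getOpen_txns());
assertTrue(openOrAborted.contains(1L));   // aborted txn still listed
assertFalse(openOrAborted.contains(2L));  // committed txn gone
assertTrue(openOrAborted.contains(3L));   // still open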