Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project hive by apache:
class TestTxnHandler, method allocateNextWriteIdRetriesAfterDetectingConflictingConcurrentInsert.
@Test
public void allocateNextWriteIdRetriesAfterDetectingConflictingConcurrentInsert() throws Exception {
  String dbName = "abc";
  String tableName = "def";
  int numTxns = 2;
  int iterations = 20;
  // Use a TxnHandler instance with an increased retry limit so that every iteration
  // can recover from the simulated write-write conflict below.
  long originalLimit = MetastoreConf.getLongVar(conf, MetastoreConf.ConfVars.HMS_HANDLER_ATTEMPTS);
  MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.HMS_HANDLER_ATTEMPTS, iterations + 1);
  try {
    TxnStore txnHandler = TxnUtils.getTxnStore(conf);
    try (Connection dbConn = ((TxnHandler) txnHandler).getDbConn(Connection.TRANSACTION_READ_COMMITTED);
        Statement stmt = dbConn.createStatement()) {
      // Run this multiple times to hit write-write conflicts with relatively high chance.
      for (int i = 0; i < iterations; ++i) {
        // Make sure these 2 tables have no records of our dbName.tableName; this ensures
        // that allocateTableWriteIds() will try to insert into NEXT_WRITE_ID (instead of update).
        stmt.executeUpdate("TRUNCATE TABLE \"NEXT_WRITE_ID\"");
        stmt.executeUpdate("TRUNCATE TABLE \"TXN_TO_WRITE_ID\"");
        dbConn.commit();
        OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(numTxns, "me", "localhost"));
        AllocateTableWriteIdsRequest request = new AllocateTableWriteIdsRequest(dbName, tableName);
        resp.getTxn_ids().forEach(request::addToTxnIds);
        // Thread 1: allocating write ids for dbName.tableName.
        CompletableFuture<AllocateTableWriteIdsResponse> future1 = CompletableFuture.supplyAsync(() -> {
          try {
            return txnHandler.allocateTableWriteIds(request);
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        });
        // Thread 2: simulating another thread allocating write ids for the same
        // dbName.tableName (using a direct DB insert as a workaround).
        CompletableFuture<Void> future2 = CompletableFuture.runAsync(() -> {
          try {
            Thread.sleep(10);
            stmt.executeUpdate(String.format("INSERT INTO \"NEXT_WRITE_ID\" " + "VALUES ('%s', '%s', 1)", dbName, tableName));
            dbConn.commit();
          } catch (Exception e) {
            LOG.warn("Inserting next_write_id directly into DB failed: " + e.getMessage());
          }
        });
        CompletableFuture.allOf(future1, future2).join();
        // Validate that all write id allocation attempts have (eventually) succeeded.
        AllocateTableWriteIdsResponse result = future1.get();
        assertEquals(2, result.getTxnToWriteIds().size());
        assertEquals(i * numTxns + 1, result.getTxnToWriteIds().get(0).getTxnId());
        assertEquals(1, result.getTxnToWriteIds().get(0).getWriteId());
        assertEquals(i * numTxns + 2, result.getTxnToWriteIds().get(1).getTxnId());
        assertEquals(2, result.getTxnToWriteIds().get(1).getWriteId());
      }
    }
  } finally {
    // Restore the original retry limit even when an assertion or SQL call fails,
    // so this test does not leak the raised limit into other tests. (Previously the
    // restore was on the success path only.)
    MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.HMS_HANDLER_ATTEMPTS, originalLimit);
  }
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project hive by apache:
class TestTxnHandler, method replOpenTxnForTest.
/**
 * Opens {@code numTxn} replication-created transactions whose source txn ids span
 * {@code [startId, startId + numTxn - 1]}, verifies they were persisted in TXNS,
 * and checks the corresponding REPL_TXN_MAP entries.
 *
 * @return the list of target txn ids allocated by the metastore
 */
private List<Long> replOpenTxnForTest(long startId, int numTxn, String replPolicy) throws Exception {
  conf.setIntVar(HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH, numTxn);
  long endId = startId + numTxn - 1;
  // Build a REPL_CREATED open request carrying the source txn id range.
  OpenTxnRequest request = new OpenTxnRequest(numTxn, "me", "localhost");
  request.setReplPolicy(replPolicy);
  request.setReplSrcTxnIds(LongStream.rangeClosed(startId, endId).boxed().collect(Collectors.toList()));
  request.setTxn_type(TxnType.REPL_CREATED);
  List<Long> txnList = txnHandler.openTxns(request).getTxn_ids();
  assertEquals(txnList.size(), numTxn);
  // All allocated txns must be present in the TXNS table.
  long firstTxn = txnList.get(0);
  long lastTxn = txnList.get(numTxn - 1);
  int persistedCount = TestTxnDbUtil.countQueryAgent(conf, "SELECT COUNT(*) FROM \"TXNS\" WHERE \"TXN_ID\" >= " + firstTxn + " and \"TXN_ID\" <= " + lastTxn);
  assertEquals(numTxn, persistedCount);
  checkReplTxnForTest(startId, endId, replPolicy, txnList);
  return txnList;
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project hive by apache:
class BaseReplicationScenariosAcidTables, method openTxns.
/**
 * Opens {@code numTxns} transactions as user "u1" and asserts that all of them
 * are recorded as open ('o') in the TXNS table of the primary metastore DB.
 *
 * @return the allocated txn ids
 */
List<Long> openTxns(int numTxns, TxnStore txnHandler, HiveConf primaryConf) throws Throwable {
  OpenTxnsResponse response = txnHandler.openTxns(new OpenTxnRequest(numTxns, "u1", "localhost"));
  List<Long> txnIds = response.getTxn_ids();
  long first = txnIds.get(0);
  long last = txnIds.get(numTxns - 1);
  String txnIdRange = " txn_id >= " + first + " and txn_id <= " + last;
  // On mismatch, dump the whole TXNS table as the assertion message for easier debugging.
  Assert.assertEquals(TestTxnDbUtil.queryToString(primaryConf, "select * from TXNS"), numTxns, TestTxnDbUtil.countQueryAgent(primaryConf, "select count(*) from TXNS where txn_state = 'o' and " + txnIdRange));
  return txnIds;
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project hive by apache:
class TestAcidTxnCleanerService, method cleansEmptyAbortedBatchTxns.
@Test
public void cleansEmptyAbortedBatchTxns() throws Exception {
  // One non-empty aborted txn: the cleaner must keep it.
  openNonEmptyThenAbort();
  // A batch of empty, aborted txns, larger than one abort batch.
  int batchSize = TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50;
  txnHandler.setOpenTxnTimeOutMillis(30000);
  MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.TXN_MAX_OPEN_BATCH, batchSize);
  OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(batchSize, "user", "hostname"));
  txnHandler.setOpenTxnTimeOutMillis(1);
  txnHandler.abortTxns(new AbortTxnsRequest(resp.getTxn_ids()));
  // Before cleaning: the batch plus the single non-empty aborted txn.
  Assert.assertEquals(batchSize + 1, txnHandler.getOpenTxns().getOpen_txnsSize());
  underTest.run();
  // After cleaning only two txns remain (the empty aborted batch is gone).
  GetOpenTxnsResponse remaining = txnHandler.getOpenTxns();
  Assert.assertEquals(2, remaining.getOpen_txnsSize());
  Assert.assertTrue("The max txnId should be at least", getMaxTxnId() >= batchSize + 1);
}
Use of org.apache.hadoop.hive.metastore.api.OpenTxnRequest in project hive by apache:
class TestOpenTxn, method testGapWithOldOpen.
@Test
public void testGapWithOldOpen() throws Exception {
  OpenTxnRequest request = new OpenTxnRequest(1, "me", "localhost");
  // First txn stays open; wait so the second txn gets a visibly older timestamp.
  txnHandler.openTxns(request);
  Thread.sleep(1000);
  // Remove the second txn directly from the DB to create a gap in the id sequence.
  long secondTxnId = txnHandler.openTxns(request).getTxn_ids().get(0);
  deleteTransaction(secondTxnId);
  txnHandler.openTxns(request);
  // The gap left by the deleted txn must still be reported among the open txns.
  GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(3, openTxns.getOpen_txnsSize());
}
Aggregations