Use of org.apache.hadoop.hive.metastore.api.TxnType in project hive by apache.
The class TxnHandler, method enqueueLockWithRetry.
/**
 * This enters locks into the queue in {@link #LOCK_WAITING} mode.
 *
 * Isolation Level Notes:
 * 1. We use S4U (with READ_COMMITTED) to generate the next (ext) lock id. This serializes
 *    any two concurrent {@code enqueueLockWithRetry()} calls.
 * 2. We use S4U on the relevant TXNS row to block any concurrent abort/commit/etc. operations.
 * @see #checkLockWithRetry(Connection, long, long, boolean)
 */
private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst)
    throws NoSuchTxnException, TxnAbortedException, MetaException {
  boolean success = false;
  Connection dbConn = null;
  try {
    Statement stmt = null;
    try {
      lockInternal();
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      long txnid = rqst.getTxnid();
      stmt = dbConn.createStatement();
      if (isValidTxn(txnid)) {
        // this also ensures that the txn is still there, in the expected state
        TxnType txnType = getOpenTxnTypeAndLock(stmt, txnid);
        if (txnType == null) {
          ensureValidTxn(dbConn, txnid, stmt);
          shouldNeverHappen(txnid);
        }
      }
      /* Insert txn components and hive locks (with a temp extLockId) first, before getting the
         next lock ID in a select-for-update. This should minimize the scope of the S4U and
         decrease the table lock duration. */
      insertTxnComponents(txnid, rqst, dbConn);
      long tempExtLockId = insertHiveLocksWithTemporaryExtLockId(txnid, dbConn, rqst);
      /**
       * Get the next lock id.
       * This has to be atomic with adding entries to HIVE_LOCKS (1st added in W state) to
       * prevent a race. Suppose id generation were a separate txn and 2 concurrent lock()
       * calls were running: the 1st generates nl_next=7, the 2nd nl_next=8. Then 8 inserts
       * into HIVE_LOCKS first and acquires its locks. When 7 unblocks and adds its W locks,
       * it won't see the locks from 8, since to be 'fair'
       * {@link #checkLock(java.sql.Connection, long)} doesn't block on locks acquired later
       * than the one it's checking.
       */
      long extLockId = getNextLockIdForUpdate(dbConn, stmt);
      incrementLockIdAndUpdateHiveLocks(stmt, extLockId, tempExtLockId);
      dbConn.commit();
      success = true;
      return new ConnectionLockIdPair(dbConn, extLockId);
    } catch (SQLException e) {
      LOG.error("enqueueLock failed for request: {}. Exception msg: {}", rqst, getMessage(e));
      rollbackDBConn(dbConn);
      checkRetryable(e, "enqueueLockWithRetry(" + rqst + ")");
      throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e));
    } finally {
      closeStmt(stmt);
      if (!success) {
        /* This needs to return a "live" connection to be used by the operation that follows
           it, so it only closes the Connection on failure/retry. */
        closeDbConn(dbConn);
      }
      unlockInternal();
    }
  } catch (RetryException e) {
    LOG.debug("Going to retry enqueueLock for request: {}, after catching RetryException with message: {}",
        rqst, e.getMessage());
    return enqueueLockWithRetry(rqst);
  }
}
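The select-for-update (S4U) pattern behind getNextLockIdForUpdate is worth seeing concretely. Below is a minimal sketch, assuming a single-row NEXT_LOCK_ID table with an NL_NEXT column (the names the comments above use); the retry and internal-mutex machinery of TxnHandler is omitted, and this is not the actual Hive implementation.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

// Minimal sketch of S4U id generation, not TxnHandler's actual code.
public class LockIdS4USketch {
  static long nextLockIdForUpdate(Connection dbConn) throws SQLException {
    // FOR UPDATE takes a row lock: with READ_COMMITTED, a second concurrent
    // caller blocks here until the first commits its increment, so ids are
    // handed out serially.
    String s4u = "SELECT \"NL_NEXT\" FROM \"NEXT_LOCK_ID\" FOR UPDATE";
    long extLockId;
    try (Statement stmt = dbConn.createStatement()) {
      try (ResultSet rs = stmt.executeQuery(s4u)) {
        if (!rs.next()) {
          throw new IllegalStateException("NEXT_LOCK_ID is not initialized");
        }
        extLockId = rs.getLong(1);
      }
      // Advance the sequence inside the same transaction; committing releases
      // the row lock and unblocks the next caller.
      stmt.executeUpdate("UPDATE \"NEXT_LOCK_ID\" SET \"NL_NEXT\" = " + (extLockId + 1));
    }
    return extLockId;
  }
}

Because the read and the increment happen under one row lock and one transaction, two concurrent enqueueLockWithRetry() calls can never observe the same NL_NEXT value, which is exactly the serialization property the javadoc's first note describes.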
Use of org.apache.hadoop.hive.metastore.api.TxnType in project hive by apache.
The class TxnHandler, method getAbortedAndReadOnlyTxns.
/**
 * Get txns from the list that are either aborted or read-only.
 * @param txnIds list of txns to be evaluated for aborted state/read-only status
 * @param stmt db statement
 * @return a string describing the aborted and read-only txns found, suitable for error messages
 */
private String getAbortedAndReadOnlyTxns(List<Long> txnIds, Statement stmt) throws SQLException {
  List<String> queries = new ArrayList<>();
  StringBuilder prefix = new StringBuilder();
  // Check if any of the txns in the list are either aborted or read-only.
  prefix.append("SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\" FROM \"TXNS\" WHERE ");
  TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), txnIds, "\"TXN_ID\"", false, false);
  StringBuilder txnInfo = new StringBuilder();
  for (String query : queries) {
    LOG.debug("Going to execute query <" + query + ">");
    try (ResultSet rs = stmt.executeQuery(query)) {
      while (rs.next()) {
        long txnId = rs.getLong(1);
        TxnStatus txnState = TxnStatus.fromString(rs.getString(2));
        TxnType txnType = TxnType.findByValue(rs.getInt(3));
        if (txnState != TxnStatus.OPEN) {
          txnInfo.append("{").append(txnId).append(",").append(txnState).append("}");
        } else if (txnType == TxnType.READ_ONLY) {
          txnInfo.append("{").append(txnId).append(",read-only}");
        }
      }
    }
  }
  return txnInfo.toString();
}
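TxnUtils.buildQueryWithINClause exists because a single IN clause over a very long id list can exceed database limits; it splits the list across several queries. A simplified, hypothetical stand-in for that helper is sketched below to show the chunking idea; the real Hive utility additionally handles negation, parenthesization, and configuration-driven batch sizes.

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

// Illustrative sketch only; not Hive's TxnUtils implementation.
public class InClauseSketch {
  static List<String> buildQueries(List<Long> txnIds, int maxElementsInClause) {
    String prefix = "SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\" FROM \"TXNS\" WHERE ";
    List<String> queries = new ArrayList<>();
    // Emit one query per chunk so no IN clause exceeds the element limit.
    for (int i = 0; i < txnIds.size(); i += maxElementsInClause) {
      List<Long> chunk = txnIds.subList(i, Math.min(i + maxElementsInClause, txnIds.size()));
      String inList = chunk.stream().map(String::valueOf).collect(Collectors.joining(","));
      queries.add(prefix + "\"TXN_ID\" IN (" + inList + ")");
    }
    return queries;
  }
}

The caller then executes each generated query in turn and accumulates the results, exactly as the for-loop over queries does above.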
Use of org.apache.hadoop.hive.metastore.api.TxnType in project hive by apache.
The class TxnHandler, method addDynamicPartitions.
/**
* Retry-by-caller note:
* This may be retried after dbConn.commit. At worst, it will create duplicate entries in
* TXN_COMPONENTS which won't affect anything. See more comments in {@link #commitTxn(CommitTxnRequest)}
*/
@Override
@RetrySemantics.SafeToRetry
public void addDynamicPartitions(AddDynamicPartitions rqst)
    throws NoSuchTxnException, TxnAbortedException, MetaException {
  Connection dbConn = null;
  Statement stmt = null;
  try {
    try {
      lockInternal();
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      TxnType txnType = getOpenTxnTypeAndLock(stmt, rqst.getTxnid());
      if (txnType == null) {
        // ensures the txn is still there and in the expected state
        ensureValidTxn(dbConn, rqst.getTxnid(), stmt);
        shouldNeverHappen(rqst.getTxnid());
      }
      // for RU this may be null, so default it to 'u' (update), which is the most restrictive
      OperationType ot = OperationType.UPDATE;
      if (rqst.isSetOperationType()) {
        ot = OperationType.fromDataOperationType(rqst.getOperationType());
      }
      Long writeId = rqst.getWriteid();
      try (PreparedStatement pstmt = dbConn.prepareStatement(TXN_COMPONENTS_INSERT_QUERY)) {
        int insertCounter = 0;
        for (String partName : rqst.getPartitionnames()) {
          pstmt.setLong(1, rqst.getTxnid());
          pstmt.setString(2, normalizeCase(rqst.getDbname()));
          pstmt.setString(3, normalizeCase(rqst.getTablename()));
          pstmt.setString(4, partName);
          pstmt.setString(5, ot.getSqlConst());
          pstmt.setObject(6, writeId);
          pstmt.addBatch();
          insertCounter++;
          if (insertCounter % maxBatchSize == 0) {
            LOG.debug("Executing a batch of <" + TXN_COMPONENTS_INSERT_QUERY + "> queries. Batch size: " + maxBatchSize);
            pstmt.executeBatch();
          }
        }
        if (insertCounter % maxBatchSize != 0) {
          LOG.debug("Executing a batch of <" + TXN_COMPONENTS_INSERT_QUERY + "> queries. Batch size: " + insertCounter % maxBatchSize);
          pstmt.executeBatch();
        }
      }
      try (PreparedStatement pstmt = dbConn.prepareStatement(TXN_COMPONENTS_DP_DELETE_QUERY)) {
        pstmt.setLong(1, rqst.getTxnid());
        pstmt.setString(2, normalizeCase(rqst.getDbname()));
        pstmt.setString(3, normalizeCase(rqst.getTablename()));
        pstmt.execute();
      }
      LOG.debug("Going to commit");
      dbConn.commit();
    } catch (SQLException e) {
      LOG.debug("Going to rollback: ", e);
      rollbackDBConn(dbConn);
      checkRetryable(e, "addDynamicPartitions(" + rqst + ")");
      throw new MetaException("Unable to insert into transaction database " + StringUtils.stringifyException(e));
    } finally {
      close(null, stmt, dbConn);
      unlockInternal();
    }
  } catch (RetryException e) {
    addDynamicPartitions(rqst);
  }
}
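The insert loop above flushes a batch every maxBatchSize rows and then flushes once more for any trailing partial batch. Here is a self-contained sketch of that cadence, using illustrative table and column names rather than Hive's actual TXN_COMPONENTS_INSERT_QUERY:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

// Sketch of the flush-every-N batching pattern; names are illustrative.
public class BatchInsertSketch {
  static void insertPartitions(Connection dbConn, long txnId, List<String> partNames,
                               int maxBatchSize) throws SQLException {
    String sql = "INSERT INTO \"TXN_COMPONENTS\" (\"TC_TXNID\", \"TC_PARTITION\") VALUES (?, ?)";
    try (PreparedStatement pstmt = dbConn.prepareStatement(sql)) {
      int insertCounter = 0;
      for (String partName : partNames) {
        pstmt.setLong(1, txnId);
        pstmt.setString(2, partName);
        pstmt.addBatch();
        insertCounter++;
        // Flush a full batch to keep memory bounded and round-trips amortized.
        if (insertCounter % maxBatchSize == 0) {
          pstmt.executeBatch();
        }
      }
      // Flush the trailing partial batch, if any.
      if (insertCounter % maxBatchSize != 0) {
        pstmt.executeBatch();
      }
    }
  }
}

Batching this way bounds the number of statements buffered on the client while still amortizing the driver round-trip cost across up to maxBatchSize inserts.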