Use of java.sql.Statement in the Apache Hive project.
From the class CompactionTxnHandler, method purgeCompactionHistory.
/**
 * For any given compactable entity (partition; table if not partitioned) the history of compactions
 * may look like "sssfffaaasffss", for example. The idea is to retain the tail (most recent) of the
 * history such that a configurable number of each type of state is present. Any other entries
 * can be purged. This scheme has advantage of always retaining the last failure/success even if
 * it's not recent.
 * @throws MetaException if the metastore database cannot be reached; wraps the underlying SQLException
 */
@Override
@RetrySemantics.SafeToRetry
public void purgeCompactionHistory() throws MetaException {
  Connection dbConn = null;
  Statement stmt = null;
  ResultSet rs = null;
  // cc_id values of history rows that fall outside the retention policy and should be deleted.
  List<Long> deleteSet = new ArrayList<>();
  // Per-entity retention counters; re-initialized each time the scan crosses into a new entity.
  RetentionCounters rc = null;
  try {
    try {
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      /*cc_id is monotonically increasing so for any entity sorts in order of compaction history,
      thus this query groups by entity and within group sorts most recent first*/
      rs = stmt.executeQuery("select cc_id, cc_database, cc_table, cc_partition, cc_state from " + "COMPLETED_COMPACTIONS order by cc_database, cc_table, cc_partition, cc_id desc");
      String lastCompactedEntity = null;
      /*In each group, walk from most recent and count occurrences of each state type. Once you
       * have counted enough (for each state) to satisfy retention policy, delete all other
       * instances of this status.*/
      while (rs.next()) {
        CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0));
        if (!ci.getFullPartitionName().equals(lastCompactedEntity)) {
          // New entity group started: reset the per-state retention counters from config.
          lastCompactedEntity = ci.getFullPartitionName();
          rc = new RetentionCounters(conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), getFailedCompactionRetention(), conf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED));
        }
        checkForDeletion(deleteSet, ci, rc);
      }
      // Release the cursor before issuing deletes on the same statement.
      close(rs);
      if (deleteSet.size() <= 0) {
        // Nothing exceeded the retention policy; no delete needed.
        return;
      }
      List<String> queries = new ArrayList<String>();
      StringBuilder prefix = new StringBuilder();
      StringBuilder suffix = new StringBuilder();
      prefix.append("delete from COMPLETED_COMPACTIONS where ");
      suffix.append("");
      // Produces one or more "delete ... where cc_id in (...)" statements into 'queries',
      // splitting the id list to respect database limits on IN-clause size.
      TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, deleteSet, "cc_id", false, false);
      for (String query : queries) {
        LOG.debug("Going to execute update <" + query + ">");
        int count = stmt.executeUpdate(query);
        LOG.debug("Removed " + count + " records from COMPLETED_COMPACTIONS");
      }
      dbConn.commit();
    } catch (SQLException e) {
      rollbackDBConn(dbConn);
      // checkRetryable throws RetryException when the failure looks transient.
      checkRetryable(dbConn, e, "purgeCompactionHistory()");
      throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e));
    } finally {
      close(rs, stmt, dbConn);
    }
  } catch (RetryException ex) {
    // Transient DB failure: retry the whole pass (annotated SafeToRetry above).
    purgeCompactionHistory();
  }
}
Use of java.sql.Statement in the Apache Hive project.
From the class TxnDbUtil, method cleanDb.
/**
 * Drops all transaction-related metastore tables (and the HL_TXNID_INDEX index),
 * retrying the whole sequence up to 3 times.
 *
 * Fix: previously, if all 3 attempts failed this method returned silently, leaving
 * stale tables behind while callers assumed a clean database. It now throws after
 * retries are exhausted (covered by the declared {@code throws Exception}).
 *
 * @throws Exception if the tables could not be dropped after all retry attempts
 */
public static void cleanDb() throws Exception {
  // Drop order preserved from the original inline sequence.
  final String[] txnTables = {
      "TXN_COMPONENTS", "COMPLETED_TXN_COMPONENTS", "TXNS", "NEXT_TXN_ID",
      "HIVE_LOCKS", "NEXT_LOCK_ID", "COMPACTION_QUEUE", "NEXT_COMPACTION_QUEUE_ID",
      "COMPLETED_COMPACTIONS", "AUX_TABLE", "WRITE_SET"
  };
  int retryCount = 0;
  while (++retryCount <= 3) {
    boolean success = true;
    Connection conn = null;
    Statement stmt = null;
    try {
      conn = getConnection();
      stmt = conn.createStatement();
      // We want to try these, whether they succeed or fail.
      try {
        stmt.execute("DROP INDEX HL_TXNID_INDEX");
      } catch (SQLException e) {
        // SQLState 42X65 / error code 30000 means the index doesn't exist -- that's fine
        // (original comment said "3000", which did not match the 30000 checked in code).
        if (!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
          LOG.error("Unable to drop index HL_TXNID_INDEX " + e.getMessage() + "State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
          success = false;
        }
      }
      // Attempt every drop even when an earlier one failed (matches the original
      // non-short-circuiting '&=' chain), so one failure doesn't strand the rest.
      for (String table : txnTables) {
        success &= dropTable(stmt, table, retryCount);
      }
    } finally {
      closeResources(conn, stmt, null);
    }
    if (success) {
      return;
    }
  }
  // All retries exhausted -- fail loudly instead of silently leaving a dirty DB.
  throw new RuntimeException("Failed to clean up txn tables after 3 attempts");
}
Use of java.sql.Statement in the Apache Hive project.
From the class TxnDbUtil, method countQueryAgent.
/**
 * Runs a scalar aggregate query (e.g. "select count(*) from ...") against the
 * metastore tables and returns its single integer result.
 *
 * @param countQuery query text whose first row/first column is the count
 * @return the value of column 1 of the first row, or 0 when the result set is empty
 * @throws Exception on any connection or query failure
 */
public static int countQueryAgent(String countQuery) throws Exception {
  Connection connection = null;
  Statement statement = null;
  ResultSet resultSet = null;
  try {
    connection = getConnection();
    statement = connection.createStatement();
    resultSet = statement.executeQuery(countQuery);
    // An empty result set counts as zero.
    return resultSet.next() ? resultSet.getInt(1) : 0;
  } finally {
    closeResources(connection, statement, resultSet);
  }
}
Use of java.sql.Statement in the Apache Hive project.
From the class TxnDbUtil, method queryToString.
/**
 * Executes {@code query} and renders the whole result set as text: one row per line,
 * columns separated by single spaces, optionally preceded by a header line of column names.
 *
 * @param query         SQL text to execute
 * @param includeHeader when true, the first output line lists the column names
 * @return the formatted result set (empty string if the query returned no rows and no header)
 * @throws Exception on any connection or query failure
 */
public static String queryToString(String query, boolean includeHeader) throws Exception {
  Connection connection = null;
  Statement statement = null;
  ResultSet resultSet = null;
  StringBuilder output = new StringBuilder();
  try {
    connection = getConnection();
    statement = connection.createStatement();
    resultSet = statement.executeQuery(query);
    ResultSetMetaData meta = resultSet.getMetaData();
    int columnCount = meta.getColumnCount();
    if (includeHeader) {
      for (int col = 1; col <= columnCount; col++) {
        output.append(meta.getColumnName(col)).append(" ");
      }
      output.append('\n');
    }
    while (resultSet.next()) {
      for (int col = 1; col <= columnCount; col++) {
        output.append(resultSet.getObject(col)).append(" ");
      }
      output.append('\n');
    }
  } finally {
    closeResources(connection, statement, resultSet);
  }
  return output.toString();
}
Use of java.sql.Statement in the Apache Hive project.
From the class CompactionTxnHandler, method revokeTimedoutWorkers.
/**
 * Returns all compaction queue entries that have been assigned to a worker for longer
 * than the timeout back to the initiated state, so another worker can pick them up.
 * This should be called by the initiator on start up and occasionally when running to
 * clean up after dead threads. At start up {@link #revokeFromLocalWorkers(String)}
 * should be called first.
 *
 * @param timeout number of milliseconds since start time that should elapse before a
 *                worker is declared dead.
 */
@Override
@RetrySemantics.Idempotent
public void revokeTimedoutWorkers(long timeout) throws MetaException {
  try {
    Connection conn = null;
    Statement stmt = null;
    try {
      conn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = conn.createStatement();
      // Workers whose cq_start is before this database-clock timestamp are considered dead.
      long latestValidStart = getDbTime(conn) - timeout;
      StringBuilder update = new StringBuilder("update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '");
      update.append(INITIATED_STATE)
          .append("' where cq_state = '")
          .append(WORKING_STATE)
          .append("' and cq_start < ")
          .append(latestValidStart);
      String sql = update.toString();
      LOG.debug("Going to execute update <" + sql + ">");
      // It isn't an error if the following returns no rows, as the local workers could
      // have died with nothing assigned to them.
      stmt.executeUpdate(sql);
      LOG.debug("Going to commit");
      conn.commit();
    } catch (SQLException e) {
      LOG.error("Unable to change dead worker's records back to initiated state " + e.getMessage());
      LOG.debug("Going to rollback");
      rollbackDBConn(conn);
      checkRetryable(conn, e, "revokeTimedoutWorkers(timeout:" + timeout + ")");
      throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e));
    } finally {
      closeStmt(stmt);
      closeDbConn(conn);
    }
  } catch (RetryException e) {
    // checkRetryable decided the failure was transient; run the whole update again.
    revokeTimedoutWorkers(timeout);
  }
}
Aggregations