Use of java.sql.PreparedStatement in project Hive by Apache.
The class TxnDbUtil, method countLockComponents.
/**
* A tool to count the number of partitions, tables,
* and databases locked by a particular lockId.
*
* @param lockId lock id to look for lock components
*
* @return number of components, or 0 if there is no lock
*/
public static int countLockComponents(long lockId) throws Exception {
  Connection conn = null;
  PreparedStatement stmt = null;
  ResultSet rs = null;
  try {
    conn = getConnection();
    stmt = conn.prepareStatement("SELECT count(*) FROM hive_locks WHERE hl_lock_ext_id = ?");
    stmt.setLong(1, lockId);
    rs = stmt.executeQuery();
    if (!rs.next()) {
      return 0;
    }
    return rs.getInt(1);
  } finally {
    closeResources(conn, stmt, rs);
  }
}
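For comparison, the same lookup can be written with try-with-resources so the connection, statement, and result set are closed automatically. This is a minimal sketch assuming a getConnection() helper equivalent to the one above, not the project's actual code:

// Minimal sketch: the same count query using try-with-resources.
// Assumes a getConnection() helper like the one used above.
public static int countLockComponentsTwr(long lockId) throws Exception {
  String sql = "SELECT count(*) FROM hive_locks WHERE hl_lock_ext_id = ?";
  try (Connection conn = getConnection();
       PreparedStatement stmt = conn.prepareStatement(sql)) {
    stmt.setLong(1, lockId);
    try (ResultSet rs = stmt.executeQuery()) {
      // count(*) always yields one row; a count of 0 means no such lock
      return rs.next() ? rs.getInt(1) : 0;
    }
  }
}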
Use of java.sql.PreparedStatement in project Hive by Apache.
The class CompactionTxnHandler, method markFailed.
/**
 * If there is an entry in compaction_queue with ci.id, remove it and make an
 * entry in completed_compactions with status 'f'.
 * If there is no entry in compaction_queue, it means the Initiator failed to even
 * schedule a compaction, which we record as an ATTEMPTED_STATE entry in history.
 */
@Override
@RetrySemantics.CannotRetry
public void markFailed(CompactionInfo ci) throws MetaException {
  //todo: this should take "comment" as parameter to set in CC_META_INFO to provide some context for the failure
  try {
    Connection dbConn = null;
    Statement stmt = null;
    PreparedStatement pStmt = null;
    ResultSet rs = null;
    try {
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + ci.id);
      if (rs.next()) {
        ci = CompactionInfo.loadFullFromCompactionQueue(rs);
        String s = "delete from COMPACTION_QUEUE where cq_id = " + ci.id;
        LOG.debug("Going to execute update <" + s + ">");
        int updCnt = stmt.executeUpdate(s);
      } else {
        if (ci.id > 0) {
          //the record with valid CQ_ID has disappeared - this is a sign of something wrong
          throw new IllegalStateException("No record with CQ_ID=" + ci.id + " found in COMPACTION_QUEUE");
        }
      }
      if (ci.id == 0) {
        //The failure occurred before we even made an entry in COMPACTION_QUEUE
        //generate ID so that we can make an entry in COMPLETED_COMPACTIONS
        ci.id = generateCompactionQueueId(stmt);
        //mostly this indicates that the Initiator is paying attention to some table even though
        //compactions are not happening.
        ci.state = ATTEMPTED_STATE;
        //this is not strictly accurate, but 'type' cannot be null.
        if (ci.type == null) {
          ci.type = CompactionType.MINOR;
        }
        ci.start = getDbTime(dbConn);
      } else {
        ci.state = FAILED_STATE;
      }
      close(rs, stmt, null);
      pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
      CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
      int updCount = pStmt.executeUpdate();
      LOG.debug("Going to commit");
      closeStmt(pStmt);
      dbConn.commit();
    } catch (SQLException e) {
      LOG.warn("markFailed(" + ci.id + "):" + e.getMessage());
      LOG.debug("Going to rollback");
      rollbackDBConn(dbConn);
      try {
        checkRetryable(dbConn, e, "markFailed(" + ci + ")");
      } catch (MetaException ex) {
        LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
      }
      LOG.error("markFailed(" + ci + ") failed: " + e.getMessage(), e);
    } finally {
      close(rs, stmt, null);
      close(null, pStmt, dbConn);
    }
  } catch (RetryException e) {
    markFailed(ci);
  }
}
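The select above splices ci.id into the SQL text with string concatenation. Since this section is about PreparedStatement, here is a hedged sketch of the same lookup with a bound parameter; the column list is copied from the query above, while the helper name loadCompactionById is illustrative, not Hive's actual API:

// Illustrative sketch: load a COMPACTION_QUEUE row with a bound parameter
// instead of concatenating the id into the SQL string.
private CompactionInfo loadCompactionById(Connection dbConn, long id) throws SQLException {
  String sql = "select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, "
      + "CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, "
      + "CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE where CQ_ID = ?";
  try (PreparedStatement sel = dbConn.prepareStatement(sql)) {
    sel.setLong(1, id);
    try (ResultSet rs = sel.executeQuery()) {
      // null signals "no such row"; callers decide whether that is an error
      return rs.next() ? CompactionInfo.loadFullFromCompactionQueue(rs) : null;
    }
  }
}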
Use of java.sql.PreparedStatement in project Hive by Apache.
The class CompactionTxnHandler, method markCleaned.
/**
* This will remove an entry from the queue after
* it has been compacted.
*
* @param info info on the compaction entry to remove
*/
@Override
@RetrySemantics.CannotRetry
public void markCleaned(CompactionInfo info) throws MetaException {
  try {
    Connection dbConn = null;
    Statement stmt = null;
    PreparedStatement pStmt = null;
    ResultSet rs = null;
    try {
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = " + info.id);
      if (rs.next()) {
        info = CompactionInfo.loadFullFromCompactionQueue(rs);
      } else {
        throw new IllegalStateException("No record with CQ_ID=" + info.id + " found in COMPACTION_QUEUE");
      }
      close(rs);
      String s = "delete from COMPACTION_QUEUE where cq_id = " + info.id;
      LOG.debug("Going to execute update <" + s + ">");
      int updCount = stmt.executeUpdate(s);
      if (updCount != 1) {
        LOG.error("Unable to delete compaction record: " + info + ". Update count=" + updCount);
        LOG.debug("Going to rollback");
        dbConn.rollback();
      }
      pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
      info.state = SUCCEEDED_STATE;
      CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn));
      updCount = pStmt.executeUpdate();
      // Remove entries from completed_txn_components as well, so we don't start looking there
      // again, but only up to the highest txn ID included in this compaction job.
      // highestTxnId will be NULL in upgrade scenarios.
      s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = '" + info.dbname + "' and ctc_table = '" + info.tableName + "'";
      if (info.partName != null) {
        s += " and ctc_partition = '" + info.partName + "'";
      }
      if (info.highestTxnId != 0) {
        s += " and ctc_txnid <= " + info.highestTxnId;
      }
      LOG.debug("Going to execute update <" + s + ">");
      if (stmt.executeUpdate(s) < 1) {
        LOG.error("Expected to remove at least one row from completed_txn_components when marking compaction entry as clean!");
      }
      s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' and tc_database = '" + info.dbname + "' and tc_table = '" + info.tableName + "'" + (info.highestTxnId == 0 ? "" : " and txn_id <= " + info.highestTxnId);
      if (info.partName != null) {
        s += " and tc_partition = '" + info.partName + "'";
      }
      LOG.debug("Going to execute query <" + s + ">");
      rs = stmt.executeQuery(s);
      List<Long> txnids = new ArrayList<>();
      while (rs.next()) {
        txnids.add(rs.getLong(1));
      }
      // Remove entries from txn_components, as there may be aborted txn components
      if (txnids.size() > 0) {
        List<String> queries = new ArrayList<String>();
        // Prepare prefix and suffix
        StringBuilder prefix = new StringBuilder();
        StringBuilder suffix = new StringBuilder();
        prefix.append("delete from TXN_COMPONENTS where ");
        // because one txn may include different partitions/tables even in auto-commit mode
        suffix.append(" and tc_database = ");
        suffix.append(quoteString(info.dbname));
        suffix.append(" and tc_table = ");
        suffix.append(quoteString(info.tableName));
        if (info.partName != null) {
          suffix.append(" and tc_partition = ");
          suffix.append(quoteString(info.partName));
        }
        // Populate the complete query with provided prefix and suffix
        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "tc_txnid", true, false);
        for (String query : queries) {
          LOG.debug("Going to execute update <" + query + ">");
          int rc = stmt.executeUpdate(query);
          LOG.debug("Removed " + rc + " records from txn_components");
          // Don't bother cleaning from the txns table. A separate call will do that. We don't
          // know here which txns still have components from other tables or partitions in the
          // table, so we don't know which ones we can and cannot clean.
        }
      }
      LOG.debug("Going to commit");
      dbConn.commit();
    } catch (SQLException e) {
      LOG.error("Unable to delete from compaction queue " + e.getMessage());
      LOG.debug("Going to rollback");
      rollbackDBConn(dbConn);
      checkRetryable(dbConn, e, "markCleaned(" + info + ")");
      throw new MetaException("Unable to connect to transaction database " + StringUtils.stringifyException(e));
    } finally {
      closeStmt(pStmt);
      close(rs, stmt, dbConn);
    }
  } catch (RetryException e) {
    markCleaned(info);
  }
}
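TxnUtils.buildQueryWithINClause exists because the txnids list can be arbitrarily long, while databases cap the size of an IN clause; it splits the ids across several statements. A simplified, self-contained sketch of that batching idea (the 1000-element cap and the method shape are assumptions for illustration, not Hive's actual signature):

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of IN-clause batching. MAX_IN_ITEMS is an assumed cap,
// not a Hive constant. With prefix "delete from TXN_COMPONENTS where " and
// suffix " and tc_database = 'db'", each emitted statement looks like:
//   delete from TXN_COMPONENTS where tc_txnid in (1,2,3) and tc_database = 'db'
static List<String> buildInClauseBatches(String prefix, String suffix, List<Long> ids, String column) {
  final int MAX_IN_ITEMS = 1000; // assumed per-statement limit
  List<String> queries = new ArrayList<>();
  for (int from = 0; from < ids.size(); from += MAX_IN_ITEMS) {
    int to = Math.min(from + MAX_IN_ITEMS, ids.size());
    StringBuilder sb = new StringBuilder(prefix).append(column).append(" in (");
    for (int i = from; i < to; i++) {
      if (i > from) {
        sb.append(',');
      }
      sb.append(ids.get(i));
    }
    sb.append(')').append(suffix);
    queries.add(sb.toString());
  }
  return queries;
}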
Use of java.sql.PreparedStatement in project Hive by Apache.
The class cbo_rp_TestJdbcDriver2, method testPrepareSetTimestamp.
@Test
public void testPrepareSetTimestamp() throws SQLException, ParseException {
  String sql = String.format("SELECT * FROM %s WHERE c17 = ?", dataTypeTableName);
  try (PreparedStatement ps = con.prepareStatement(sql)) {
    Timestamp timestamp = Timestamp.valueOf("2012-04-22 09:00:00.123456789");
    ps.setTimestamp(1, timestamp);
    // Ensure we find the single row which matches our timestamp (where field 1 has value 1)
    try (ResultSet resultSet = ps.executeQuery()) {
      assertTrue(resultSet.next());
      assertEquals(1, resultSet.getInt(1));
      assertFalse(resultSet.next());
    }
  }
}
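Timestamp.valueOf parses the JDBC escape format yyyy-mm-dd hh:mm:ss[.fffffffff] with up to nine fractional digits, so the literal in the test keeps full nanosecond precision through setTimestamp. A quick standalone check:

import java.sql.Timestamp;

// Timestamp.valueOf keeps all nine fractional digits of the JDBC escape format
Timestamp ts = Timestamp.valueOf("2012-04-22 09:00:00.123456789");
assert ts.getNanos() == 123456789;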
Use of java.sql.PreparedStatement in project Hive by Apache.
The class cbo_rp_TestJdbcDriver2, method createPreapredStatementUsingSetObject.
private PreparedStatement createPreapredStatementUsingSetObject(String sql) throws SQLException {
  PreparedStatement ps = con.prepareStatement(sql);
  //setBoolean
  ps.setObject(1, true);
  //setBoolean
  ps.setObject(2, true);
  //setShort
  ps.setObject(3, Short.valueOf("1"));
  //setInt
  ps.setObject(4, 2);
  //setFloat
  ps.setObject(5, 3f);
  //setDouble
  ps.setObject(6, Double.valueOf(4));
  //setString
  ps.setObject(7, "test'string\"");
  //setLong
  ps.setObject(8, 5L);
  //setByte
  ps.setObject(9, (byte) 1);
  //setByte
  ps.setObject(10, (byte) 1);
  //setString
  ps.setString(11, "2012-01-01");
  //setTimestamp
  ps.setObject(12, Timestamp.valueOf("2012-04-22 09:00:00.123456789"));
  ps.setMaxRows(2);
  return ps;
}
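A hedged usage sketch: the helper expects a query with twelve placeholders, one per setObject/setString call above, and caps results at two rows via setMaxRows(2). The table and column names below are hypothetical, not from the test suite:

// Hypothetical usage; table/column names are illustrative only.
String sql = "SELECT * FROM data_type_table WHERE c1 = ? AND c2 = ? AND c3 = ?"
    + " AND c4 = ? AND c5 = ? AND c6 = ? AND c7 = ? AND c8 = ? AND c9 = ?"
    + " AND c10 = ? AND c11 = ? AND c17 = ?";
try (PreparedStatement ps = createPreapredStatementUsingSetObject(sql);
     ResultSet rs = ps.executeQuery()) {
  while (rs.next()) {
    // at most two rows come back, because the helper calls ps.setMaxRows(2)
  }
}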