Use of org.apache.hadoop.hive.metastore.utils.StringableMap in project hive by apache.
From the class TxnHandler, method compact:
@Override
@RetrySemantics.Idempotent
public CompactionResponse compact(CompactionRequest rqst) throws MetaException {
  // Put a compaction request in the queue.
  try {
    Connection dbConn = null;
    Statement stmt = null;
    TxnStore.MutexAPI.LockHandle handle = null;
    try {
      lockInternal();
      /**
       * MUTEX_KEY.CompactionScheduler lock ensures that there is only 1 entry in
       * Initiated/Working state for any resource. This ensures that we don't run concurrent
       * compactions for any resource.
       */
      handle = getMutexAPI().acquireLock(MUTEX_KEY.CompactionScheduler.name());
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      long id = generateCompactionQueueId(stmt);
      // Check whether a compaction for this resource is already queued or running.
      StringBuilder sb = new StringBuilder("select cq_id, cq_state from COMPACTION_QUEUE where")
          .append(" cq_state IN(").append(quoteChar(INITIATED_STATE))
          .append(",").append(quoteChar(WORKING_STATE))
          .append(") AND cq_database=").append(quoteString(rqst.getDbname()))
          .append(" AND cq_table=").append(quoteString(rqst.getTablename()))
          .append(" AND ");
      if (rqst.getPartitionname() == null) {
        sb.append("cq_partition is null");
      } else {
        sb.append("cq_partition=").append(quoteString(rqst.getPartitionname()));
      }
      LOG.debug("Going to execute query <" + sb.toString() + ">");
      ResultSet rs = stmt.executeQuery(sb.toString());
      if (rs.next()) {
        long enqueuedId = rs.getLong(1);
        String state = compactorStateToResponse(rs.getString(2).charAt(0));
        LOG.info("Ignoring request to compact " + rqst.getDbname() + "/" + rqst.getTablename() +
            "/" + rqst.getPartitionname() + " since it is already " + quoteString(state) +
            " with id=" + enqueuedId);
        return new CompactionResponse(enqueuedId, state, false);
      }
      close(rs);
      // No duplicate found: enqueue a new entry in the Initiated state.
      StringBuilder buf = new StringBuilder("insert into COMPACTION_QUEUE (cq_id, cq_database, " +
          "cq_table, ");
      String partName = rqst.getPartitionname();
      if (partName != null) {
        buf.append("cq_partition, ");
      }
      buf.append("cq_state, cq_type");
      if (rqst.getProperties() != null) {
        buf.append(", cq_tblproperties");
      }
      if (rqst.getRunas() != null) {
        buf.append(", cq_run_as");
      }
      buf.append(") values (");
      buf.append(id);
      buf.append(", '");
      buf.append(rqst.getDbname());
      buf.append("', '");
      buf.append(rqst.getTablename());
      buf.append("', '");
      if (partName != null) {
        buf.append(partName);
        buf.append("', '");
      }
      buf.append(INITIATED_STATE);
      buf.append("', '");
      switch (rqst.getType()) {
        case MAJOR:
          buf.append(MAJOR_TYPE);
          break;
        case MINOR:
          buf.append(MINOR_TYPE);
          break;
        default:
          LOG.debug("Going to rollback");
          dbConn.rollback();
          throw new MetaException("Unexpected compaction type " + rqst.getType().toString());
      }
      if (rqst.getProperties() != null) {
        buf.append("', '");
        // Serialize the request's properties map into a single string for
        // the cq_tblproperties column.
        buf.append(new StringableMap(rqst.getProperties()).toString());
      }
      if (rqst.getRunas() != null) {
        buf.append("', '");
        buf.append(rqst.getRunas());
      }
      buf.append("')");
      String s = buf.toString();
      LOG.debug("Going to execute update <" + s + ">");
      stmt.executeUpdate(s);
      LOG.debug("Going to commit");
      dbConn.commit();
      return new CompactionResponse(id, INITIATED_RESPONSE, true);
    } catch (SQLException e) {
      LOG.debug("Going to rollback");
      rollbackDBConn(dbConn);
      checkRetryable(dbConn, e, "compact(" + rqst + ")");
      throw new MetaException("Unable to select from transaction database " +
          StringUtils.stringifyException(e));
    } finally {
      closeStmt(stmt);
      closeDbConn(dbConn);
      if (handle != null) {
        handle.releaseLocks();
      }
      unlockInternal();
    }
  } catch (RetryException e) {
    // checkRetryable() signalled a retryable failure; the method is
    // @RetrySemantics.Idempotent, so it is safe to re-run from the top.
    return compact(rqst);
  }
}
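
In the snippet, StringableMap appears only on the write path: new StringableMap(rqst.getProperties()).toString() flattens the request's properties into a single string for the cq_tblproperties column. StringableMap also has a String-accepting constructor that parses that form back into a map, so the stored value can be recovered later. The following is a minimal round-trip sketch under that assumption; the property key is an illustrative placeholder, not taken from the snippet.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.utils.StringableMap;

public class StringableMapRoundTrip {
  public static void main(String[] args) {
    // An illustrative properties map; the key is a made-up example and is
    // not one used by the compact() method above.
    Map<String, String> props = new HashMap<>();
    props.put("compaction.example.key", "2048");

    // Flatten the map into a single String, mirroring what compact() writes
    // into the cq_tblproperties column.
    String serialized = new StringableMap(props).toString();

    // Parse the flattened form back with the String constructor.
    StringableMap parsed = new StringableMap(serialized);
    System.out.println(parsed.get("compaction.example.key")); // prints 2048
  }
}

Since StringableMap behaves as an ordinary Map<String, String> once constructed, code that later reads cq_tblproperties can look up individual settings directly from the parsed object.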