Usage of org.apache.hadoop.hive.metastore.api.CompactionInfoStruct in the Apache Hive project:
class TxnHandler, method getLatestCommittedCompactionInfo.
/**
 * Returns the latest committed compaction record per partition for the requested table.
 * We assume this is only called by the metadata cache server to learn whether there are
 * new base/delta files that should be read. The query filters compactions by state and
 * only returns SUCCEEDED or READY_FOR_CLEANING compactions, because only these two
 * states mean there are new files ready to be read.
 */
@RetrySemantics.ReadOnly
public GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo(GetLatestCommittedCompactionInfoRequest rqst) throws MetaException {
  GetLatestCommittedCompactionInfoResponse response = new GetLatestCommittedCompactionInfoResponse(new ArrayList<>());
  Connection dbConn = null;
  PreparedStatement pst = null;
  ResultSet rs = null;
  // Outer try handles RetryException thrown by checkRetryable; inner try owns the JDBC resources.
  try {
    try {
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      List<String> params = new ArrayList<>();
      // This query combines the result sets of SUCCEEDED compactions (COMPLETED_COMPACTIONS)
      // and READY_FOR_CLEANING compactions (still in COMPACTION_QUEUE) via UNION ALL.
      // We also sort the result by CC_ID in descending order so that we can keep only the
      // latest record according to the order in the result set.
      // Note: the state constants are trusted internal chars inlined via quoteChar(), while all
      // caller-supplied values (db, table, partitions, last compaction id) are bound as '?' params.
      StringBuilder sb = new StringBuilder().append("SELECT * FROM (").append(" SELECT").append(" \"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_TYPE\"").append(" FROM \"COMPLETED_COMPACTIONS\"").append(" WHERE \"CC_STATE\" = " + quoteChar(SUCCEEDED_STATE)).append(" UNION ALL").append(" SELECT").append(" \"CQ_ID\" AS \"CC_ID\", \"CQ_DATABASE\" AS \"CC_DATABASE\"").append(" ,\"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\"").append(" ,\"CQ_TYPE\" AS \"CC_TYPE\"").append(" FROM \"COMPACTION_QUEUE\"").append(" WHERE \"CQ_STATE\" = " + quoteChar(READY_FOR_CLEANING)).append(") AS compactions ").append(" WHERE \"CC_DATABASE\" = ? AND \"CC_TABLE\" = ?");
      params.add(rqst.getDbname());
      params.add(rqst.getTablename());
      // Optional partition filter: one '?' placeholder per requested partition name.
      if (rqst.getPartitionnamesSize() > 0) {
        sb.append(" AND \"CC_PARTITION\" IN (");
        sb.append(String.join(",", Collections.nCopies(rqst.getPartitionnamesSize(), "?")));
        sb.append(")");
        params.addAll(rqst.getPartitionnames());
      }
      // Optional incremental filter: only compactions newer than the caller's last-seen id.
      if (rqst.isSetLastCompactionId()) {
        sb.append(" AND \"CC_ID\" > ?");
        params.add(String.valueOf(rqst.getLastCompactionId()));
      }
      sb.append(" ORDER BY \"CC_ID\" DESC");
      pst = sqlGenerator.prepareStmtWithParameters(dbConn, sb.toString(), params);
      LOG.debug("Going to execute query <" + sb.toString() + ">");
      rs = pst.executeQuery();
      Set<String> partitionSet = new HashSet<>();
      while (rs.next()) {
        CompactionInfoStruct lci = new CompactionInfoStruct();
        lci.setId(rs.getLong(1));
        lci.setDbname(rs.getString(2));
        lci.setTablename(rs.getString(3));
        String partition = rs.getString(4);
        // Partition column is NULL for unpartitioned tables; only set it when present.
        if (!rs.wasNull()) {
          lci.setPartitionname(partition);
        }
        lci.setType(dbCompactionType2ThriftType(rs.getString(5).charAt(0)));
        // Only put the latest record of each partition into the response. Rows arrive in
        // CC_ID DESC order, so the first row seen per partition is the newest. For an
        // unpartitioned table, `partition` is null and HashSet dedups on the null key,
        // keeping a single (latest) record.
        if (!partitionSet.contains(partition)) {
          response.addToCompactions(lci);
          partitionSet.add(partition);
        }
      }
    } catch (SQLException e) {
      LOG.error("Unable to execute query " + e.getMessage());
      // Throws RetryException if the failure is transient; otherwise the error is
      // swallowed and an empty/partial response is returned (best-effort read).
      checkRetryable(e, "getLatestCommittedCompactionInfo");
    } finally {
      close(rs, pst, dbConn);
    }
    return response;
  } catch (RetryException e) {
    // Transient DB failure: retry the whole operation with a fresh connection.
    return getLatestCommittedCompactionInfo(rqst);
  }
}
Usage of org.apache.hadoop.hive.metastore.api.CompactionInfoStruct in the Apache Hive project:
class CompactionInfo, method compactionInfoToOptionalStruct.
/**
 * Wraps the given {@link CompactionInfo} in an {@link OptionalCompactionInfoStruct}.
 * If {@code ci} is null, the returned struct simply has no {@code ci} field set,
 * so callers never receive a null wrapper.
 *
 * @param ci the compaction info to convert, may be null
 * @return an optional struct, with its inner struct set only when {@code ci} was non-null
 */
public static OptionalCompactionInfoStruct compactionInfoToOptionalStruct(CompactionInfo ci) {
  OptionalCompactionInfoStruct result = new OptionalCompactionInfoStruct();
  CompactionInfoStruct struct = compactionInfoToStruct(ci);
  if (struct != null) {
    result.setCi(struct);
  }
  return result;
}
Usage of org.apache.hadoop.hive.metastore.api.CompactionInfoStruct in the Apache Hive project:
class CompactionInfo, method compactionInfoToStruct.
/**
 * Converts an internal {@link CompactionInfo} into its Thrift counterpart
 * {@link CompactionInfoStruct}, copying every field one-to-one.
 *
 * @param ci the compaction info to convert, may be null
 * @return the equivalent Thrift struct, or null when {@code ci} is null
 */
public static CompactionInfoStruct compactionInfoToStruct(CompactionInfo ci) {
  if (ci == null) {
    return null;
  }
  // Required fields go through the constructor; everything else via setters.
  CompactionInfoStruct struct = new CompactionInfoStruct(ci.id, ci.dbname, ci.tableName, ci.type);
  struct.setPartitionname(ci.partName);
  struct.setRunas(ci.runAs);
  struct.setProperties(ci.properties);
  struct.setToomanyaborts(ci.tooManyAborts);
  struct.setHasoldabort(ci.hasOldAbort);
  struct.setStart(ci.start);
  // Thrift models the single-char state as a String.
  struct.setState(String.valueOf(ci.state));
  struct.setWorkerId(ci.workerId);
  struct.setHighestWriteId(ci.highestWriteId);
  struct.setErrorMessage(ci.errorMessage);
  struct.setEnqueueTime(ci.enqueueTime);
  return struct;
}
Aggregations of CompactionInfoStruct usages.