Use of org.apache.hadoop.hive.metastore.api.CompactionResponse in project hive by apache.
The class Initiator, method requestCompaction.
private void requestCompaction(CompactionInfo ci, String runAs, CompactionType type) throws MetaException {
  CompactionRequest rqst = new CompactionRequest(ci.dbname, ci.tableName, type);
  if (ci.partName != null) {
    rqst.setPartitionname(ci.partName);
  }
  rqst.setRunas(runAs);
  LOG.info("Requesting compaction: " + rqst);
  CompactionResponse resp = txnHandler.compact(rqst);
  if (resp.isAccepted()) {
    ci.id = resp.getId();
  }
}
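The request/response contract used here is small: a CompactionRequest names the target (database, table, optional partition) and the compaction type, and the CompactionResponse reports the queue id, the queue state, and whether the request was newly accepted. A minimal caller-side sketch follows; the TxnStore argument and the database/table/partition names are illustrative assumptions, not taken from the Hive source above.

import org.apache.hadoop.hive.metastore.api.CompactionRequest;
import org.apache.hadoop.hive.metastore.api.CompactionResponse;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

// Sketch only: "default", "acid_tbl", and "ds=2017-01-01" are hypothetical names.
public class EnqueueSketch {
  static long enqueue(TxnStore txnStore) throws Exception {
    CompactionRequest rqst = new CompactionRequest("default", "acid_tbl", CompactionType.MAJOR);
    rqst.setPartitionname("ds=2017-01-01"); // optional: omit for non-partitioned tables
    rqst.setRunas("hive");                  // optional: user the compactor job runs as
    CompactionResponse resp = txnStore.compact(rqst);
    if (resp.isAccepted()) {
      // A new COMPACTION_QUEUE entry was created under this id.
      System.out.println("Enqueued compaction with id=" + resp.getId());
    } else {
      // A compaction for this resource is already initiated/working; reuse its id.
      System.out.println("Already enqueued: id=" + resp.getId() + ", state=" + resp.getState());
    }
    return resp.getId();
  }
}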
Use of org.apache.hadoop.hive.metastore.api.CompactionResponse in project hive by apache.
The class DDLTask, method compact.
private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException {
  Table tbl = db.getTable(desc.getTableName());
  if (!AcidUtils.isTransactionalTable(tbl)) {
    throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(), tbl.getTableName());
  }
  String partName = null;
  if (desc.getPartSpec() == null) {
    // Compaction can only be done on the whole table if the table is non-partitioned.
    if (tbl.isPartitioned()) {
      throw new HiveException(ErrorMsg.NO_COMPACTION_PARTITION);
    }
  } else {
    Map<String, String> partSpec = desc.getPartSpec();
    List<Partition> partitions = db.getPartitions(tbl, partSpec);
    if (partitions.size() > 1) {
      throw new HiveException(ErrorMsg.TOO_MANY_COMPACTION_PARTITIONS);
    } else if (partitions.size() == 0) {
      throw new HiveException(ErrorMsg.INVALID_PARTITION_SPEC);
    }
    partName = partitions.get(0).getName();
  }
  CompactionResponse resp = db.compact2(tbl.getDbName(), tbl.getTableName(), partName,
      desc.getCompactionType(), desc.getProps());
  if (resp.isAccepted()) {
    console.printInfo("Compaction enqueued with id " + resp.getId());
  } else {
    console.printInfo("Compaction already enqueued with id " + resp.getId() + "; State is " + resp.getState());
  }
  if (desc.isBlocking() && resp.isAccepted()) {
    StringBuilder progressDots = new StringBuilder();
    long waitTimeMs = 1000;
    wait: while (true) {
      // Double the wait time, capping it at 5 minutes.
      waitTimeMs = waitTimeMs * 2;
      waitTimeMs = waitTimeMs < 5 * 60 * 1000 ? waitTimeMs : 5 * 60 * 1000;
      try {
        Thread.sleep(waitTimeMs);
      } catch (InterruptedException ex) {
        console.printInfo("Interrupted while waiting for compaction with id=" + resp.getId());
        break;
      }
      // This could be expensive when there are a lot of compactions.
      // TODO: update to search by ID once HIVE-13353 is done.
      ShowCompactResponse allCompactions = db.showCompactions();
      for (ShowCompactResponseElement compaction : allCompactions.getCompacts()) {
        if (resp.getId() != compaction.getId()) {
          continue;
        }
        switch (compaction.getState()) {
          case TxnStore.WORKING_RESPONSE:
          case TxnStore.INITIATED_RESPONSE:
            // Still working: print a progress dot and keep polling.
            console.printInfo(progressDots.toString());
            progressDots.append(".");
            continue wait;
          default:
            // Terminal state reached.
            console.printInfo("Compaction with id " + resp.getId() + " finished with status: " + compaction.getState());
            break wait;
        }
      }
    }
  }
  return 0;
}
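This DDLTask path serves the ALTER TABLE ... COMPACT statement; when the statement carries a blocking clause, desc.isBlocking() is true and the polling loop above runs until the compaction reaches a terminal state. A hedged end-to-end sketch over JDBC follows; the connection URL, table, and partition are assumptions about a typical HiveServer2 deployment, and the 'AND WAIT' clause is the blocking variant that corresponds to desc.isBlocking().

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CompactViaJdbc {
  public static void main(String[] args) throws Exception {
    // Assumed HiveServer2 endpoint and table; adjust for your deployment.
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      // 'AND WAIT' blocks until the compaction finishes, driving the polling loop above.
      stmt.execute("ALTER TABLE acid_tbl PARTITION (ds='2017-01-01') COMPACT 'major' AND WAIT");
    }
  }
}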
Use of org.apache.hadoop.hive.metastore.api.CompactionResponse in project hive by apache.
The class TxnHandler, method compact.
@Override
@RetrySemantics.Idempotent
public CompactionResponse compact(CompactionRequest rqst) throws MetaException {
  // Put a compaction request in the queue.
  try {
    Connection dbConn = null;
    Statement stmt = null;
    TxnStore.MutexAPI.LockHandle handle = null;
    try {
      lockInternal();
      /**
       * MUTEX_KEY.CompactionScheduler lock ensures that there is only 1 entry in
       * Initiated/Working state for any resource. This ensures that we don't run
       * concurrent compactions for any resource.
       */
      handle = getMutexAPI().acquireLock(MUTEX_KEY.CompactionScheduler.name());
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      long id = generateCompactionQueueId(stmt);
      // Check whether this resource already has an initiated or working compaction.
      StringBuilder sb = new StringBuilder("select cq_id, cq_state from COMPACTION_QUEUE where")
          .append(" cq_state IN(").append(quoteChar(INITIATED_STATE)).append(",")
          .append(quoteChar(WORKING_STATE))
          .append(") AND cq_database=").append(quoteString(rqst.getDbname()))
          .append(" AND cq_table=").append(quoteString(rqst.getTablename()))
          .append(" AND ");
      if (rqst.getPartitionname() == null) {
        sb.append("cq_partition is null");
      } else {
        sb.append("cq_partition=").append(quoteString(rqst.getPartitionname()));
      }
      LOG.debug("Going to execute query <" + sb.toString() + ">");
      ResultSet rs = stmt.executeQuery(sb.toString());
      if (rs.next()) {
        long enqueuedId = rs.getLong(1);
        String state = compactorStateToResponse(rs.getString(2).charAt(0));
        LOG.info("Ignoring request to compact " + rqst.getDbname() + "/" + rqst.getTablename() +
            "/" + rqst.getPartitionname() + " since it is already " + quoteString(state) +
            " with id=" + enqueuedId);
        // Not accepted: report the id and state of the entry already in the queue.
        return new CompactionResponse(enqueuedId, state, false);
      }
      close(rs);
      // Build the insert statement, including optional columns only when set.
      StringBuilder buf = new StringBuilder("insert into COMPACTION_QUEUE (cq_id, cq_database, " +
          "cq_table, ");
      String partName = rqst.getPartitionname();
      if (partName != null) {
        buf.append("cq_partition, ");
      }
      buf.append("cq_state, cq_type");
      if (rqst.getProperties() != null) {
        buf.append(", cq_tblproperties");
      }
      if (rqst.getRunas() != null) {
        buf.append(", cq_run_as");
      }
      buf.append(") values (");
      buf.append(id);
      buf.append(", '");
      buf.append(rqst.getDbname());
      buf.append("', '");
      buf.append(rqst.getTablename());
      buf.append("', '");
      if (partName != null) {
        buf.append(partName);
        buf.append("', '");
      }
      buf.append(INITIATED_STATE);
      buf.append("', '");
      switch (rqst.getType()) {
        case MAJOR:
          buf.append(MAJOR_TYPE);
          break;
        case MINOR:
          buf.append(MINOR_TYPE);
          break;
        default:
          LOG.debug("Going to rollback");
          dbConn.rollback();
          throw new MetaException("Unexpected compaction type " + rqst.getType().toString());
      }
      if (rqst.getProperties() != null) {
        buf.append("', '");
        buf.append(new StringableMap(rqst.getProperties()).toString());
      }
      if (rqst.getRunas() != null) {
        buf.append("', '");
        buf.append(rqst.getRunas());
      }
      buf.append("')");
      String s = buf.toString();
      LOG.debug("Going to execute update <" + s + ">");
      stmt.executeUpdate(s);
      LOG.debug("Going to commit");
      dbConn.commit();
      return new CompactionResponse(id, INITIATED_RESPONSE, true);
    } catch (SQLException e) {
      LOG.debug("Going to rollback");
      rollbackDBConn(dbConn);
      checkRetryable(dbConn, e, "compact(" + rqst + ")");
      throw new MetaException("Unable to select from transaction database " +
          StringUtils.stringifyException(e));
    } finally {
      closeStmt(stmt);
      closeDbConn(dbConn);
      if (handle != null) {
        handle.releaseLocks();
      }
      unlockInternal();
    }
  } catch (RetryException e) {
    // checkRetryable threw RetryException for a transient error; safe to re-run.
    return compact(rqst);
  }
}
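The method is annotated @RetrySemantics.Idempotent: when checkRetryable classifies a SQLException as transient it throws RetryException, and the outer catch simply re-invokes compact(rqst). Because the duplicate check runs under the CompactionScheduler mutex, a retried call either inserts the entry once or returns the one already enqueued. Below is a self-contained sketch of that control-flow shape only; the names RetrySketch, TransientException, and doOnce are hypothetical, not Hive APIs.

import java.util.concurrent.atomic.AtomicInteger;

public class RetrySketch {
  static class TransientException extends Exception {}

  private final AtomicInteger attempts = new AtomicInteger();

  long doOnce() throws TransientException {
    // Simulate one transient failure before succeeding.
    if (attempts.incrementAndGet() == 1) throw new TransientException();
    return 42L; // stand-in for the enqueued compaction id
  }

  long withRetry() throws TransientException {
    try {
      return doOnce();
    } catch (TransientException e) {
      // Idempotent per the duplicate check: re-running cannot enqueue twice.
      return withRetry();
    }
  }

  public static void main(String[] args) throws Exception {
    System.out.println("id=" + new RetrySketch().withRetry());
  }
}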
Use of org.apache.hadoop.hive.metastore.api.CompactionResponse in project hive by apache.
The class TestTxnHandler, method testCompactWhenAlreadyCompacting.
/**
 * Once a Compaction for a given resource is scheduled/working, we should not
 * schedule another one to prevent concurrent compactions for the same resource.
 * @throws Exception
 */
@Test
public void testCompactWhenAlreadyCompacting() throws Exception {
  CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MAJOR);
  rqst.setPartitionname("ds=today");
  CompactionResponse resp = txnHandler.compact(rqst);
  Assert.assertEquals(resp, new CompactionResponse(1, TxnStore.INITIATED_RESPONSE, true));
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  rqst.setType(CompactionType.MINOR);
  resp = txnHandler.compact(rqst);
  Assert.assertEquals(resp, new CompactionResponse(1, TxnStore.INITIATED_RESPONSE, false));
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  ShowCompactResponseElement c = compacts.get(0);
  assertEquals("foo", c.getDbname());
  assertEquals("bar", c.getTablename());
  assertEquals("ds=today", c.getPartitionname());
  assertEquals(CompactionType.MAJOR, c.getType());
  assertEquals("initiated", c.getState());
  assertEquals(0L, c.getStart());
}
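A possible follow-on check, not part of the original test: the duplicate detection in TxnHandler.compact() keys on database, table, and partition, so a request for a different partition of the same table should be accepted as a new queue entry. A hedged sketch, reusing the txnHandler fixture above with "ds=yesterday" as an illustrative partition:

// Hypothetical extension of the test above; not from the Hive source.
CompactionRequest other = new CompactionRequest("foo", "bar", CompactionType.MAJOR);
other.setPartitionname("ds=yesterday");
CompactionResponse otherResp = txnHandler.compact(other);
Assert.assertTrue(otherResp.isAccepted());
assertEquals(2, txnHandler.showCompact(new ShowCompactRequest()).getCompacts().size());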