Example use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in the Apache Hive project: the execute method of the ShowCompactionsOperation class.
/**
 * Fetches the status of all known compactions from the metastore and writes
 * one row per compaction into the operation's result file.
 *
 * <p>Completed compactions are eventually purged from the metastore, so the
 * response reflects only what is still retained.
 *
 * @return 0 on success, 1 if writing the result file failed
 * @throws HiveException if the metastore call fails
 */
@Override
public int execute() throws HiveException {
  SessionState sessionState = SessionState.get();
  // Ask the metastore for the status of all known compactions.
  ShowCompactResponse response = context.getDb().showCompactions();
  // try-with-resources guarantees the output stream is closed on every path.
  try (DataOutputStream outStream = ShowUtils.getOutputStream(new Path(desc.getResFile()), context)) {
    // cliDriver needs an explicit header row; HiveServer2 supplies its own column metadata.
    if (!sessionState.isHiveServerQuery()) {
      writeHeader(outStream);
    }
    List<ShowCompactResponseElement> compacts = response.getCompacts();
    if (compacts != null) {
      for (ShowCompactResponseElement element : compacts) {
        writeRow(outStream, element);
      }
    }
  } catch (IOException e) {
    LOG.warn("show compactions: ", e);
    return 1;
  }
  return 0;
}
Example use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in the Apache Hive project: the foundCurrentOrFailedCompactions method of the Initiator class.
/**
 * Checks the compaction history for reasons not to schedule a new compaction on the
 * table/partition described by {@code ci}.
 *
 * <p>Returns {@code true} (and, in the failed-threshold case, marks {@code ci} failed)
 * when either a compaction for the same resource is already initiated/working, or the
 * last {@code COMPACTOR_INITIATOR_FAILED_THRESHOLD} terminal attempts all failed and the
 * most recent failure is not old enough to retry.
 *
 * @param compactions the full compaction history from the metastore
 * @param ci the candidate table/partition being considered for compaction
 * @return true if a new compaction must NOT be initiated, false otherwise
 * @throws MetaException if marking the candidate as failed fails
 */
private boolean foundCurrentOrFailedCompactions(ShowCompactResponse compactions, CompactionInfo ci) throws MetaException {
  if (compactions.getCompacts() == null) {
    return false;
  }
  // Restrict the history to entries for exactly this table/partition.
  // BUG FIX: the old partition check, `e.getPartitionname() == null && ci.partName == null
  // || e.getPartitionname().equals(ci.partName)`, threw an NPE when the element's partition
  // name was null but ci.partName was not (&& binds tighter than ||, so the equals() call
  // still ran on null). The ternary below is the null-safe equivalent.
  List<ShowCompactResponseElement> filteredElements = compactions.getCompacts().stream()
      .filter(e -> e.getDbname().equals(ci.dbname)
          && e.getTablename().equals(ci.tableName)
          && (e.getPartitionname() == null
              ? ci.partName == null
              : e.getPartitionname().equals(ci.partName)))
      .collect(Collectors.toList());
  // Figure out if there are any currently running compactions on the same table or partition.
  if (filteredElements.stream().anyMatch(
      e -> TxnStore.WORKING_RESPONSE.equals(e.getState()) || TxnStore.INITIATED_RESPONSE.equals(e.getState()))) {
    LOG.info("Found currently initiated or working compaction for " + ci.getFullPartitionName()
        + " so we will not initiate another compaction");
    return true;
  }
  // Check if there is already sufficient number of consecutive failures for this table/partition
  // so that no new automatic compactions needs to be scheduled.
  int failedThreshold = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
  // Take the most recent `failedThreshold` terminal (succeeded/failed) attempts, newest first
  // by id, and summarize the enqueue times of the failed ones among them. If all of them were
  // failures, getCount() below equals failedThreshold.
  LongSummaryStatistics failedStats = filteredElements.stream()
      .filter(e -> TxnStore.SUCCEEDED_RESPONSE.equals(e.getState()) || TxnStore.FAILED_RESPONSE.equals(e.getState()))
      .sorted(Comparator.comparingLong(ShowCompactResponseElement::getId).reversed())
      .limit(failedThreshold)
      .filter(e -> TxnStore.FAILED_RESPONSE.equals(e.getState()))
      .collect(Collectors.summarizingLong(ShowCompactResponseElement::getEnqueueTime));
  // If the last attempt was too long ago, ignore the failed threshold and try compaction again.
  long retryTime = MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_RETRY_TIME, TimeUnit.MILLISECONDS);
  boolean needsRetry = (retryTime > 0) && (failedStats.getMax() + retryTime < System.currentTimeMillis());
  if (failedStats.getCount() == failedThreshold && !needsRetry) {
    LOG.warn("Will not initiate compaction for " + ci.getFullPartitionName() + " since last "
        + MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " attempts to compact it failed.");
    ci.errorMessage = "Compaction is not initiated since last "
        + MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " consecutive compaction attempts failed)";
    txnHandler.markFailed(ci);
    return true;
  }
  return false;
}
Example use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in the Apache Hive project: the testCompactWhenAlreadyCompacting method of the TestTxnHandler class.
/**
 * Once a Compaction for a given resource is scheduled/working, we should not
 * schedule another one to prevent concurrent compactions for the same resource.
 * @throws Exception
 */
@Test
public void testCompactWhenAlreadyCompacting() throws Exception {
  CompactionRequest rqst = new CompactionRequest("foo", "bar", CompactionType.MAJOR);
  rqst.setPartitionname("ds=today");
  CompactionResponse resp = txnHandler.compact(rqst);
  // FIX: JUnit's assertEquals contract is (expected, actual); the arguments were
  // swapped, which would have produced misleading failure messages.
  Assert.assertEquals(new CompactionResponse(1, TxnStore.INITIATED_RESPONSE, true), resp);
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  // A second request for the same partition must be rejected (accepted == false)
  // and must not add a new history entry.
  rqst.setType(CompactionType.MINOR);
  resp = txnHandler.compact(rqst);
  Assert.assertEquals(new CompactionResponse(1, TxnStore.INITIATED_RESPONSE, false), resp);
  rsp = txnHandler.showCompact(new ShowCompactRequest());
  compacts = rsp.getCompacts();
  assertEquals(1, compacts.size());
  // The surviving entry must still be the original MAJOR request, untouched.
  ShowCompactResponseElement c = compacts.get(0);
  assertEquals("foo", c.getDbname());
  assertEquals("bar", c.getTablename());
  assertEquals("ds=today", c.getPartitionname());
  assertEquals(CompactionType.MAJOR, c.getType());
  assertEquals("initiated", c.getState());
  assertEquals(0L, c.getStart());
}
Example use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in the Apache Hive project: the countCompactionsInHistory method of the TestCompactionTxnHandler class.
/**
 * Asserts that the compaction history holds the expected number of succeeded, failed
 * and did-not-initiate entries for the given table (and optionally partition).
 *
 * @param partition partition name to match exactly, or null to match all partitions
 */
private void countCompactionsInHistory(String dbName, String tableName, String partition, int expectedSucceeded, int expectedFailed, int expectedDidNotInitiate) throws MetaException {
  ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
  // Narrow the full history down to entries for this table/partition.
  List<ShowCompactResponseElement> matching = resp.getCompacts().stream()
      .filter(e -> e.getDbname().equals(dbName))
      .filter(e -> e.getTablename().equals(tableName))
      .filter(e -> partition == null || partition.equals(e.getPartitionname()))
      .collect(Collectors.toList());
  assertEquals(expectedSucceeded, countWithState(matching, TxnStore.SUCCEEDED_RESPONSE));
  assertEquals(expectedFailed, countWithState(matching, TxnStore.FAILED_RESPONSE));
  assertEquals(expectedDidNotInitiate, countWithState(matching, TxnStore.DID_NOT_INITIATE_RESPONSE));
}

/** Counts the elements whose state string equals the given compaction state. */
private static long countWithState(List<ShowCompactResponseElement> elements, String state) {
  return elements.stream().filter(e -> e.getState().equals(state)).count();
}
Example use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in the Apache Hive project: the testFindNextToClean method of the TestCompactionTxnHandler class.
/**
 * Verifies that a compaction becomes visible to the cleaner only after it has been
 * marked compacted, and that its history entry then reads "ready for cleaning"
 * with no assigned worker.
 */
@Test
public void testFindNextToClean() throws Exception {
  CompactionRequest request = new CompactionRequest("foo", "bar", CompactionType.MINOR);
  request.setPartitionname("ds=today");
  txnHandler.compact(request);
  // Nothing is ready to clean while the compaction has not even been worked on.
  assertEquals(0, txnHandler.findReadyToClean(0, 0).size());
  CompactionInfo info = txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION));
  assertNotNull(info);
  info.highestWriteId = 41;
  txnHandler.updateCompactorState(info, 0);
  txnHandler.markCompacted(info);
  // Once picked up (and compacted), the entry must not be handed to another worker.
  assertNull(txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION)));
  List<CompactionInfo> readyToClean = txnHandler.findReadyToClean(0, 0);
  assertEquals(1, readyToClean.size());
  assertNull(txnHandler.findNextToCompact(aFindNextCompactRequest("fred", WORKER_VERSION)));
  // The history should show exactly the one entry, now in the cleaning state.
  ShowCompactResponse response = txnHandler.showCompact(new ShowCompactRequest());
  List<ShowCompactResponseElement> compacts = response.getCompacts();
  assertEquals(1, compacts.size());
  ShowCompactResponseElement element = compacts.get(0);
  assertEquals("foo", element.getDbname());
  assertEquals("bar", element.getTablename());
  assertEquals("ds=today", element.getPartitionname());
  assertEquals(CompactionType.MINOR, element.getType());
  assertEquals("ready for cleaning", element.getState());
  // The worker id is cleared once the work phase is over.
  assertNull(element.getWorkerid());
}
Aggregations