Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
From the class TestCompactionMetrics, the method testWorkingAgeMetricsOrder:
@Test
public void testWorkingAgeMetricsOrder() {
ShowCompactResponse scr = new ShowCompactResponse();
long start = System.currentTimeMillis();
List<ShowCompactResponseElement> elements = ImmutableList.of(
    generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE,
        start, false, "4.0.0", "4.0.0", start - 1_000L),
    generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE,
        start, false, "4.0.0", "4.0.0", start - 15_000L));
scr.setCompacts(elements);
AcidMetricService.updateMetricsFromShowCompact(scr);
// Check that the age is older than 10s
Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_WORKING_AGE).intValue() > 10);
// Check the reverse order
elements = ImmutableList.of(
    generateElement(16, "db3", "tb6", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE,
        start, false, "4.0.0", "4.0.0", start - 25_000L),
    generateElement(15, "db3", "tb5", null, CompactionType.MINOR, TxnStore.WORKING_RESPONSE,
        start, false, "4.0.0", "4.0.0", start - 1_000L));
scr.setCompacts(elements);
AcidMetricService.updateMetricsFromShowCompact(scr);
// Check that the age is older than 20s
Assert.assertTrue(Metrics.getOrCreateGauge(MetricsConstants.COMPACTION_OLDEST_WORKING_AGE).intValue() > 20);
}
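The generateElement helper belongs to the test class and is not shown on this page. A plausible shape, inferred from the call sites above (the parameter names, the constructor, and the setters are assumptions, not the actual Hive test code):
private ShowCompactResponseElement generateElement(long id, String db, String table,
    String partition, CompactionType type, String state, long enqueuedTime,
    boolean manuallyInitiated, String initiatorVersion, String workerVersion,
    long startTime) {
ShowCompactResponseElement e = new ShowCompactResponseElement(db, table, type, state);
e.setId(id);
if (partition != null) {
e.setPartitionname(partition);
}
e.setEnqueueTime(enqueuedTime);
// manuallyInitiated presumably drives fields this test does not assert on
e.setInitiatorVersion(initiatorVersion);
e.setWorkerVersion(workerVersion);
// COMPACTION_OLDEST_WORKING_AGE is presumably measured from the start time of
// WORKING elements, hence the start - 1_000L / start - 15_000L arguments above
e.setStart(startTime);
return e;
}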
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
From the class FilterUtils, the method filterCompactionsIfEnabled:
/**
 * Filter the list of compactions if filtering is enabled; otherwise, return the original list.
 *
 * @param isFilterEnabled true: filtering is enabled; false: filtering is disabled
 * @param filterHook the object that does the filtering
 * @param catName the catalog the compactions' databases belong to
 * @param compactions the list of compactions
 * @return the list of compactions the user has access to, or the original list if filtering is disabled
 * @throws MetaException
 */
public static List<ShowCompactResponseElement> filterCompactionsIfEnabled(
    boolean isFilterEnabled, MetaStoreFilterHook filterHook, String catName,
    List<ShowCompactResponseElement> compactions) throws MetaException {
if (isFilterEnabled) {
List<ShowCompactResponseElement> result = new ArrayList<>(compactions.size());
// DBName -> List of TableNames map used for checking access rights for non-partitioned tables
Map<String, List<String>> nonPartTables = new HashMap<>();
// DBName -> TableName -> List of PartitionNames map used for checking access rights for
// partitioned tables
Map<String, Map<String, List<String>>> partTables = new HashMap<>();
for (ShowCompactResponseElement c : compactions) {
if (c.getPartitionname() == null) {
List<String> tables = nonPartTables.computeIfAbsent(c.getDbname(), k -> new ArrayList<>());
if (!tables.contains(c.getTablename())) {
tables.add(c.getTablename());
}
} else {
List<String> partitions = partTables.computeIfAbsent(c.getDbname(), k -> new HashMap<>())
    .computeIfAbsent(c.getTablename(), k -> new ArrayList<>());
if (!partitions.contains(c.getPartitionname())) {
partitions.add(c.getPartitionname());
}
}
}
// Check access rights for non-partitioned tables
for (Map.Entry<String, List<String>> e : nonPartTables.entrySet()) {
nonPartTables.put(e.getKey(), filterHook.filterTableNames(catName, e.getKey(), e.getValue()));
}
// Check access rights for partitioned tables
for (Map.Entry<String, Map<String, List<String>>> dbEntry : partTables.entrySet()) {
for (Map.Entry<String, List<String>> tableEntry : dbEntry.getValue().entrySet()) {
dbEntry.getValue().put(tableEntry.getKey(),
    filterHook.filterPartitionNames(catName, dbEntry.getKey(), tableEntry.getKey(), tableEntry.getValue()));
}
}
// Add a compaction to the result only if the user has access rights to it
for (ShowCompactResponseElement c : compactions) {
if (c.getPartitionname() == null) {
if (nonPartTables.get(c.getDbname()).contains(c.getTablename())) {
result.add(c);
}
} else {
if (partTables.get(c.getDbname()).get(c.getTablename()).contains(c.getPartitionname())) {
result.add(c);
}
}
}
return result;
} else {
return compactions;
}
}
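A hedged usage sketch for this method: a metastore request handler might apply the filter to a ShowCompactResponse along these lines (isServerFilterEnabled, filterHook, request, and conf stand in for the real handler's state and are assumptions; MetaStoreUtils.getDefaultCatalog is the existing helper):
ShowCompactResponse response = txnHandler.showCompact(request);
response.setCompacts(FilterUtils.filterCompactionsIfEnabled(
    isServerFilterEnabled, filterHook, MetaStoreUtils.getDefaultCatalog(conf),
    response.getCompacts()));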
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
From the class PreUpgradeTool, the method prepareAcidUpgradeInternal:
/*
* todo: change script comments to a preamble instead of a footer
*/
private void prepareAcidUpgradeInternal() throws HiveException, TException, IOException {
if (!isAcidEnabled(conf)) {
LOG.info("acid is off, there can't be any acid tables - nothing to compact");
return;
}
IMetaStoreClient hms = metaStoreClient.get();
LOG.debug("Looking for databases");
String exceptionMsg = null;
List<String> databases;
CompactTablesState compactTablesState;
try {
// TException
databases = hms.getDatabases(runOptions.getDbRegex());
LOG.debug("Found " + databases.size() + " databases to process");
ForkJoinPool processTablePool = new ForkJoinPool(runOptions.getTablePoolSize(),
    new NamedForkJoinWorkerThreadFactory("Table-"), getUncaughtExceptionHandler(), false);
compactTablesState = databases.stream()
    .map(dbName -> processDatabase(dbName, processTablePool, runOptions))
    .reduce(CompactTablesState::merge)
    .orElse(CompactTablesState.empty());
} catch (Exception e) {
if (isAccessControlException(e)) {
exceptionMsg = "Unable to get databases. Pre-upgrade tool requires read access "
    + "to databases and tables to determine if a table has to be compacted. Set "
    + HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname + " config to "
    + "false to allow read access to databases and tables and retry the pre-upgrade tool.";
}
throw new HiveException(exceptionMsg, e);
}
makeCompactionScript(compactTablesState, runOptions.getOutputDir());
if (runOptions.isExecute()) {
while (compactTablesState.getMetaInfo().getCompactionIds().size() > 0) {
LOG.debug("Will wait for " + compactTablesState.getMetaInfo().getCompactionIds().size() + " compactions to complete");
ShowCompactResponse resp = hms.showCompactions();
for (ShowCompactResponseElement e : resp.getCompacts()) {
final String state = e.getState();
boolean removed;
switch(state) {
case TxnStore.CLEANING_RESPONSE:
case TxnStore.SUCCEEDED_RESPONSE:
removed = compactTablesState.getMetaInfo().getCompactionIds().remove(e.getId());
if (removed) {
LOG.debug("Required compaction succeeded: " + e.toString());
}
break;
case TxnStore.ATTEMPTED_RESPONSE:
case TxnStore.FAILED_RESPONSE:
removed = compactTablesState.getMetaInfo().getCompactionIds().remove(e.getId());
if (removed) {
LOG.warn("Required compaction failed: " + e.toString());
}
break;
case TxnStore.INITIATED_RESPONSE:
// LOG.debug("Still waiting on: " + e.toString());
break;
case TxnStore.WORKING_RESPONSE:
LOG.debug("Still working on: " + e.toString());
break;
default:
// shouldn't be any others
LOG.error("Unexpected state for: " + e.toString());
}
}
if (compactTablesState.getMetaInfo().getCompactionIds().size() > 0) {
try {
if (callback != null) {
callback.onWaitForCompaction();
}
Thread.sleep(pollIntervalMs);
} catch (InterruptedException ex) {
// this only responds to ^C
}
}
}
}
}
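Stripped of the PreUpgradeTool specifics, the wait loop above is a generic polling pattern over showCompactions(). A minimal sketch, assuming the caller tracks pending compaction ids in a mutable set (the method name and parameters are illustrative, not part of the Hive API):
static void waitForCompactions(IMetaStoreClient hms, Set<Long> pendingIds, long pollIntervalMs)
    throws TException, InterruptedException {
while (!pendingIds.isEmpty()) {
for (ShowCompactResponseElement e : hms.showCompactions().getCompacts()) {
String state = e.getState();
// Terminal states: succeeded/cleaning mean done, attempted/failed mean given up
if (TxnStore.SUCCEEDED_RESPONSE.equals(state) || TxnStore.CLEANING_RESPONSE.equals(state)
    || TxnStore.FAILED_RESPONSE.equals(state) || TxnStore.ATTEMPTED_RESPONSE.equals(state)) {
pendingIds.remove(e.getId());
}
}
if (!pendingIds.isEmpty()) {
Thread.sleep(pollIntervalMs);
}
}
}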
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
From the class TestInitiator, the method compactPartitionHighDeltaPct:
@Test
public void compactPartitionHighDeltaPct() throws Exception {
Table t = newTable("default", "cphdp", true);
Partition p = newPartition(t, "today");
addBaseFile(t, p, 20L, 20);
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
burnThroughTransactions("default", "cphdp", 23);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
comp.setTablename("cphdp");
comp.setPartitionname("ds=today");
comp.setOperationType(DataOperationType.UPDATE);
List<LockComponent> components = new ArrayList<LockComponent>(1);
components.add(comp);
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
long writeid = allocateWriteId("default", "cphdp", txnid);
Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
List<ShowCompactResponseElement> compacts = rsp.getCompacts();
Assert.assertEquals(1, compacts.size());
Assert.assertEquals("initiated", compacts.get(0).getState());
Assert.assertEquals("cphdp", compacts.get(0).getTablename());
Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
Assert.assertEquals(CompactionType.MAJOR, compacts.get(0).getType());
}
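The MAJOR decision follows from the delta-to-base ratio: 4 delta records against a 20-record base is 20%, above the default hive.compactor.delta.pct.threshold of 0.1. A sketch of tuning that threshold (the string key is the real property name; the raised value is only illustrative):
HiveConf conf = new HiveConf();
// Default is 0.1 (10%); deltas totaling a larger fraction of the base trigger MAJOR compaction
conf.setFloat("hive.compactor.delta.pct.threshold", 0.2f);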
Use of org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement in project hive by apache.
From the class TestInitiator, the method compactPartitionTooManyDeltas:
@Test
public void compactPartitionTooManyDeltas() throws Exception {
Table t = newTable("default", "cptmd", true);
Partition p = newPartition(t, "today");
addBaseFile(t, p, 200L, 200);
addDeltaFile(t, p, 201L, 201L, 1);
addDeltaFile(t, p, 202L, 202L, 1);
addDeltaFile(t, p, 203L, 203L, 1);
addDeltaFile(t, p, 204L, 204L, 1);
addDeltaFile(t, p, 205L, 205L, 1);
addDeltaFile(t, p, 206L, 206L, 1);
addDeltaFile(t, p, 207L, 207L, 1);
addDeltaFile(t, p, 208L, 208L, 1);
addDeltaFile(t, p, 209L, 209L, 1);
addDeltaFile(t, p, 210L, 210L, 1);
addDeltaFile(t, p, 211L, 211L, 1);
burnThroughTransactions("default", "cptmd", 210);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
comp.setTablename("cptmd");
comp.setPartitionname("ds=today");
comp.setOperationType(DataOperationType.UPDATE);
List<LockComponent> components = new ArrayList<LockComponent>(1);
components.add(comp);
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
long writeid = allocateWriteId("default", "cptmd", txnid);
Assert.assertEquals(211, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
List<ShowCompactResponseElement> compacts = rsp.getCompacts();
Assert.assertEquals(1, compacts.size());
Assert.assertEquals("initiated", compacts.get(0).getState());
Assert.assertEquals("cptmd", compacts.get(0).getTablename());
Assert.assertEquals("ds=today", compacts.get(0).getPartitionname());
Assert.assertEquals(CompactionType.MINOR, compacts.get(0).getType());
}
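Here the trigger is the delta file count rather than their size: 11 deltas exceed the default hive.compactor.delta.num.threshold of 10, while 11 records against a 200-record base stays well under the 10% ratio, so a MINOR rather than MAJOR compaction is requested. A sketch of tuning the count threshold (illustrative value):
HiveConf conf = new HiveConf();
// Default is 10; tables accumulating many small deltas cross this limit first
conf.setInt("hive.compactor.delta.num.threshold", 20);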