Use of org.apache.hadoop.hive.metastore.MsckInfo in project hive by apache.
Example: the execute method of class MsckOperation.
@Override
public int execute() throws HiveException, IOException, TException {
  try {
    // Build the metastore-side Msck helper and initialize it with the MSCK-specific view of the conf.
    Msck msck = new Msck(false, false);
    msck.init(Msck.getMsckConf(context.getDb().getConf()));
    msck.updateExpressionProxy(getProxyClass(context.getDb().getConf()));

    TableName tableName = HiveTableName.of(desc.getTableName());

    // Partition retention stays disabled (-1) unless MSCK_REPAIR_ENABLE_PARTITION_RETENTION is set,
    // in which case the table's retention period is looked up through the metastore client.
    long partitionExpirySeconds = -1L;
    try (HiveMetaStoreClient msc = new HiveMetaStoreClient(context.getConf())) {
      boolean msckEnablePartitionRetention = MetastoreConf.getBoolVar(context.getConf(),
          MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION);
      if (msckEnablePartitionRetention) {
        Table table = msc.getTable(SessionState.get().getCurrentCatalog(), tableName.getDb(), tableName.getTable());
        String qualifiedTableName = Warehouse.getCatalogQualifiedTableName(table);
        partitionExpirySeconds = PartitionManagementTask.getRetentionPeriodInSeconds(table);
        LOG.info("{} - Retention period ({}s) for partition is enabled for MSCK REPAIR..",
            qualifiedTableName, partitionExpirySeconds);
      }
    }

    // Package the repair request and delegate to Msck.repair(); its return code becomes the operation's result.
    MsckInfo msckInfo = new MsckInfo(SessionState.get().getCurrentCatalog(), tableName.getDb(), tableName.getTable(),
        desc.getFilterExp(), desc.getResFile(), desc.isRepairPartitions(), desc.isAddPartitions(),
        desc.isDropPartitions(), partitionExpirySeconds);
    return msck.repair(msckInfo);
  } catch (MetaException e) {
    LOG.error("Unable to create msck instance.", e);
    return 1;
  } catch (SemanticException e) {
    LOG.error("Msck failed.", e);
    return 1;
  }
}
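
The method above wires MsckInfo through Hive's DDL operation plumbing (desc, context, SessionState). Below is a minimal sketch of driving the same Msck/MsckInfo calls directly, following the construction pattern and argument order visible in execute(). It is not Hive project code: the catalog/database/table names are hypothetical, the null filter expression and null result file are illustrative assumptions, and MetastoreConf.newMetastoreConf() is assumed as the way to obtain a metastore Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.Msck;
import org.apache.hadoop.hive.metastore.MsckInfo;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public class MsckRepairSketch {
  public static void main(String[] args) throws Exception {
    // Assumes metastore connection settings are available (e.g. hive-site.xml on the classpath).
    Configuration conf = MetastoreConf.newMetastoreConf();

    // Same pattern as MsckOperation.execute(): construct, then init with the MSCK-specific conf.
    Msck msck = new Msck(false, false);
    msck.init(Msck.getMsckConf(conf));

    // Describe the repair for a hypothetical table "sales.events" in the default "hive" catalog:
    // no partition filter, no result file, repair and add missing partitions, do not drop,
    // retention-based expiry disabled (-1), mirroring the default in execute().
    MsckInfo msckInfo = new MsckInfo("hive", "sales", "events",
        null,   // filter expression over partition columns (none; type assumed from desc.getFilterExp())
        null,   // result file for unrecoverable output (none; type assumed from desc.getResFile())
        true,   // repairPartitions
        true,   // addPartitions
        false,  // dropPartitions
        -1L);   // partitionExpirySeconds: retention disabled

    // The returned int is the same code that execute() propagates as the operation's result.
    int rc = msck.repair(msckInfo);
    System.out.println("MSCK repair finished with code " + rc);
  }
}

In Hive itself this code path is reached by the MSCK REPAIR TABLE statement rather than by calling Msck directly; the sketch only illustrates how the MsckInfo request object is assembled.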