use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
the class HiveAlterHandler method alterTableUpdateTableColumnStats.
@VisibleForTesting
void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
    throws MetaException, InvalidObjectException {
  String dbName = oldTable.getDbName().toLowerCase();
  String tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(oldTable.getTableName());
  String newDbName = newTable.getDbName().toLowerCase();
  String newTableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(newTable.getTableName());
  try {
    List<FieldSchema> oldCols = oldTable.getSd().getCols();
    List<FieldSchema> newCols = newTable.getSd().getCols();
    List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
    ColumnStatistics colStats = null;
    boolean updateColumnStats = true;
    // Nothing to update if everything is the same
    if (newDbName.equals(dbName) && newTableName.equals(tableName)
        && MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
      updateColumnStats = false;
    }
    if (updateColumnStats) {
      List<String> oldColNames = new ArrayList<>(oldCols.size());
      for (FieldSchema oldCol : oldCols) {
        oldColNames.add(oldCol.getName());
      }
      // Collect column stats which need to be rewritten and remove old stats
      colStats = msdb.getTableColumnStatistics(dbName, tableName, oldColNames);
      if (colStats == null) {
        updateColumnStats = false;
      } else {
        List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
        if (statsObjs != null) {
          List<String> deletedCols = new ArrayList<>();
          for (ColumnStatisticsObj statsObj : statsObjs) {
            boolean found = false;
            for (FieldSchema newCol : newCols) {
              if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
                  && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
                found = true;
                break;
              }
            }
            if (found) {
              if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
                msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
                newStatsObjs.add(statsObj);
                deletedCols.add(statsObj.getColName());
              }
            } else {
              msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
              deletedCols.add(statsObj.getColName());
            }
          }
          StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
        }
      }
    }
    // Change to new table and append stats for the new table
    msdb.alterTable(dbName, tableName, newTable);
    if (updateColumnStats && !newStatsObjs.isEmpty()) {
      ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
      statsDesc.setDbName(newDbName);
      statsDesc.setTableName(newTableName);
      colStats.setStatsObj(newStatsObjs);
      msdb.updateTableColumnStatistics(colStats);
    }
  } catch (NoSuchObjectException nsoe) {
    LOG.debug("Could not find db entry: " + nsoe);
  } catch (InvalidInputException e) {
    // Should not happen, since the inputs were verified before being passed in
    throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
  }
}
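A minimal test-style sketch of the rename path above; it assumes Mockito is available and that the test sits in the same package as HiveAlterHandler (the method is package-private), and all database, table, and column names are hypothetical. When only the table name changes, the matching column's stats row is deleted under the old key and re-written under the new one:
RawStore msdb = Mockito.mock(RawStore.class);
// One column, "id" of type int, shared by the old and new schemas.
StorageDescriptor sd = new StorageDescriptor();
sd.setCols(Collections.singletonList(new FieldSchema("id", "int", null)));
Table oldTable = new Table();
oldTable.setDbName("default");
oldTable.setTableName("src");
oldTable.setSd(sd);
Table newTable = new Table(oldTable);
newTable.setTableName("src_renamed");
newTable.setParameters(new HashMap<String, String>());
ColumnStatistics stats = new ColumnStatistics(
    new ColumnStatisticsDesc(true, "default", "src"),
    Collections.singletonList(new ColumnStatisticsObj("id", "int", new ColumnStatisticsData())));
Mockito.when(msdb.getTableColumnStatistics("default", "src", Collections.singletonList("id")))
    .thenReturn(stats);
new HiveAlterHandler().alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
// The stats for "id" are removed under the old table key and re-written
// under the new table name.
Mockito.verify(msdb).deleteTableColumnStatistics("default", "src", "id");
Mockito.verify(msdb).updateTableColumnStatistics(stats);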
use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
the class HCatClientHMSImpl method addPartition.
@Override
public void addPartition(HCatAddPartitionDesc partInfo) throws HCatException {
  Table tbl = null;
  try {
    tbl = hmsClient.getTable(partInfo.getDatabaseName(), partInfo.getTableName());
    // TODO: Should be moved out.
    if (tbl.getPartitionKeysSize() == 0) {
      throw new HCatException("The table " + partInfo.getTableName() + " is not partitioned.");
    }
    HCatTable hcatTable = new HCatTable(tbl);
    HCatPartition hcatPartition = partInfo.getHCatPartition();
    // This is only required to support the deprecated methods in HCatAddPartitionDesc.Builder.
    if (hcatPartition == null) {
      hcatPartition = partInfo.getHCatPartition(hcatTable);
    }
    hmsClient.add_partition(hcatPartition.toHivePartition());
  } catch (InvalidObjectException e) {
    throw new HCatException("InvalidObjectException while adding partition.", e);
  } catch (AlreadyExistsException e) {
    throw new HCatException("AlreadyExistsException while adding partition.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while adding partition.", e);
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("The table " + partInfo.getTableName() + " could not be found.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while adding partition.", e);
  }
}
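A short caller-side sketch, assuming a reachable metastore and hypothetical database, table, and partition names; HCatClient wraps the thrift client shown above, so an invalid partition spec surfaces as an HCatException wrapping InvalidObjectException:
HCatClient client = HCatClient.create(new Configuration());
Map<String, String> spec = new HashMap<String, String>();
spec.put("dt", "20240101");
// Build the partition against the table's schema, then add it.
HCatPartition partition = new HCatPartition(client.getTable("mydb", "clicks"), spec,
    "hdfs:///warehouse/mydb.db/clicks/dt=20240101");
client.addPartition(HCatAddPartitionDesc.create(partition).build());
client.close();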
use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
the class HCatClientHMSImpl method addPartitions.
/*
 * @param partInfoList
 * @return The size of the list of partitions.
 * @throws HCatException,ConnectionFailureException
 * @see org.apache.hive.hcatalog.api.HCatClient#addPartitions(java.util.List)
 */
@Override
public int addPartitions(List<HCatAddPartitionDesc> partInfoList) throws HCatException {
  int numPartitions = -1;
  if ((partInfoList == null) || (partInfoList.size() == 0)) {
    throw new HCatException("The partition list is null or empty.");
  }
  Table tbl = null;
  try {
    tbl = hmsClient.getTable(partInfoList.get(0).getDatabaseName(), partInfoList.get(0).getTableName());
    HCatTable hcatTable = new HCatTable(tbl);
    ArrayList<Partition> ptnList = new ArrayList<Partition>();
    for (HCatAddPartitionDesc desc : partInfoList) {
      HCatPartition hCatPartition = desc.getHCatPartition();
      // This is required only to support the deprecated HCatAddPartitionDesc.Builder interfaces.
      if (hCatPartition == null) {
        hCatPartition = desc.getHCatPartition(hcatTable);
      }
      ptnList.add(hCatPartition.toHivePartition());
    }
    numPartitions = hmsClient.add_partitions(ptnList);
  } catch (InvalidObjectException e) {
    throw new HCatException("InvalidObjectException while adding partition.", e);
  } catch (AlreadyExistsException e) {
    throw new HCatException("AlreadyExistsException while adding partition.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while adding partition.", e);
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("The table " + partInfoList.get(0).getTableName() + " could not be found.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while adding partition.", e);
  }
  return numPartitions;
}
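A matching caller-side sketch for the batch variant, under the same assumptions and hypothetical names as the single-partition example above; passing a null location lets the metastore choose the default partition location:
HCatClient client = HCatClient.create(new Configuration());
HCatTable table = client.getTable("mydb", "clicks");
List<HCatAddPartitionDesc> descs = new ArrayList<HCatAddPartitionDesc>();
for (String dt : Arrays.asList("20240101", "20240102")) {
  Map<String, String> spec = new HashMap<String, String>();
  spec.put("dt", dt);
  descs.add(HCatAddPartitionDesc.create(new HCatPartition(table, spec, null)).build());
}
int added = client.addPartitions(descs); // returns the number of partitions added
client.close();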
use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
the class HBaseStore method grantPrivileges.
@Override
public boolean grantPrivileges(PrivilegeBag privileges)
    throws InvalidObjectException, MetaException, NoSuchObjectException {
  boolean commit = false;
  openTransaction();
  try {
    for (HiveObjectPrivilege priv : privileges.getPrivileges()) {
      // Locate the right object to deal with
      PrivilegeInfo privilegeInfo = findPrivilegeToGrantOrRevoke(priv);
      // Now, let's see if we've already got this privilege
      for (PrivilegeGrantInfo info : privilegeInfo.grants) {
        if (info.getPrivilege().equals(priv.getGrantInfo().getPrivilege())) {
          throw new InvalidObjectException(priv.getPrincipalName() + " already has "
              + priv.getGrantInfo().getPrivilege() + " on " + privilegeInfo.typeErrMsg);
        }
      }
      privilegeInfo.grants.add(priv.getGrantInfo());
      writeBackGrantOrRevoke(priv, privilegeInfo);
    }
    commit = true;
    return true;
  } finally {
    commitOrRoleBack(commit);
  }
}
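A sketch of building the PrivilegeBag this method consumes, assuming the Hive 2.x thrift constructors and hypothetical principal and table names; granting the same privilege twice trips the InvalidObjectException above:
HiveObjectRef tableRef = new HiveObjectRef(HiveObjectType.TABLE, "default", "src", null, null);
PrivilegeGrantInfo grantInfo = new PrivilegeGrantInfo("SELECT",
    (int) (System.currentTimeMillis() / 1000), "admin", PrincipalType.USER, false);
HiveObjectPrivilege priv = new HiveObjectPrivilege(tableRef, "analyst", PrincipalType.USER, grantInfo);
// store is any RawStore implementation, e.g. the HBaseStore above.
store.grantPrivileges(new PrivilegeBag(Collections.singletonList(priv)));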
use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
the class ObjectStore method alterIndex.
@Override
public void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
    throws InvalidObjectException, MetaException {
  boolean success = false;
  try {
    openTransaction();
    name = HiveStringUtils.normalizeIdentifier(name);
    baseTblName = HiveStringUtils.normalizeIdentifier(baseTblName);
    dbname = HiveStringUtils.normalizeIdentifier(dbname);
    MIndex newi = convertToMIndex(newIndex);
    if (newi == null) {
      throw new InvalidObjectException("new index is invalid");
    }
    MIndex oldi = getMIndex(dbname, baseTblName, name);
    if (oldi == null) {
      throw new MetaException("index " + name + " doesn't exist");
    }
    // For now, only altering the parameters is allowed
    oldi.setParameters(newi.getParameters());
    // Commit the changes
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
}
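Since only the parameters of the new Index are applied, a typical caller fetches the existing index, tweaks its parameter map, and passes it back. A sketch assuming a RawStore handle and hypothetical database, table, and index names:
Index index = store.getIndex("default", "src", "src_idx");
index.getParameters().put("comment", "rebuilt nightly");
// Only the parameter map is picked up; other fields of the Index are ignored.
store.alterIndex("default", "src", "src_idx", index);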