use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class HiveAlterHandler method alterTableUpdateTableColumnStats.
@VisibleForTesting
void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
    throws MetaException, InvalidObjectException {
  String dbName = oldTable.getDbName().toLowerCase();
  String tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(oldTable.getTableName());
  String newDbName = newTable.getDbName().toLowerCase();
  String newTableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(newTable.getTableName());
  try {
    List<FieldSchema> oldCols = oldTable.getSd().getCols();
    List<FieldSchema> newCols = newTable.getSd().getCols();
    List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
    ColumnStatistics colStats = null;
    boolean updateColumnStats = true;
    // Nothing to update if everything is the same
    if (newDbName.equals(dbName) && newTableName.equals(tableName)
        && MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
      updateColumnStats = false;
    }
    if (updateColumnStats) {
      List<String> oldColNames = new ArrayList<>(oldCols.size());
      for (FieldSchema oldCol : oldCols) {
        oldColNames.add(oldCol.getName());
      }
      // Collect column stats which need to be rewritten and remove old stats
      colStats = msdb.getTableColumnStatistics(dbName, tableName, oldColNames);
      if (colStats == null) {
        updateColumnStats = false;
      } else {
        List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
        if (statsObjs != null) {
          List<String> deletedCols = new ArrayList<>();
          for (ColumnStatisticsObj statsObj : statsObjs) {
            boolean found = false;
            for (FieldSchema newCol : newCols) {
              if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
                  && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
                found = true;
                break;
              }
            }
            if (found) {
              if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
                msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
                newStatsObjs.add(statsObj);
                deletedCols.add(statsObj.getColName());
              }
            } else {
              msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
              deletedCols.add(statsObj.getColName());
            }
          }
          StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
        }
      }
    }
    // Change to new table and append stats for the new table
    msdb.alterTable(dbName, tableName, newTable);
    if (updateColumnStats && !newStatsObjs.isEmpty()) {
      ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
      statsDesc.setDbName(newDbName);
      statsDesc.setTableName(newTableName);
      colStats.setStatsObj(newStatsObjs);
      msdb.updateTableColumnStatistics(colStats);
    }
  } catch (NoSuchObjectException nsoe) {
    LOG.debug("Could not find db entry: " + nsoe);
  } catch (InvalidInputException e) {
    // Should not happen, since the inputs were verified before being passed in
    throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
  }
}
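The survival rule above is name-and-type matching: a statistics object is kept only if the new schema still contains a column with the same name and type, compared case-insensitively. Below is a minimal, self-contained sketch of that rule using the metastore API types; the class and method names are illustrative, not part of Hive.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

// Hypothetical helper, not part of Hive: isolates the matching rule that
// alterTableUpdateTableColumnStats uses to decide which stats survive.
public class ColumnStatsMatcher {

  /** Returns the stats objects whose column name and type still exist in newCols. */
  static List<ColumnStatisticsObj> retainedStats(List<ColumnStatisticsObj> statsObjs,
      List<FieldSchema> newCols) {
    List<ColumnStatisticsObj> retained = new ArrayList<>();
    for (ColumnStatisticsObj statsObj : statsObjs) {
      for (FieldSchema newCol : newCols) {
        // Both name and type must match, case-insensitively, for stats to be kept
        if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
            && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
          retained.add(statsObj);
          break;
        }
      }
    }
    return retained;
  }
}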
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class Hive method getPartition.
/**
 * Returns partition metadata.
 *
 * @param tbl
 *          the partition's table
 * @param partSpec
 *          partition keys and values
 * @param forceCreate
 *          if this is true and the partition doesn't exist, a partition is created
 * @param partPath
 *          the path where the partition data is located
 * @param inheritTableSpecs
 *          whether to copy over the table specs for InputFormat/OutputFormat/SerDe
 * @return the partition object, or null if there is no such partition
 * @throws HiveException
 */
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate,
    String partPath, boolean inheritTableSpecs) throws HiveException {
  tbl.validatePartColumnNames(partSpec, true);
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : tbl.getPartCols()) {
    String val = partSpec.get(field.getName());
    // enable dynamic partitioning
    if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING))
        || (val != null && val.length() == 0)) {
      throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty");
    } else if (val != null) {
      pvals.add(val);
    }
  }
  org.apache.hadoop.hive.metastore.api.Partition tpart = null;
  try {
    tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
        pvals, getUserName(), getGroupNames());
  } catch (NoSuchObjectException nsoe) {
    // this means no partition exists for the given partition
    // key value pairs - thrift cannot handle null return values, hence
    // getPartition() throws NoSuchObjectException to indicate null partition
    tpart = null;
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  try {
    if (forceCreate) {
      if (tpart == null) {
        LOG.debug("creating partition for table " + tbl.getTableName()
            + " with partition spec : " + partSpec);
        try {
          tpart = getSynchronizedMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
        } catch (AlreadyExistsException aee) {
          LOG.debug("Caught already exists exception, trying to alter partition instead");
          tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
              pvals, getUserName(), getGroupNames());
          alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
        } catch (Exception e) {
          if (CheckJDOException.isJDODataStoreException(e)) {
            // Using utility method above, so that JDODataStoreException doesn't
            // have to be used here. This helps avoid adding jdo dependency for
            // hcatalog client uses
            LOG.debug("Caught JDO exception, trying to alter partition instead");
            tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
                pvals, getUserName(), getGroupNames());
            if (tpart == null) {
              // The JDO exception was not caused by a race in creating the
              // partition, since the partition still doesn't exist; rethrow.
              throw e;
            }
            alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
          } else {
            throw e;
          }
        }
      } else {
        alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
        fireInsertEvent(tbl, partSpec, true, null);
      }
    }
    if (tpart == null) {
      return null;
    }
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  return new Partition(tbl, tpart);
}
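A hedged usage sketch for this method: with forceCreate=false, a missing partition comes back as null rather than as an error, because the NoSuchObjectException thrown by the metastore is swallowed above. This assumes a reachable metastore; the database, table, and partition key names are placeholders.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

// Illustrative only: assumes an existing partitioned table default.web_logs
// with a single partition key "ds".
public class GetPartitionExample {
  public static void main(String[] args) throws Exception {
    Hive hive = Hive.get(new HiveConf());
    Table tbl = hive.getTable("default", "web_logs");

    Map<String, String> partSpec = new HashMap<>();
    partSpec.put("ds", "2018-01-01");

    // forceCreate=false: a missing partition yields null, not an exception
    Partition p = hive.getPartition(tbl, partSpec, false, null, true);
    if (p == null) {
      System.out.println("Partition ds=2018-01-01 does not exist");
    }
  }
}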
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class Hive method createPartitions.
public List<Partition> createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException {
  Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
  int size = addPartitionDesc.getPartitionCount();
  List<org.apache.hadoop.hive.metastore.api.Partition> in =
      new ArrayList<org.apache.hadoop.hive.metastore.api.Partition>(size);
  for (int i = 0; i < size; ++i) {
    in.add(convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i)));
  }
  List<Partition> out = new ArrayList<Partition>();
  try {
    if (!addPartitionDesc.getReplicationSpec().isInReplicationScope()) {
      // TODO: normally, the result is not necessary; might make sense to pass false
      for (org.apache.hadoop.hive.metastore.api.Partition outPart
          : getMSC().add_partitions(in, addPartitionDesc.isIfNotExists(), true)) {
        out.add(new Partition(tbl, outPart));
      }
    } else {
      // For replication add-ptns, we need to follow an insert-if-not-exists,
      // alter-if-exists scenario.
      // TODO: ideally, we should push this mechanism to the metastore, because
      // otherwise we have no choice but to iterate over the partitions here.
      List<org.apache.hadoop.hive.metastore.api.Partition> partsToAdd = new ArrayList<>();
      List<org.apache.hadoop.hive.metastore.api.Partition> partsToAlter = new ArrayList<>();
      List<String> part_names = new ArrayList<>();
      for (org.apache.hadoop.hive.metastore.api.Partition p : in) {
        part_names.add(Warehouse.makePartName(tbl.getPartitionKeys(), p.getValues()));
        try {
          org.apache.hadoop.hive.metastore.api.Partition ptn =
              getMSC().getPartition(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), p.getValues());
          if (addPartitionDesc.getReplicationSpec().allowReplacementInto(ptn.getParameters())) {
            partsToAlter.add(p);
          }
          // else ptn already exists, but we do nothing with it
        } catch (NoSuchObjectException nsoe) {
          // if the object does not exist, we want to add it
          partsToAdd.add(p);
        }
      }
      for (org.apache.hadoop.hive.metastore.api.Partition outPart
          : getMSC().add_partitions(partsToAdd, addPartitionDesc.isIfNotExists(), true)) {
        out.add(new Partition(tbl, outPart));
      }
      getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), partsToAlter, null);
      for (org.apache.hadoop.hive.metastore.api.Partition outPart
          : getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(), part_names)) {
        out.add(new Partition(tbl, outPart));
      }
    }
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  return out;
}
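A hedged caller sketch, assuming an existing table partitioned by "ds"; the database, table, and location strings are placeholders.

import java.util.Collections;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

// Illustrative only: assumes table default.web_logs exists and is
// partitioned by "ds".
public class CreatePartitionsExample {
  public static void main(String[] args) throws Exception {
    Hive hive = Hive.get(new HiveConf());

    // ifNotExists=true: adding a partition that already exists is not an error
    AddPartitionDesc desc = new AddPartitionDesc("default", "web_logs", true);
    desc.addPartition(Collections.singletonMap("ds", "2018-01-02"),
        "/warehouse/web_logs/ds=2018-01-02");

    hive.createPartitions(desc);
  }
}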
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class SessionHiveMetaStoreClient method dropTempTable.
private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boolean deleteData,
    EnvironmentContext envContext)
    throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
  String dbName = table.getDbName().toLowerCase();
  String tableName = table.getTableName().toLowerCase();
  // Determine the temp table path
  Path tablePath = null;
  String pathStr = table.getSd().getLocation();
  if (pathStr != null) {
    try {
      tablePath = new Path(table.getSd().getLocation());
      if (!getWh().isWritable(tablePath.getParent())) {
        throw new MetaException("Table metadata not deleted since " + tablePath.getParent()
            + " is not writable by " + SecurityUtils.getUser());
      }
    } catch (IOException err) {
      MetaException metaException =
          new MetaException("Error checking temp table path for " + table.getTableName());
      metaException.initCause(err);
      throw metaException;
    }
  }
  // Remove table entry from SessionState
  Map<String, Table> tables = getTempTablesForDatabase(dbName);
  if (tables == null || tables.remove(tableName) == null) {
    throw new MetaException("Could not find temp table entry for "
        + StatsUtils.getFullyQualifiedTableName(dbName, tableName));
  }
  // Delete table data
  if (deleteData && !MetaStoreUtils.isExternalTable(table)) {
    try {
      boolean ifPurge = false;
      if (envContext != null) {
        ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
      }
      getWh().deleteDir(tablePath, true, ifPurge, false);
    } catch (Exception err) {
      LOG.error("Failed to delete temp table directory: " + tablePath, err);
      // Forgive error
    }
  }
}
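dropTempTable is private; callers reach it through the client's dropTable path. Below is a sketch of how a purge (bypassing the trash) can be requested via the EnvironmentContext "ifPurge" property this method reads. The database and table names are placeholders, and the five-argument dropTable overload on IMetaStoreClient is an assumption about the client API version.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

// Illustrative only: requests that the temp table's directory be purged
// rather than moved to the trash when the table is dropped.
public class DropTempTableExample {
  static void dropWithPurge(IMetaStoreClient client) throws Exception {
    EnvironmentContext envContext = new EnvironmentContext();
    envContext.putToProperties("ifPurge", "true");
    // deleteData=true removes the table directory as well as the metadata
    client.dropTable("default", "tmp_scratch", true, false, envContext);
  }
}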
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class DDLTask method dropDatabase.
/**
 * Drop a database.
 * @param db the Hive session object
 * @param dropDb descriptor of the database to drop
 * @return Always returns 0
 * @throws HiveException
 */
private int dropDatabase(Hive db, DropDatabaseDesc dropDb) throws HiveException {
  try {
    String dbName = dropDb.getDatabaseName();
    ReplicationSpec replicationSpec = dropDb.getReplicationSpec();
    if (replicationSpec.isInReplicationScope()) {
      Database database = db.getDatabase(dbName);
      if (database == null || !replicationSpec.allowEventReplacementInto(database.getParameters())) {
        return 0;
      }
    }
    db.dropDatabase(dbName, true, dropDb.getIfExists(), dropDb.isCasdade());
    // Unregister the functions as well
    if (dropDb.isCasdade()) {
      FunctionRegistry.unregisterPermanentFunctions(dbName);
    }
  } catch (NoSuchObjectException ex) {
    throw new HiveException(ex, ErrorMsg.DATABASE_NOT_EXISTS, dropDb.getDatabaseName());
  }
  return 0;
}
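A hedged sketch of the underlying Hive.dropDatabase call this task delegates to; "scratch_db" is a placeholder name.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;

// Illustrative only: drops a database directly through the Hive API.
public class DropDatabaseExample {
  public static void main(String[] args) throws Exception {
    Hive db = Hive.get(new HiveConf());
    // deleteData=true, ignoreUnknownDb=true (don't fail if missing),
    // cascade=true (drop contained tables too)
    db.dropDatabase("scratch_db", true, true, true);
  }
}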