Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
The class HiveAlterHandler, method updatePartColumnStats.
private void updatePartColumnStats(RawStore msdb, String dbName, String tableName,
    List<String> partVals, Partition newPart) throws MetaException, InvalidObjectException {
  dbName = HiveStringUtils.normalizeIdentifier(dbName);
  tableName = HiveStringUtils.normalizeIdentifier(tableName);
  String newDbName = HiveStringUtils.normalizeIdentifier(newPart.getDbName());
  String newTableName = HiveStringUtils.normalizeIdentifier(newPart.getTableName());
  Table oldTable = msdb.getTable(dbName, tableName);
  if (oldTable == null) {
    return;
  }
  try {
    String oldPartName = Warehouse.makePartName(oldTable.getPartitionKeys(), partVals);
    String newPartName = Warehouse.makePartName(oldTable.getPartitionKeys(), newPart.getValues());
    if (!dbName.equals(newDbName) || !tableName.equals(newTableName)
        || !oldPartName.equals(newPartName)) {
      // The partition was moved or renamed, so the old column stats no longer apply.
      msdb.deletePartitionColumnStatistics(dbName, tableName, oldPartName, partVals, null);
    } else {
      Partition oldPartition = msdb.getPartition(dbName, tableName, partVals);
      if (oldPartition == null) {
        return;
      }
      if (oldPartition.getSd() != null && newPart.getSd() != null) {
        List<FieldSchema> oldCols = oldPartition.getSd().getCols();
        if (!MetaStoreUtils.columnsIncluded(oldCols, newPart.getSd().getCols())) {
          // The column set changed: keep stats only for the columns that survive the alter.
          updatePartColumnStatsForAlterColumns(msdb, oldPartition, oldPartName, partVals, oldCols, newPart);
        }
      }
    }
  } catch (NoSuchObjectException nsoe) {
    LOG.debug("Could not find db entry: " + nsoe);
    // ignore
  } catch (InvalidInputException iie) {
    throw new InvalidObjectException("Invalid input to update partition column stats: " + iie);
  }
}
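The rename check above relies on Warehouse.makePartName producing a stable, path-style encoding of the partition key/value pairs. A minimal standalone sketch of that behavior, assuming only the hive-metastore jar on the classpath (the class name and sample values are invented for illustration):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartNameDemo {
  public static void main(String[] args) throws MetaException {
    List<FieldSchema> keys = Arrays.asList(
        new FieldSchema("ds", "string", null),
        new FieldSchema("hr", "string", null));
    // Same keys but different values yield different names, which is what
    // lets the oldPartName/newPartName comparison above detect a rename.
    System.out.println(Warehouse.makePartName(keys, Arrays.asList("2024-01-01", "00"))); // ds=2024-01-01/hr=00
    System.out.println(Warehouse.makePartName(keys, Arrays.asList("2024-01-02", "00"))); // ds=2024-01-02/hr=00
  }
}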
Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
The class HBaseStore, method addRole.
@Override
public boolean addRole(String roleName, String ownerName)
    throws InvalidObjectException, MetaException, NoSuchObjectException {
  int now = (int) (System.currentTimeMillis() / 1000);
  Role role = new Role(roleName, now, ownerName);
  boolean commit = false;
  openTransaction();
  try {
    if (getHBase().getRole(roleName) != null) {
      throw new InvalidObjectException("Role " + roleName + " already exists");
    }
    getHBase().putRole(role);
    commit = true;
    return true;
  } catch (IOException e) {
    LOG.error("Unable to create role ", e);
    throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
  } finally {
    commitOrRoleBack(commit);
  }
}
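Since addRole signals a duplicate role with InvalidObjectException, a caller that only needs the role to exist can absorb that case. A hedged sketch under that assumption (RoleUtil and ensureRole are hypothetical names; store may be any RawStore implementation such as HBaseStore):

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

public final class RoleUtil {
  private RoleUtil() {
  }

  /** Returns true if the role was created, false if it already existed. */
  public static boolean ensureRole(RawStore store, String roleName, String ownerName)
      throws MetaException, NoSuchObjectException {
    try {
      return store.addRole(roleName, ownerName);
    } catch (InvalidObjectException e) {
      // addRole above throws this exact type when the role already exists.
      return false;
    }
  }
}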
Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project metacat by Netflix.
The class HiveConnectorTableService, method create.
/**
* Create a table.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void create(@Nonnull @NonNull final ConnectorContext requestContext,
    @Nonnull @NonNull final TableInfo tableInfo) {
  final QualifiedName tableName = tableInfo.getName();
  try {
    final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
    updateTable(requestContext, table, tableInfo);
    metacatHiveClient.createTable(table);
  } catch (AlreadyExistsException exception) {
    throw new TableAlreadyExistsException(tableName, exception);
  } catch (MetaException exception) {
    throw new InvalidMetaException(tableName, exception);
  } catch (NoSuchObjectException | InvalidObjectException exception) {
    // The metastore reports a missing parent database through either exception type.
    throw new DatabaseNotFoundException(
        QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()), exception);
  } catch (TException exception) {
    throw new ConnectorException(String.format("Failed to create hive table %s", tableName), exception);
  }
}
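Because create translates the metastore's checked Thrift exceptions into unchecked Metacat exceptions, callers can branch on domain-specific types instead of raw TException. A hypothetical caller sketch (CreateTableExample and createIfAbsent are invented names, and the import paths follow the usual metacat source layout but should be treated as assumptions):

import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;

public class CreateTableExample {
  public void createIfAbsent(final HiveConnectorTableService tableService,
      final ConnectorContext requestContext, final TableInfo tableInfo) {
    try {
      tableService.create(requestContext, tableInfo);
    } catch (TableAlreadyExistsException ignored) {
      // The table is already present; treat the call as a no-op.
    } catch (DatabaseNotFoundException e) {
      // The NoSuchObjectException and InvalidObjectException paths above
      // resurface here; create the database first, then retry.
      throw e;
    }
  }
}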
Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project metacat by Netflix.
The class MetacatHiveClient, method addDropPartitions.
/**
* {@inheritDoc}.
*/
@Override
public void addDropPartitions(final String dbName, final String tableName,
    final List<Partition> partitions, final List<String> delPartitionNames) throws TException {
  try (HiveMetastoreClient client = createMetastoreClient()) {
    try {
      // Drop the outgoing partitions first, then register the new ones.
      dropHivePartitions(client, dbName, tableName, delPartitionNames);
      client.add_partitions(partitions);
    } catch (MetaException | InvalidObjectException e) {
      throw new InvalidMetaException("One or more partitions are invalid.", e);
    } catch (TException e) {
      throw new TException(
          String.format("Internal server error adding/dropping partitions for table %s.%s",
              dbName, tableName), e);
    }
  }
}
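For comparison, the same drop-then-add sequence can be written against the stock Hive client rather than metacat's wrapper. A minimal sketch, with the caveat that it gives no single-transaction guarantee; the class name, method, and metastore URI are all assumptions to adjust for your deployment:

import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public class AddDropPartitionsDemo {
  public static void addDrop(String dbName, String tableName,
      List<Partition> toAdd, List<String> toDrop) throws TException {
    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      for (String partName : toDrop) {
        // deleteData=false drops only the metadata and keeps the files.
        client.dropPartition(dbName, tableName, partName, false);
      }
      client.add_partitions(toAdd);
    } finally {
      client.close();
    }
  }
}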
Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project metacat by Netflix.
The class MetacatHMSHandler, method add_drop_partitions.
/**
 * Adds and drops partitions in one transaction.
 *
 * @param databaseName database name
 * @param tableName    table name
 * @param addParts     list of partitions to add
 * @param dropParts    list of partition value lists identifying the partitions to drop
 * @param deleteData   if true, deletes the underlying data
 * @return true if successful
 * @throws NoSuchObjectException Exception if the table does not exist
 * @throws MetaException         Exception if the metastore operation fails
 * @throws TException            any internal exception
 */
@SuppressWarnings({ "checkstyle:methodname" })
public boolean add_drop_partitions(final String databaseName, final String tableName,
    final List<Partition> addParts, final List<List<String>> dropParts, final boolean deleteData)
    throws NoSuchObjectException, MetaException, TException {
  startFunction("add_drop_partitions : db=" + databaseName + " tbl=" + tableName);
  if (addParts.size() == 0 && dropParts.size() == 0) {
    return true;
  }
  for (List<String> partVals : dropParts) {
    LOG.info("Drop Partition values:" + partVals);
  }
  for (Partition part : addParts) {
    LOG.info("Add Partition values:" + part);
  }
  boolean ret = false;
  Exception ex = null;
  try {
    ret = addDropPartitionsCore(getMS(), databaseName, tableName, addParts, dropParts, false, null);
  } catch (Exception e) {
    ex = e;
    // Rethrow the checked types declared by the Thrift interface; wrap anything else.
    if (e instanceof MetaException) {
      throw (MetaException) e;
    } else if (e instanceof InvalidObjectException) {
      throw (InvalidObjectException) e;
    } else if (e instanceof AlreadyExistsException) {
      throw (AlreadyExistsException) e;
    } else if (e instanceof NoSuchObjectException) {
      throw (NoSuchObjectException) e;
    } else {
      throw newMetaException(e);
    }
  } finally {
    endFunction("drop_partitions", ret, ex, tableName);
  }
  return ret;
}
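The fallback branch calls newMetaException, which is not shown in the snippet. A close approximation of the helper as it appears in Hive's HiveMetaStore, reconstructed here rather than quoted verbatim: it preserves the original exception as the cause so the stack trace survives the wrapping.

private static MetaException newMetaException(Exception e) {
  if (e instanceof MetaException) {
    // Already the declared type; no wrapping needed.
    return (MetaException) e;
  }
  MetaException me = new MetaException(e.toString());
  me.initCause(e);
  return me;
}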