Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
In class ObjectStore, the method dropTable:
@Override
public boolean dropTable(String dbName, String tableName)
    throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
  boolean success = false;
  try {
    openTransaction();
    MTable tbl = getMTable(dbName, tableName);
    pm.retrieve(tbl);
    if (tbl != null) {
      // first remove all the grants
      List<MTablePrivilege> tabGrants = listAllTableGrants(dbName, tableName);
      if (tabGrants != null && tabGrants.size() > 0) {
        pm.deletePersistentAll(tabGrants);
      }
      List<MTableColumnPrivilege> tblColGrants = listTableAllColumnGrants(dbName, tableName);
      if (tblColGrants != null && tblColGrants.size() > 0) {
        pm.deletePersistentAll(tblColGrants);
      }
      List<MPartitionPrivilege> partGrants = this.listTableAllPartitionGrants(dbName, tableName);
      if (partGrants != null && partGrants.size() > 0) {
        pm.deletePersistentAll(partGrants);
      }
      List<MPartitionColumnPrivilege> partColGrants = listTableAllPartitionColumnGrants(dbName, tableName);
      if (partColGrants != null && partColGrants.size() > 0) {
        pm.deletePersistentAll(partColGrants);
      }
      // delete column statistics if present
      try {
        deleteTableColumnStatistics(dbName, tableName, null);
      } catch (NoSuchObjectException e) {
        LOG.info("Found no table level column statistics associated with db " + dbName
            + " table " + tableName + " record to delete");
      }
      List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(dbName, tableName, null);
      if (tabConstraints != null && tabConstraints.size() > 0) {
        pm.deletePersistentAll(tabConstraints);
      }
      preDropStorageDescriptor(tbl.getSd());
      // then remove the table
      pm.deletePersistentAll(tbl);
    }
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
  return success;
}
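The open/commit/rollback idiom in dropTable recurs throughout ObjectStore: set a success flag only when commitTransaction() returns true, and roll back in the finally block otherwise. A minimal standalone sketch of that pattern, with a hypothetical Transactional interface standing in for ObjectStore's own transaction methods:

// Minimal sketch of the ObjectStore transaction idiom. The Transactional
// interface is a hypothetical stand-in for ObjectStore itself.
public final class TransactionIdiom {

  interface Transactional {
    void openTransaction();
    boolean commitTransaction();
    void rollbackTransaction();
  }

  // Runs 'work' inside a transaction; rolls back unless the commit succeeded.
  static boolean runInTransaction(Transactional store, Runnable work) {
    boolean success = false;
    try {
      store.openTransaction();
      work.run();
      success = store.commitTransaction();
    } finally {
      if (!success) {
        store.rollbackTransaction();
      }
    }
    return success;
  }
}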
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
In class DDLSemanticAnalyzer, the method analyzeDropIndex:
private void analyzeDropIndex(ASTNode ast) throws SemanticException {
  String indexName = unescapeIdentifier(ast.getChild(0).getText());
  String tableName = getUnescapedName((ASTNode) ast.getChild(1));
  boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
  // we want to signal an error if the index doesn't exist and we're
  // configured not to ignore this
  boolean throwException = !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
  Table tbl = getTable(tableName, false);
  if (throwException && tbl == null) {
    throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
  }
  try {
    Index idx = db.getIndex(tableName, indexName);
  } catch (HiveException e) {
    if (!(e.getCause() instanceof NoSuchObjectException)) {
      throw new SemanticException(ErrorMsg.CANNOT_DROP_INDEX.getMsg("dropping index"), e);
    }
    if (throwException) {
      throw new SemanticException(ErrorMsg.INVALID_INDEX.getMsg(indexName));
    }
  }
  if (tbl != null) {
    inputs.add(new ReadEntity(tbl));
  }
  DropIndexDesc dropIdxDesc = new DropIndexDesc(indexName, tableName, throwException);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropIdxDesc), conf));
}
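The cause check is the key detail here: Hive.getIndex wraps the Thrift-level NoSuchObjectException in a HiveException, so the analyzer inspects getCause() to tell "index does not exist" apart from genuine failures, and only complains about a missing index when neither IF EXISTS nor the drop-ignores-nonexistent config suppresses it. A hedged sketch of that decision logic (the class and method names below are illustrative, not Hive API):

// Hedged sketch of the drop-if-exists decision mirrored above.
// 'MissingObjectPolicy' and its names are illustrative, not Hive API.
final class MissingObjectPolicy {

  // Decide what to do when an index lookup failed with 'lookupFailure'.
  static void handleLookupFailure(Exception lookupFailure, boolean ifExists,
      boolean ignoreNonExistent) throws Exception {
    // Only a wrapped NoSuchObjectException means "the index is simply absent".
    boolean missing = lookupFailure.getCause()
        instanceof org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
    if (!missing) {
      throw lookupFailure; // a real failure: always propagate
    }
    if (!ifExists && !ignoreNonExistent) {
      throw lookupFailure; // missing, and configured to treat that as an error
    }
    // otherwise: IF EXISTS (or the ignore flag) suppresses the error
  }
}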
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
In class HBaseStore, the method dropPartitions:
@Override
public void dropPartitions(String dbName, String tblName, List<String> partNames)
    throws MetaException, NoSuchObjectException {
  boolean commit = false;
  openTransaction();
  try {
    for (String partName : partNames) {
      dropPartition(dbName, tblName, partNameToVals(partName));
    }
    commit = true;
  } catch (Exception e) {
    LOG.error("Unable to drop partitions", e);
    throw new NoSuchObjectException("Failure dropping partitions, " + e.getMessage());
  } finally {
    commitOrRoleBack(commit);
  }
}
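Note that the catch block collapses every failure, including I/O errors, into NoSuchObjectException, so a caller cannot distinguish a truly missing partition from an HBase outage. A hedged caller-side sketch (RawStore is the interface HBaseStore implements; the database, table, and partition names are illustrative):

// Hedged caller-side sketch; names of the db, table, and partition are
// illustrative only.
import java.util.Arrays;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

final class DropPartitionsCaller {
  static void dropQuietly(RawStore store) throws MetaException {
    try {
      store.dropPartitions("default", "events", Arrays.asList("ds=2017-01-01"));
    } catch (NoSuchObjectException e) {
      // With this implementation the exception may mean "partition missing"
      // OR "the underlying HBase call failed"; the message is the only clue.
      System.err.println("Drop failed: " + e.getMessage());
    }
  }
}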
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
In class HBaseStore, the method getRole:
@Override
public Role getRole(String roleName) throws NoSuchObjectException {
  boolean commit = false;
  openTransaction();
  try {
    Role role = getHBase().getRole(roleName);
    if (role == null) {
      throw new NoSuchObjectException("Unable to find role " + roleName);
    }
    commit = true;
    return role;
  } catch (IOException e) {
    LOG.error("Unable to get role", e);
    throw new NoSuchObjectException("Error reading table " + e.getMessage());
  } finally {
    commitOrRoleBack(commit);
  }
}
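getRole follows the same commit-flag idiom as dropPartitions: set commit = true only on the success path and let the helper decide in finally (commitOrRoleBack is the method's actual, if misspelled, name in the Hive source). A minimal sketch of that helper's likely shape, assuming it simply dispatches on the flag; the transaction methods below are stand-ins for HBaseStore's own:

// Hedged sketch, assuming the helper simply dispatches on the flag.
// The transaction methods are stand-ins for HBaseStore's own.
final class TxnHelper {
  void commitTransaction() { /* commit the open transaction */ }
  void rollbackTransaction() { /* undo the open transaction */ }

  // Assumed shape of HBaseStore.commitOrRoleBack (sic).
  void commitOrRoleBack(boolean commit) {
    if (commit) {
      commitTransaction();
    } else {
      rollbackTransaction();
    }
  }
}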
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
In class HBaseStore, the method getPartitionsByExprInternal:
private boolean getPartitionsByExprInternal(String dbName, String tblName, ExpressionTree exprTree,
    short maxParts, List<Partition> result) throws MetaException, NoSuchObjectException {
  dbName = HiveStringUtils.normalizeIdentifier(dbName);
  tblName = HiveStringUtils.normalizeIdentifier(tblName);
  Table table = getTable(dbName, tblName);
  if (table == null) {
    throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName);
  }
  // generate hbase filter plan from expression tree
  PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Hbase Filter Plan generated : " + planRes.plan);
  }
  // results from scans need to be merged as there can be overlapping results between
  // the scans. Use a map of list of partition values to partition for this.
  Map<List<String>, Partition> mergedParts = new HashMap<List<String>, Partition>();
  for (ScanPlan splan : planRes.plan.getPlans()) {
    try {
      List<Partition> parts = getHBase().scanPartitions(dbName, tblName,
          splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys()),
          splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys()),
          splan.getFilter(table.getPartitionKeys()), -1);
      boolean reachedMax = false;
      for (Partition part : parts) {
        mergedParts.put(part.getValues(), part);
        if (mergedParts.size() == maxParts) {
          reachedMax = true;
          break;
        }
      }
      if (reachedMax) {
        break;
      }
    } catch (IOException e) {
      LOG.error("Unable to get partitions", e);
      throw new MetaException("Error scanning partitions " + tableNameForErrorMsg(dbName, tblName) + ": " + e);
    }
  }
  for (Entry<List<String>, Partition> mp : mergedParts.entrySet()) {
    result.add(mp.getValue());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Matched partitions " + result);
  }
  // false indicates the filter could not be fully pushed down, so the
  // partitions being returned may need further client-side filtering
  return !planRes.hasUnsupportedCondition;
}
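The boolean result signals whether the HBase filter plan fully captured the expression: when the method returns false, the caller must re-check each returned partition against the original expression. A hedged sketch of that caller-side contract (the class name and the 'matches' predicate are illustrative; the predicate stands in for client-side expression evaluation):

// Hedged caller-side sketch of the residual-filter contract.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;
import org.apache.hadoop.hive.metastore.api.Partition;

final class ResidualFilter {
  // When the scan could not push the whole expression down
  // (getPartitionsByExprInternal returned false), re-check each candidate
  // locally; otherwise the server-side plan already matched exactly.
  static List<Partition> apply(List<Partition> candidates, boolean exprFullyApplied,
      Predicate<Partition> matches) {
    if (exprFullyApplied) {
      return candidates;
    }
    List<Partition> filtered = new ArrayList<>();
    for (Partition p : candidates) {
      if (matches.test(p)) {
        filtered.add(p);
      }
    }
    return filtered;
  }
}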