use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class HBaseStore method getRole.
@Override
public Role getRole(String roleName) throws NoSuchObjectException {
  boolean commit = false;
  openTransaction();
  try {
    Role role = getHBase().getRole(roleName);
    if (role == null) {
      throw new NoSuchObjectException("Unable to find role " + roleName);
    }
    commit = true;
    return role;
  } catch (IOException e) {
    LOG.error("Unable to get role", e);
    throw new NoSuchObjectException("Error reading table " + e.getMessage());
  } finally {
    commitOrRoleBack(commit);
  }
}
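A caller typically treats this NoSuchObjectException as a plain "role does not exist" signal rather than a failure. A minimal, hypothetical caller sketch (RawStore is the metastore interface HBaseStore implements; the store argument is assumed to be an initialized instance):

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Role;

public class RoleLookup {
  // Hypothetical helper: map "role not found" to null instead of an exception.
  static Role findRoleOrNull(RawStore store, String roleName) {
    try {
      return store.getRole(roleName);
    } catch (NoSuchObjectException e) {
      return null; // absence is an expected outcome here, not an error
    }
  }
}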
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class HBaseStore method getPartitionsByExprInternal.
private boolean getPartitionsByExprInternal(String dbName, String tblName, ExpressionTree exprTree,
    short maxParts, List<Partition> result) throws MetaException, NoSuchObjectException {
  dbName = HiveStringUtils.normalizeIdentifier(dbName);
  tblName = HiveStringUtils.normalizeIdentifier(tblName);
  Table table = getTable(dbName, tblName);
  if (table == null) {
    throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName);
  }
  // generate the HBase filter plan from the expression tree
  PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, table.getPartitionKeys());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Hbase Filter Plan generated : " + planRes.plan);
  }
  // Results from the scans need to be merged, as there can be overlapping results between
  // the scans. Use a map of partition values to partition for this.
  Map<List<String>, Partition> mergedParts = new HashMap<List<String>, Partition>();
  for (ScanPlan splan : planRes.plan.getPlans()) {
    try {
      List<Partition> parts = getHBase().scanPartitions(dbName, tblName,
          splan.getStartRowSuffix(dbName, tblName, table.getPartitionKeys()),
          splan.getEndRowSuffix(dbName, tblName, table.getPartitionKeys()),
          splan.getFilter(table.getPartitionKeys()), -1);
      boolean reachedMax = false;
      for (Partition part : parts) {
        mergedParts.put(part.getValues(), part);
        if (mergedParts.size() == maxParts) {
          reachedMax = true;
          break;
        }
      }
      if (reachedMax) {
        break;
      }
    } catch (IOException e) {
      LOG.error("Unable to get partitions", e);
      throw new MetaException("Error scanning partitions " + tableNameForErrorMsg(dbName, tblName) + ": " + e);
    }
  }
  for (Entry<List<String>, Partition> mp : mergedParts.entrySet()) {
    result.add(mp.getValue());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Matched partitions " + result);
  }
  // Return true only if the filter plan had no unsupported condition, i.e. the partitions
  // being returned are an exact match and the caller need not re-filter them.
  return !planRes.hasUnsupportedCondition;
}
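The Map keyed by partition values deduplicates correctly because java.util.List defines element-wise equals() and hashCode(), so two copies of the same partition returned by overlapping scans collapse into one entry. A self-contained illustration of just that merge step:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MergeDemo {
  public static void main(String[] args) {
    // Keyed by the partition-value list, as in getPartitionsByExprInternal.
    Map<List<String>, String> mergedParts = new HashMap<>();
    mergedParts.put(Arrays.asList("2017", "01"), "copy from scan 1");
    // Same values seen by an overlapping scan: replaces the entry, no duplicate.
    mergedParts.put(Arrays.asList("2017", "01"), "copy from scan 2");
    System.out.println(mergedParts.size()); // prints 1
  }
}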
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class HBaseStore method alterTable.
@Override
public void alterTable(String dbName, String tableName, Table newTable) throws InvalidObjectException, MetaException {
  boolean commit = false;
  openTransaction();
  try {
    Table newTableCopy = newTable.deepCopy();
    newTableCopy.setDbName(HiveStringUtils.normalizeIdentifier(newTableCopy.getDbName()));
    List<String> oldPartTypes = getTable(dbName, tableName).getPartitionKeys() == null ? null
        : HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys());
    newTableCopy.setTableName(HiveStringUtils.normalizeIdentifier(newTableCopy.getTableName()));
    getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName),
        HiveStringUtils.normalizeIdentifier(tableName)), newTableCopy);
    if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0
        && !tableName.equals(newTable.getTableName())) {
      // The table has been renamed, so the partitions must be rewritten too, since the
      // table name is part of the key.
      try {
        List<Partition> oldParts = getPartitions(dbName, tableName, -1);
        List<Partition> newParts = new ArrayList<>(oldParts.size());
        for (Partition oldPart : oldParts) {
          Partition newPart = oldPart.deepCopy();
          newPart.setTableName(newTable.getTableName());
          newParts.add(newPart);
        }
        getHBase().replacePartitions(oldParts, newParts, oldPartTypes);
      } catch (NoSuchObjectException e) {
        LOG.debug("No partitions found for old table so not worrying about it");
      }
    }
    commit = true;
  } catch (IOException e) {
    LOG.error("Unable to alter table " + tableNameForErrorMsg(dbName, tableName), e);
    throw new MetaException("Unable to alter table " + tableNameForErrorMsg(dbName, tableName));
  } finally {
    commitOrRoleBack(commit);
  }
}
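Since the partition rewrite only triggers on a rename of a partitioned table, a hypothetical caller renaming a table would look like the sketch below (store stands in for an initialized HBaseStore; the new name is made up):

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Table;

public class RenameTableExample {
  // Hypothetical rename flow: alterTable also rewrites the partitions because
  // their keys embed the table name.
  static void rename(RawStore store, Table oldTable, String newName) throws Exception {
    Table renamed = oldTable.deepCopy();
    renamed.setTableName(newName);
    store.alterTable(oldTable.getDbName(), oldTable.getTableName(), renamed);
  }
}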
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class PigHCatUtil method getTable.
/*
 * The job argument is passed so that configuration overrides can be used to initialize
 * the metastore configuration in the special case of an embedded metastore
 * (hive.metastore.uris = "").
 */
public Table getTable(String location, String hcatServerUri, String hcatServerPrincipal, Job job) throws IOException {
  Pair<String, String> loc_server = new Pair<String, String>(location, hcatServerUri);
  Table hcatTable = hcatTableCache.get(loc_server);
  if (hcatTable != null) {
    return hcatTable;
  }
  Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
  String dbName = dbTablePair.first;
  String tableName = dbTablePair.second;
  Table table = null;
  IMetaStoreClient client = null;
  try {
    client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class, job);
    table = HCatUtil.getTable(client, dbName, tableName);
  } catch (NoSuchObjectException nsoe) {
    // prettier error messages to frontend
    throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE);
  } catch (Exception e) {
    throw new IOException(e);
  } finally {
    HCatUtil.closeHiveClientQuietly(client);
  }
  hcatTableCache.put(loc_server, table);
  return table;
}
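In the embedded-metastore case the comment describes, there is no Thrift service to carry settings, so overrides have to ride along in the job configuration. A minimal sketch of that setup, assuming a local Derby-backed metastore (the connection URL is an assumption, not something this class requires):

import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;

public class EmbeddedMetastoreJob {
  public static Job configure() throws IOException {
    Job job = Job.getInstance();
    // An empty hive.metastore.uris selects an embedded (in-process) metastore.
    job.getConfiguration().set("hive.metastore.uris", "");
    // Assumed local backing store for the embedded metastore.
    job.getConfiguration().set("javax.jdo.option.ConnectionURL",
        "jdbc:derby:;databaseName=metastore_db;create=true");
    return job;
  }
}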
use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
the class HCatClientHMSImpl method addPartition.
@Override
public void addPartition(HCatAddPartitionDesc partInfo) throws HCatException {
  Table tbl = null;
  try {
    tbl = hmsClient.getTable(partInfo.getDatabaseName(), partInfo.getTableName());
    // TODO: Should be moved out.
    if (tbl.getPartitionKeysSize() == 0) {
      throw new HCatException("The table " + partInfo.getTableName() + " is not partitioned.");
    }
    HCatTable hcatTable = new HCatTable(tbl);
    HCatPartition hcatPartition = partInfo.getHCatPartition();
    // This is only required to support the deprecated methods in HCatAddPartitionDesc.Builder.
    if (hcatPartition == null) {
      hcatPartition = partInfo.getHCatPartition(hcatTable);
    }
    hmsClient.add_partition(hcatPartition.toHivePartition());
  } catch (InvalidObjectException e) {
    throw new HCatException("InvalidObjectException while adding partition.", e);
  } catch (AlreadyExistsException e) {
    throw new HCatException("AlreadyExistsException while adding partition.", e);
  } catch (MetaException e) {
    throw new HCatException("MetaException while adding partition.", e);
  } catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("The table " + partInfo.getTableName() + " could not be found.", e);
  } catch (TException e) {
    throw new ConnectionFailureException("TException while adding partition.", e);
  }
}
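For context, a client-side call into this method might look like the sketch below; it goes through the deprecated HCatAddPartitionDesc.create(...) builder that the snippet's comment refers to, and the database, table, location, and partition column are all made-up values:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
import org.apache.hive.hcatalog.api.HCatClient;

public class AddPartitionExample {
  public static void main(String[] args) throws Exception {
    HCatClient client = HCatClient.create(new Configuration());
    Map<String, String> partSpec = new HashMap<>();
    partSpec.put("dt", "20240101"); // hypothetical partition column and value
    client.addPartition(HCatAddPartitionDesc.create(
        "default", "clicks", "/warehouse/clicks/dt=20240101", partSpec).build());
    client.close();
  }
}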