Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class ObjectStore, method getPartitionPsQueryResults.
/**
* Retrieves a Collection of partition-related results from the database that match
* the partial specification given for a specific table.
* @param dbName the name of the database
* @param tableName the name of the table
* @param part_vals the partial specification values
* @param max_parts the maximum number of partitions to return
* @param resultsCol the metadata column of the data to return, e.g. partitionName, etc.
* if resultsCol is empty or null, a collection of MPartition objects is returned
* @throws NoSuchObjectException if the specified table does not exist
* @return A Collection of partition-related items from the db that match the partial spec
* for a table. The type of each item in the collection corresponds to the column
* you want results for. E.g., if resultsCol is partitionName, the Collection
* has types of String, and if resultsCol is null, the types are MPartition.
*/
private Collection getPartitionPsQueryResults(String dbName, String tableName,
    List<String> part_vals, short max_parts, String resultsCol, QueryWrapper queryWrapper)
    throws MetaException, NoSuchObjectException {
  dbName = HiveStringUtils.normalizeIdentifier(dbName);
  tableName = HiveStringUtils.normalizeIdentifier(tableName);
  Table table = getTable(dbName, tableName);
  if (table == null) {
    throw new NoSuchObjectException(dbName + "." + tableName + " table not found");
  }
  List<FieldSchema> partCols = table.getPartitionKeys();
  int numPartKeys = partCols.size();
  if (part_vals.size() > numPartKeys) {
    throw new MetaException("Incorrect number of partition values."
        + " numPartKeys=" + numPartKeys + ", part_val=" + part_vals.size());
  }
  partCols = partCols.subList(0, part_vals.size());
  // Construct a pattern of the form: partKey=partVal/partKey2=partVal2/...
  // where partVal is either the escaped partition value given as input,
  // or a regex of the form ".*"
  // This works because the "=" and "/" separating key names and partition key/values
  // are not escaped.
  String partNameMatcher = Warehouse.makePartName(partCols, part_vals, ".*");
  // Add ".*" to the regex to match anything after the partial spec.
  if (part_vals.size() < numPartKeys) {
    partNameMatcher += ".*";
  }
  Query query = queryWrapper.query = pm.newQuery(MPartition.class);
  StringBuilder queryFilter = new StringBuilder("table.database.name == dbName");
  queryFilter.append(" && table.tableName == tableName");
  queryFilter.append(" && partitionName.matches(partialRegex)");
  query.setFilter(queryFilter.toString());
  query.declareParameters("java.lang.String dbName, "
      + "java.lang.String tableName, java.lang.String partialRegex");
  if (max_parts >= 0) {
    // User specified a row limit, set it on the Query
    query.setRange(0, max_parts);
  }
  if (resultsCol != null && !resultsCol.isEmpty()) {
    query.setResult(resultsCol);
  }
  return (Collection) query.execute(dbName, tableName, partNameMatcher);
}
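To make the partial-spec matching concrete, here is a standalone sketch (hypothetical partition layout, not part of ObjectStore) showing how a matcher built from a partial specification selects fully qualified partition names, which is essentially what the partitionName.matches(partialRegex) JDOQL filter above does on the database side:

import java.util.Arrays;
import java.util.List;

// Illustration only: partition names are stored as key1=val1/key2=val2.
public class PartialSpecMatchDemo {
  public static void main(String[] args) {
    List<String> storedPartNames = Arrays.asList(
        "ds=2017-01-01/hr=00",
        "ds=2017-01-01/hr=01",
        "ds=2017-01-02/hr=00");

    // Partial spec: only ds is given for the (ds, hr) partition keys, so the
    // matcher is the escaped prefix plus a trailing ".*" (appended because
    // part_vals.size() < numPartKeys in the method above).
    String partNameMatcher = "ds=2017-01-01" + ".*";

    for (String name : storedPartNames) {
      // Same check the JDOQL filter performs via partitionName.matches(partialRegex)
      System.out.println(name + " matches: " + name.matches(partNameMatcher));
    }
    // Prints true, true, false.
  }
}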
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class ObjectStore, method getNumPartitionsByExpr.
@Override
public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr)
    throws MetaException, NoSuchObjectException {
  final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr);
  // Need to be final to pass it to an inner class
  final byte[] tempExpr = expr;
  return new GetHelper<Integer>(dbName, tblName, true, true) {
    private SqlFilterForPushdown filter = new SqlFilterForPushdown();

    @Override
    protected String describeResult() {
      return "Partition count";
    }

    @Override
    protected boolean canUseDirectSql(GetHelper<Integer> ctx) throws MetaException {
      return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter);
    }

    @Override
    protected Integer getSqlResult(GetHelper<Integer> ctx) throws MetaException {
      return directSql.getNumPartitionsViaSqlFilter(filter);
    }

    @Override
    protected Integer getJdoResult(GetHelper<Integer> ctx) throws MetaException, NoSuchObjectException {
      Integer numPartitions = null;
      if (exprTree != null) {
        try {
          numPartitions = getNumPartitionsViaOrmFilter(ctx.getTable(), exprTree, true);
        } catch (MetaException e) {
          numPartitions = null;
        }
      }
      // If the count could not be obtained via the ORM filter, fall back to
      // fetching the matching partition names and counting them.
      if (numPartitions == null) {
        List<String> filteredPartNames = new ArrayList<String>();
        getPartitionNamesPrunedByExprNoTxn(ctx.getTable(), tempExpr, "", (short) -1, filteredPartNames);
        numPartitions = filteredPartNames.size();
      }
      return numPartitions;
    }
  }.run(true);
}
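The GetHelper pattern used above first asks canUseDirectSql, runs getSqlResult when SQL pushdown is possible, and otherwise falls back to getJdoResult. The following stripped-down sketch (hypothetical class and method names, not the actual Hive GetHelper API, which also handles transactions, retries, and error translation) roughly illustrates that control flow:

// Minimal sketch of the try-direct-SQL-then-fall-back-to-JDO flow.
abstract class CountHelper {
  int run() throws Exception {
    if (canUseDirectSql()) {
      try {
        return getSqlResult();   // fast path: hand-written SQL pushdown
      } catch (Exception e) {
        // fall through to the ORM path on failure
      }
    }
    return getJdoResult();       // slower but always-available JDO/ORM path
  }

  abstract boolean canUseDirectSql() throws Exception;
  abstract int getSqlResult() throws Exception;
  abstract int getJdoResult() throws Exception;
}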
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class ObjectStore, method setMetaStoreSchemaVersion.
@Override
public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
  MVersionTable mSchemaVer;
  boolean commited = false;
  boolean recordVersion = HiveConf.getBoolVar(getConf(),
      HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION);
  if (!recordVersion) {
    LOG.warn("setMetaStoreSchemaVersion called but recording version is disabled: "
        + "version = " + schemaVersion + ", comment = " + comment);
    return;
  }
  LOG.warn("Setting metastore schema version in db to " + schemaVersion);
  try {
    mSchemaVer = getMSchemaVersion();
  } catch (NoSuchObjectException e) {
    // if the version doesn't exist, then create it
    mSchemaVer = new MVersionTable();
  }
  mSchemaVer.setSchemaVersion(schemaVersion);
  mSchemaVer.setVersionComment(comment);
  try {
    openTransaction();
    pm.makePersistent(mSchemaVer);
    commited = commitTransaction();
  } finally {
    if (!commited) {
      rollbackTransaction();
    }
  }
}
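As a hedged usage sketch (the version string and comment are illustrative, not taken from the Hive sources), a caller that wants the version actually recorded must enable the recording flag in its configuration before invoking the method; otherwise the call above only logs a warning and returns:

// Hypothetical caller; a real run also needs a reachable metastore database.
public class SetSchemaVersionDemo {
  public static void main(String[] args) throws MetaException {
    HiveConf conf = new HiveConf();
    // Without this flag, setMetaStoreSchemaVersion is a no-op that logs a warning.
    conf.setBoolVar(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION, true);
    ObjectStore store = new ObjectStore();
    store.setConf(conf);
    store.setMetaStoreSchemaVersion("2.3.0", "Set by an illustrative caller");
  }
}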
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class ObjectStore, method getMSchemaVersion.
@SuppressWarnings("unchecked")
private MVersionTable getMSchemaVersion() throws NoSuchObjectException, MetaException {
  boolean committed = false;
  Query query = null;
  List<MVersionTable> mVerTables = new ArrayList<MVersionTable>();
  try {
    openTransaction();
    query = pm.newQuery(MVersionTable.class);
    try {
      mVerTables = (List<MVersionTable>) query.execute();
      pm.retrieveAll(mVerTables);
    } catch (JDODataStoreException e) {
      if (e.getCause() instanceof MissingTableException) {
        throw new MetaException("Version table not found. "
            + "The metastore is not upgraded to " + MetaStoreSchemaInfo.getHiveSchemaVersion());
      } else {
        throw e;
      }
    }
    committed = commitTransaction();
    if (mVerTables.isEmpty()) {
      throw new NoSuchObjectException("No matching version found");
    }
    if (mVerTables.size() > 1) {
      String msg = "Metastore contains multiple versions (" + mVerTables.size() + ") ";
      for (MVersionTable version : mVerTables) {
        msg += "[ version = " + version.getSchemaVersion()
            + ", comment = " + version.getVersionComment() + " ] ";
      }
      throw new MetaException(msg.trim());
    }
    return mVerTables.get(0);
  } finally {
    if (!committed) {
      rollbackTransaction();
    }
    if (query != null) {
      query.closeAll();
    }
  }
}
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project hive by apache.
The class ObjectStore, method getPartition.
@Override
public Partition getPartition(String dbName, String tableName, List<String> part_vals)
    throws NoSuchObjectException, MetaException {
  openTransaction();
  Partition part = convertToPart(getMPartition(dbName, tableName, part_vals));
  commitTransaction();
  if (part == null) {
    throw new NoSuchObjectException("partition values=" + part_vals.toString());
  }
  part.setValues(part_vals);
  return part;
}
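Since this page collects call sites of NoSuchObjectException, here is a short hedged caller sketch (the database, table, and partition values are made up, and the helper is not part of ObjectStore) showing how the exception surfaces from getPartition:

// Hypothetical helper; assumes an initialized ObjectStore named store and the
// usual org.apache.hadoop.hive.metastore.api imports plus java.util.Arrays.
private static void printPartitionIfPresent(ObjectStore store) {
  try {
    Partition p = store.getPartition("default", "web_logs",
        Arrays.asList("2017-01-01", "00"));
    System.out.println("Found partition with values " + p.getValues());
  } catch (NoSuchObjectException e) {
    // Thrown when no partition with those values exists for default.web_logs
    System.out.println("No such partition: " + e.getMessage());
  } catch (MetaException e) {
    System.out.println("Metastore error: " + e.getMessage());
  }
}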