use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
the class DDLTask method showGrants.
private int showGrants(Hive db, ShowGrantDesc showGrantDesc) throws HiveException {
  HiveAuthorizer authorizer = getSessionAuthorizer(db);
  try {
    List<HivePrivilegeInfo> privInfos = authorizer.showPrivileges(
        getAuthorizationTranslator(authorizer).getHivePrincipal(showGrantDesc.getPrincipalDesc()),
        getAuthorizationTranslator(authorizer).getHivePrivilegeObject(showGrantDesc.getHiveObj()));
    boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
    writeToFile(writeGrantInfo(privInfos, testMode), showGrantDesc.getResFile());
  } catch (IOException e) {
    throw new HiveException("Error in show grant statement", e);
  }
  return 0;
}
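This is the standard wrap-and-rethrow shape in Hive's DDL tasks: the grant listing is serialized to the statement's resource file, and any checked IOException is surfaced as a HiveException so the driver deals with a single failure type. A minimal self-contained sketch of that shape (the nested HiveException class and the writeGrantOutput helper are illustrative stand-ins, not Hive's actual classes):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class ShowGrantsSketch {
  // Stand-in for org.apache.hadoop.hive.ql.metadata.HiveException.
  static class HiveException extends Exception {
    HiveException(String msg, Throwable cause) { super(msg, cause); }
  }

  // Hypothetical helper mirroring the writeToFile(...) call above: write the
  // serialized grant output and convert the checked IOException into a
  // HiveException carrying the original cause.
  static int writeGrantOutput(String grantInfo, String resFile) throws HiveException {
    try {
      Files.write(Paths.get(resFile), grantInfo.getBytes(StandardCharsets.UTF_8));
    } catch (IOException e) {
      throw new HiveException("Error in show grant statement", e);
    }
    return 0; // 0 signals success to the task framework
  }
}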
use of org.apache.hadoop.hive.ql.metadata.HiveException in project SQLWindowing by hbutani.
the class PTFOperator method processOp.
@Override
public void processOp(Object row, int tag) throws HiveException {
  try {
    if (!isMapOperator) {
      /*
       * Check if the current row belongs to the currently accumulated partition:
       * - if not, process the current partition and reset the input partition;
       * - set currentKeys to the new key if currentKeys is null or has changed.
       */
      newKeys.getNewKey(row, inputPart.getOI());
      boolean keysAreEqual = (currentKeys != null && newKeys != null) ? newKeys.equals(currentKeys) : false;
      if (currentKeys != null && !keysAreEqual) {
        processInputPartition();
        inputPart = RuntimeUtils.createFirstPartitionForChain(qDef, inputObjInspectors[0], hiveConf, isMapOperator);
      }
      if (currentKeys == null || !keysAreEqual) {
        if (currentKeys == null) {
          currentKeys = newKeys.copyKey();
        } else {
          currentKeys.copyKey(newKeys);
        }
      }
    }
    // Add the row to the current partition.
    inputPart.append(row);
  } catch (WindowingException we) {
    throw new HiveException("Cannot process PTFOperator.", we);
  }
}
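The core of processOp is a streaming group-boundary check: rows arrive sorted by partition key, and when the key changes the accumulated partition is flushed and a fresh one is started. A simplified, self-contained sketch of that pattern (plain Java generics stand in for Hive's KeyWrapper and partition types; this is the idea, not equivalent code):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;

public class PartitionBoundarySketch<R, K> {
  private final Function<R, K> keyOf;          // extracts the partition key from a row
  private final Consumer<List<R>> processPart; // invoked once per completed partition
  private List<R> current = new ArrayList<>();
  private K currentKey;

  public PartitionBoundarySketch(Function<R, K> keyOf, Consumer<List<R>> processPart) {
    this.keyOf = keyOf;
    this.processPart = processPart;
  }

  // Mirrors processOp: flush when the key changes, then append the row.
  public void process(R row) {
    K newKey = keyOf.apply(row);
    boolean keysAreEqual = currentKey != null && newKey.equals(currentKey);
    if (currentKey != null && !keysAreEqual) {
      processPart.accept(current); // process the completed partition
      current = new ArrayList<>(); // reset the input partition
    }
    if (currentKey == null || !keysAreEqual) {
      currentKey = newKey;         // remember the new group key
    }
    current.add(row);
  }

  // Call once after the last row so the final partition is not lost.
  public void close() {
    if (!current.isEmpty()) {
      processPart.accept(current);
    }
  }
}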
use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
the class SemanticAnalyzer method comparePathKeyStrength.
/**
 * Compares the encryption key strengths of two paths.
 *
 * @param p1 Path to an HDFS file system
 * @param p2 Path to an HDFS file system
 * @return -1 if p1's key is weaker than p2's; 0 if they are equal; 1 if p1's key is stronger
 * @throws HiveException If an error occurs while comparing key strengths.
 */
private int comparePathKeyStrength(Path p1, Path p2) throws HiveException {
  HadoopShims.HdfsEncryptionShim hdfsEncryptionShim;
  hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
  if (hdfsEncryptionShim != null) {
    try {
      return hdfsEncryptionShim.comparePathKeyStrength(p1, p2);
    } catch (Exception e) {
      throw new HiveException("Unable to compare key strength for " + p1 + " and " + p2 + " : " + e, e);
    }
  }
  // Non-encrypted path (or equal strength)
  return 0;
}
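A plausible use of the three-way result is to refuse moving data into a path protected by a weaker key. The guard below is a hypothetical illustration, not code from SemanticAnalyzer; it assumes only the -1/0/1 contract documented above:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.HiveException;

class EncryptionGuardSketch {
  // Hypothetical guard: data may only move to a target whose key strength is
  // at least that of the source. 'comparison' is the value returned by
  // comparePathKeyStrength(src, dest).
  static void requireDestAtLeastAsStrong(int comparison, Path src, Path dest)
      throws HiveException {
    if (comparison > 0) { // src key is stronger than dest key
      throw new HiveException("Cannot move data from encrypted path " + src
          + " to a path with weaker encryption: " + dest);
    }
  }
}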
use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
the class SemanticAnalyzer method isPathReadOnly.
/**
* Checks if a given path has read-only access permissions.
*
* @param path The path to check for read-only permissions.
* @return True if the path is read-only; False otherwise.
* @throws HiveException If an error occurs while checking file permissions.
*/
private boolean isPathReadOnly(Path path) throws HiveException {
  HiveConf conf = SessionState.get().getConf();
  try {
    FileSystem fs = path.getFileSystem(conf);
    UserGroupInformation ugi = Utils.getUGI();
    FileStatus status = fs.getFileStatus(path);
    // We only check for write permission. If the check fails with an
    // AccessControlException, the location may be read-only.
    FileUtils.checkFileAccessWithImpersonation(fs, status, FsAction.WRITE, ugi.getUserName());
    // The path has write permission.
    return false;
  } catch (AccessControlException e) {
    // The write-access check was denied, so we treat the path as read-only.
    return true;
  } catch (Exception e) {
    throw new HiveException("Unable to determine if " + path + " is read only: " + e, e);
  }
}
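This probe-and-classify idiom generalizes beyond HDFS: attempt the WRITE access check, interpret a denial as "read-only", and rethrow anything else. The same pattern expressed with plain java.nio against a local filesystem (a stand-in for the Hive/Hadoop calls above, not equivalent code):

import java.io.IOException;
import java.nio.file.AccessDeniedException;
import java.nio.file.AccessMode;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ReadOnlyProbeSketch {
  // Returns true when WRITE access is denied, false when it is granted; any
  // other failure propagates, mirroring the HiveException wrapping above.
  static boolean isReadOnly(Path path) throws IOException {
    try {
      path.getFileSystem().provider().checkAccess(path, AccessMode.WRITE);
      return false; // write permitted, so not read-only
    } catch (AccessDeniedException e) {
      return true;  // a denied write is classified as read-only, not an error
    }
  }

  public static void main(String[] args) throws IOException {
    System.out.println(isReadOnly(Paths.get("/etc/hosts")));
  }
}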
use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
the class SemanticAnalyzer method getMaterializationMetadata.
public void getMaterializationMetadata(QB qb) throws SemanticException {
  try {
    gatherCTEReferences(qb, rootClause);
    int threshold = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_CTE_MATERIALIZE_THRESHOLD);
    for (CTEClause cte : Sets.newHashSet(aliasToCTEs.values())) {
      if (threshold >= 0 && cte.reference >= threshold) {
        cte.materialize = true;
      }
    }
  } catch (HiveException e) {
    // Has to use the full name to make sure it does not conflict with
    // org.apache.commons.lang.StringUtils
    LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(e.getMessage(), e);
  }
}
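The materialization rule itself is a simple threshold test: a negative HIVE_CTE_MATERIALIZE_THRESHOLD disables materialization entirely, and otherwise any CTE referenced at least that many times is marked for materialization. A minimal sketch of just that rule (CteClause here is a plain stand-in for Hive's internal CTEClause, not the real class):

import java.util.List;

public class CteMaterializeSketch {
  static class CteClause {
    int reference;       // how many times the CTE is referenced in the query
    boolean materialize; // set when the reference count reaches the threshold
  }

  // Mirrors the loop above: a negative threshold disables materialization.
  static void markForMaterialization(List<CteClause> ctes, int threshold) {
    for (CteClause cte : ctes) {
      if (threshold >= 0 && cte.reference >= threshold) {
        cte.materialize = true;
      }
    }
  }
}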