use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
the class CheckTableAccessHook method run.
public void run(HookContext hookContext) {
  HiveConf conf = hookContext.getConf();
  if (conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS) == false) {
    return;
  }
  QueryPlan plan = hookContext.getQueryPlan();
  if (plan == null) {
    return;
  }
  TableAccessInfo tableAccessInfo = hookContext.getQueryPlan().getTableAccessInfo();
  if (tableAccessInfo == null || tableAccessInfo.getOperatorToTableAccessMap() == null || tableAccessInfo.getOperatorToTableAccessMap().isEmpty()) {
    return;
  }
  LogHelper console = SessionState.getConsole();
  Map<Operator<? extends OperatorDesc>, Map<String, List<String>>> operatorToTableAccessMap = tableAccessInfo.getOperatorToTableAccessMap();
  // Must be deterministic order map for consistent q-test output across Java versions
  Map<String, String> outputOrderedMap = new LinkedHashMap<String, String>();
  for (Map.Entry<Operator<? extends OperatorDesc>, Map<String, List<String>>> tableAccess : operatorToTableAccessMap.entrySet()) {
    StringBuilder perOperatorInfo = new StringBuilder();
    perOperatorInfo.append("Operator:").append(tableAccess.getKey().getOperatorId()).append("\n");
    for (Map.Entry<String, List<String>> entry : tableAccess.getValue().entrySet()) {
      perOperatorInfo.append("Table:").append(entry.getKey()).append("\n");
      perOperatorInfo.append("Keys:").append(StringUtils.join(entry.getValue(), ',')).append("\n");
    }
    outputOrderedMap.put(tableAccess.getKey().getOperatorId(), perOperatorInfo.toString());
  }
  for (String perOperatorInfo : outputOrderedMap.values()) {
    console.printError(perOperatorInfo);
  }
}
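
The hook returns immediately unless table-key stats collection is enabled on the HiveConf, so both the flag and the hook registration have to be set for it to produce output. The snippet below is a minimal sketch of that wiring; registering the hook through hive.exec.post.hooks and the fully qualified class name org.apache.hadoop.hive.ql.hooks.CheckTableAccessHook are assumptions about the usual q-test setup, not something shown in the method above.

import org.apache.hadoop.hive.conf.HiveConf;

// Minimal sketch: enable the flag that CheckTableAccessHook checks and
// register the hook as a post-execution hook (assumed wiring; adjust to
// however your deployment registers hooks).
public class EnableTableAccessHook {
  public static HiveConf configure() {
    HiveConf conf = new HiveConf();
    // Without this flag the hook's run() returns immediately.
    conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS, true);
    // Assumed: post-exec hooks are registered via hive.exec.post.hooks.
    conf.set("hive.exec.post.hooks",
        "org.apache.hadoop.hive.ql.hooks.CheckTableAccessHook");
    return conf;
  }
}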
use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
the class CommandProcessorFactory method getForHiveCommandInternal.
public static CommandProcessor getForHiveCommandInternal(String[] cmd, HiveConf conf, boolean testOnly) throws SQLException {
  HiveCommand hiveCommand = HiveCommand.find(cmd, testOnly);
  if (hiveCommand == null || isBlank(cmd[0])) {
    return null;
  }
  if (conf == null) {
    conf = new HiveConf();
  }
  Set<String> availableCommands = new HashSet<String>();
  for (String availableCommand : conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST).split(",")) {
    availableCommands.add(availableCommand.toLowerCase().trim());
  }
  if (!availableCommands.contains(cmd[0].trim().toLowerCase())) {
    throw new SQLException("Insufficient privileges to execute " + cmd[0], "42000");
  }
  if (cmd.length > 1 && "reload".equalsIgnoreCase(cmd[0]) && "function".equalsIgnoreCase(cmd[1])) {
    // special handling for SQL "reload function"
    return null;
  }
  switch (hiveCommand) {
    case SET:
      return new SetProcessor();
    case RESET:
      return new ResetProcessor();
    case DFS:
      SessionState ss = SessionState.get();
      return new DfsProcessor(ss.getConf());
    case ADD:
      return new AddResourceProcessor();
    case LIST:
      return new ListResourceProcessor();
    case DELETE:
      return new DeleteResourceProcessor();
    case COMPILE:
      return new CompileProcessor();
    case RELOAD:
      return new ReloadProcessor();
    case CRYPTO:
      try {
        return new CryptoProcessor(SessionState.get().getHdfsEncryptionShim(), conf);
      } catch (HiveException e) {
        throw new SQLException("Fail to start the command processor due to the exception: ", e);
      }
    default:
      throw new AssertionError("Unknown HiveCommand " + hiveCommand);
  }
}
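
Because the method is public and static, it can be exercised directly, although in normal use it is reached through the factory's public entry points. The sketch below is a minimal usage example, assuming a command such as set that is on the default hive.security.command.whitelist; the token array and printed message are illustrative.

import java.sql.SQLException;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.processors.CommandProcessor;
import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;

// Minimal sketch: resolve a processor for a "set" command. A null result
// means the tokens are not a recognized Hive command; an SQLException with
// SQLState 42000 means the command is not on hive.security.command.whitelist.
public class CommandProcessorLookup {
  public static void main(String[] args) throws SQLException {
    HiveConf conf = new HiveConf();
    String[] tokens = new String[] { "set", "hive.execution.engine=tez" };
    CommandProcessor processor =
        CommandProcessorFactory.getForHiveCommandInternal(tokens, conf, false);
    if (processor != null) {
      System.out.println("Resolved processor: " + processor.getClass().getSimpleName());
    }
  }
}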
use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
the class SetProcessor method setConf.
/**
* @return A console message that is not strong enough to fail the command (e.g. deprecation).
*/
static String setConf(String varname, String key, String varvalue, boolean register) throws IllegalArgumentException {
  String result = null;
  HiveConf conf = SessionState.get().getConf();
  String value = new VariableSubstitution(new HiveVariableSource() {
    @Override
    public Map<String, String> getHiveVariable() {
      return SessionState.get().getHiveVariables();
    }
  }).substitute(conf, varvalue);
  if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
    HiveConf.ConfVars confVars = HiveConf.getConfVars(key);
    if (confVars != null) {
      if (!confVars.isType(value)) {
        StringBuilder message = new StringBuilder();
        message.append("'SET ").append(varname).append('=').append(varvalue);
        message.append("' FAILED because ").append(key).append(" expects ");
        message.append(confVars.typeString()).append(" type value.");
        throw new IllegalArgumentException(message.toString());
      }
      String fail = confVars.validate(value);
      if (fail != null) {
        StringBuilder message = new StringBuilder();
        message.append("'SET ").append(varname).append('=').append(varvalue);
        message.append("' FAILED in validation : ").append(fail).append('.');
        throw new IllegalArgumentException(message.toString());
      }
    } else if (!removedConfigs.contains(key) && key.startsWith("hive.")) {
      throw new IllegalArgumentException("hive configuration " + key + " does not exists.");
    }
  }
  conf.verifyAndSet(key, value);
  if (HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname.equals(key)) {
    if (!"spark".equals(value)) {
      SessionState.get().closeSparkSession();
    }
    if ("mr".equals(value)) {
      result = HiveConf.generateMrDeprecationWarning();
      LOG.warn(result);
    }
  }
  if (register) {
    SessionState.get().getOverriddenConfigurations().put(key, value);
  }
  return result;
}
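
When hive.conf.validation is enabled, the interesting part of setConf is the type and validator check on the ConfVars entry. The sketch below exercises that path on its own, using only the HiveConf calls that appear in the method above; hive.exec.reducers.max is chosen purely as an example of a built-in integer variable, and the exact validator behavior can vary by Hive version.

import org.apache.hadoop.hive.conf.HiveConf;

// Minimal sketch of the validation path setConf follows for registered keys.
public class ConfValidationExample {
  public static void main(String[] args) {
    HiveConf.ConfVars confVar = HiveConf.getConfVars("hive.exec.reducers.max");
    if (confVar == null) {
      // Unknown "hive.*" keys are what setConf rejects with
      // "hive configuration ... does not exists."
      System.out.println("not a built-in Hive config key");
      return;
    }
    // isType() checks that the value parses as the variable's declared type.
    System.out.println("isType(\"64\")  = " + confVar.isType("64"));
    System.out.println("isType(\"abc\") = " + confVar.isType("abc"));
    // validate() applies the variable's validator, if any, and returns a
    // failure message, or null when the value is acceptable.
    System.out.println("validate(\"64\") = " + confVar.validate("64"));
  }
}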
use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
the class ResourceMaps method createSessionDirs.
/**
 * Create dirs & session paths for this session:
 * 1. HDFS scratch dir
 * 2. Local scratch dir
 * 3. Local downloaded resource dir
 * 4. HDFS session path
 * 5. hold a lock file in the HDFS session dir to indicate that it is in use
 * 6. Local session path
 * 7. HDFS temp table space
 * @param userName
 * @throws IOException
 */
private void createSessionDirs(String userName) throws IOException {
  HiveConf conf = getConf();
  Path rootHDFSDirPath = createRootHDFSDir(conf);
  // Now create session specific dirs
  String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
  Path path;
  // 1. HDFS scratch dir
  path = new Path(rootHDFSDirPath, userName);
  hdfsScratchDirURIString = path.toUri().toString();
  createPath(conf, path, scratchDirPermission, false, false);
  // 2. Local scratch dir
  path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
  createPath(conf, path, scratchDirPermission, true, false);
  // 3. Download resources dir
  path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
  createPath(conf, path, scratchDirPermission, true, false);
  // Finally, create session paths for this session
  // Local & non-local tmp location is configurable. However, it is the same across
  // all external file systems
  String sessionId = getSessionId();
  // 4. HDFS session path
  hdfsSessionPath = new Path(hdfsScratchDirURIString, sessionId);
  createPath(conf, hdfsSessionPath, scratchDirPermission, false, true);
  conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
  // 5. hold a lock file in the HDFS session dir to indicate that it is in use
  if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK)) {
    FileSystem fs = hdfsSessionPath.getFileSystem(conf);
    FSDataOutputStream hdfsSessionPathInfoFile = fs.create(new Path(hdfsSessionPath, INFO_FILE_NAME), true);
    hdfsSessionPathInfoFile.writeUTF("process: " + ManagementFactory.getRuntimeMXBean().getName() + "\n");
    hdfsSessionPathInfoFile.close();
    hdfsSessionPathLockFile = fs.create(new Path(hdfsSessionPath, LOCK_FILE_NAME), true);
  }
  // 6. Local session path
  localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId);
  createPath(conf, localSessionPath, scratchDirPermission, true, true);
  conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
  // 7. HDFS temp table space
  hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
  // This is a sub-dir under the hdfsSessionPath. Will be removed along with that dir.
  // Don't register with deleteOnExit
  createPath(conf, hdfsTmpTableSpace, scratchDirPermission, false, false);
  conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
}
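
Every step above funnels through the createPath helper with the permission read from hive.scratch.dir.permission plus flags for local vs. HDFS and cleanup-on-exit. The snippet below is a simplified sketch of what such a helper does; ensureDir and ScratchDirs are hypothetical names, not the actual SessionState implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Simplified sketch (not the SessionState code): pick the right FileSystem,
// create the directory with the configured scratch-dir permission, and
// optionally register it for cleanup when the FileSystem is closed.
public final class ScratchDirs {
  private ScratchDirs() {}

  static Path ensureDir(Configuration conf, Path path, String permission,
      boolean isLocal, boolean deleteOnExit) throws IOException {
    FileSystem fs = isLocal ? FileSystem.getLocal(conf) : path.getFileSystem(conf);
    // Permission string such as "700" is interpreted as an octal mode.
    FsPermission perm = new FsPermission(Short.parseShort(permission, 8));
    if (!fs.mkdirs(path, perm)) {
      throw new IOException("Failed to create directory " + path);
    }
    if (deleteOnExit) {
      fs.deleteOnExit(path);
    }
    return fs.makeQualified(path);
  }
}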
use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
the class TestHBaseStoreBitVector method init.
@Before
public void init() throws IOException {
  MockitoAnnotations.initMocks(this);
  HiveConf conf = new HiveConf();
  conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
  store = MockUtils.init(conf, htable, rows);
}
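
The test setup above sets an arbitrary string key (HBaseReadWrite.NO_CACHE_CONF) with the plain Configuration API, which HiveConf inherits, rather than going through a ConfVars entry. The sketch below contrasts the two styles; the key name my.test.no.cache is illustrative.

import org.apache.hadoop.hive.conf.HiveConf;

// Minimal sketch: raw Configuration keys vs. registered ConfVars entries.
public class ConfFlagStyles {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Arbitrary key: HiveConf extends Hadoop's Configuration, so any
    // string key works, with a caller-supplied default on read.
    conf.setBoolean("my.test.no.cache", true);
    System.out.println(conf.getBoolean("my.test.no.cache", false));
    // Registered variable: name, type, and default come from HiveConf.ConfVars.
    conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS, true);
    System.out.println(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS));
  }
}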