Use of org.apache.hadoop.hive.conf.HiveVariableSource in project hive by apache.
The class SetProcessor, method setConf.
/**
 * @return A console message that is not strong enough to fail the command (e.g. deprecation).
 */
static String setConf(String varname, String key, String varvalue, boolean register) throws IllegalArgumentException {
  String result = null;
  HiveConf conf = SessionState.get().getConf();
  String value = new VariableSubstitution(new HiveVariableSource() {
    @Override
    public Map<String, String> getHiveVariable() {
      return SessionState.get().getHiveVariables();
    }
  }).substitute(conf, varvalue);
  if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
    HiveConf.ConfVars confVars = HiveConf.getConfVars(key);
    if (confVars != null) {
      if (!confVars.isType(value)) {
        StringBuilder message = new StringBuilder();
        message.append("'SET ").append(varname).append('=').append(varvalue);
        message.append("' FAILED because ").append(key).append(" expects ");
        message.append(confVars.typeString()).append(" type value.");
        throw new IllegalArgumentException(message.toString());
      }
      String fail = confVars.validate(value);
      if (fail != null) {
        StringBuilder message = new StringBuilder();
        message.append("'SET ").append(varname).append('=').append(varvalue);
        message.append("' FAILED in validation: ").append(fail).append('.');
        throw new IllegalArgumentException(message.toString());
      }
    } else if (!removedConfigs.contains(key) && key.startsWith("hive.")) {
      throw new IllegalArgumentException("hive configuration " + key + " does not exist.");
    }
  }
  conf.verifyAndSet(key, value);
  if (HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname.equals(key)) {
    if (!"spark".equals(value)) {
      SessionState.get().closeSparkSession();
    }
    if ("mr".equals(value)) {
      result = HiveConf.generateMrDeprecationWarning();
      LOG.warn(result);
    }
  }
  if (register) {
    SessionState.get().getOverriddenConfigurations().put(key, value);
  }
  return result;
}
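Every snippet on this page repeats the same idiom: wrap a hivevar map in a HiveVariableSource and let VariableSubstitution expand ${...} references before the string is used. Below is a minimal standalone sketch of that idiom; the local hiveVars map stands in for SessionState.get().getHiveVariables(), and the variable name mydb is illustrative, not taken from the Hive sources above.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveVariableSource;
import org.apache.hadoop.hive.conf.VariableSubstitution;

public class SubstitutionSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // Stand-in for the session's hivevar map; in the CLI this is populated by
    // `--hivevar mydb=sales` or `SET hivevar:mydb=sales`.
    Map<String, String> hiveVars = new HashMap<>();
    hiveVars.put("mydb", "sales");

    String raw = "use ${hivevar:mydb}";
    String expanded = new VariableSubstitution(new HiveVariableSource() {
      @Override
      public Map<String, String> getHiveVariable() {
        return hiveVars;
      }
    }).substitute(conf, raw);

    System.out.println(expanded); // prints: use sales
  }
}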
Use of org.apache.hadoop.hive.conf.HiveVariableSource in project hive by apache.
The class Driver, method compile.
// deferClose indicates whether the close/destroy should be deferred when the process has been
// interrupted. It should be set to true if compile is called from another method, such as
// runInternal, which then performs the deferred close itself.
private void compile(String command, boolean resetTaskIds, boolean deferClose) throws CommandProcessorResponse {
  PerfLogger perfLogger = SessionState.getPerfLogger(true);
  perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN);
  perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
  lDrvState.stateLock.lock();
  try {
    lDrvState.driverState = DriverState.COMPILING;
  } finally {
    lDrvState.stateLock.unlock();
  }
  command = new VariableSubstitution(new HiveVariableSource() {
    @Override
    public Map<String, String> getHiveVariable() {
      return SessionState.get().getHiveVariables();
    }
  }).substitute(conf, command);
  String queryStr = command;
  try {
    // The command should be redacted before logging to avoid exposing sensitive data.
    queryStr = HookUtils.redactLogString(conf, command);
  } catch (Exception e) {
    LOG.warn("WARNING! Query command could not be redacted.", e);
  }
  checkInterrupted("at beginning of compilation.", null, null);
  if (ctx != null && ctx.getExplainAnalyze() != AnalyzeState.RUNNING) {
    // Close the existing ctx etc. before compiling a new query, but do not destroy the driver.
    closeInProcess(false);
  }
  if (resetTaskIds) {
    TaskFactory.resetId();
  }
  LockedDriverState.setLockedDriverState(lDrvState);
  String queryId = queryState.getQueryId();
  if (ctx != null) {
    setTriggerContext(queryId);
  }
  // Save some info for the web UI for use after the plan is freed.
  this.queryDisplay.setQueryStr(queryStr);
  this.queryDisplay.setQueryId(queryId);
  LOG.info("Compiling command(queryId=" + queryId + "): " + queryStr);
  conf.setQueryString(queryStr);
  // FIXME: side effect: this leaves the last query string set at the session level.
  SessionState.get().getConf().setQueryString(queryStr);
  SessionState.get().setupQueryCurrentTimestamp();
  // Whether any error occurred during query compilation. Used for the query lifetime hook.
  boolean compileError = false;
  boolean parseError = false;
  try {
    // Initialize the transaction manager. This must be done before analyze is called.
    if (initTxnMgr != null) {
      queryTxnMgr = initTxnMgr;
    } else {
      queryTxnMgr = SessionState.get().initTxnMgr(conf);
    }
    if (queryTxnMgr instanceof Configurable) {
      ((Configurable) queryTxnMgr).setConf(conf);
    }
    queryState.setTxnManager(queryTxnMgr);
    // If the user hits Ctrl-C twice to kill the Hive CLI JVM, we want to release locks.
    // If compile is called multiple times, clear the old shutdown hook first.
    ShutdownHookManager.removeShutdownHook(shutdownRunner);
    final HiveTxnManager txnMgr = queryTxnMgr;
    shutdownRunner = new Runnable() {
      @Override
      public void run() {
        try {
          releaseLocksAndCommitOrRollback(false, txnMgr);
        } catch (LockException e) {
          LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " + e.getMessage());
        }
      }
    };
    ShutdownHookManager.addShutdownHook(shutdownRunner, SHUTDOWN_HOOK_PRIORITY);
    checkInterrupted("before parsing and analysing the query", null, null);
    if (ctx == null) {
      ctx = new Context(conf);
      setTriggerContext(queryId);
    }
    ctx.setRuntimeStatsSource(runtimeStatsSource);
    ctx.setCmd(command);
    ctx.setHDFSCleanup(true);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
    // Trigger the query hook before compilation.
    hookRunner.runBeforeParseHook(command);
    ASTNode tree;
    try {
      tree = ParseUtils.parse(command, ctx);
    } catch (ParseException e) {
      parseError = true;
      throw e;
    } finally {
      hookRunner.runAfterParseHook(command, parseError);
    }
    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
    hookRunner.runBeforeCompileHook(command);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
    // Flush the metastore cache. This ensures that we don't pick up objects from a previous
    // query running in this same thread. This has to be done after we get our semantic
    // analyzer (this is when the connection to the metastore is made) but before we analyze,
    // because at that point we need access to the objects.
    Hive.get().getMSC().flushCache();
    BaseSemanticAnalyzer sem;
    // Do semantic analysis and plan generation.
    if (hookRunner.hasPreAnalyzeHooks()) {
      HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
      hookCtx.setConf(conf);
      hookCtx.setUserName(userName);
      hookCtx.setIpAddress(SessionState.get().getUserIpAddress());
      hookCtx.setCommand(command);
      hookCtx.setHiveOperation(queryState.getHiveOperation());
      tree = hookRunner.runPreAnalyzeHooks(hookCtx, tree);
      sem = SemanticAnalyzerFactory.get(queryState, tree);
      openTransaction();
      sem.analyze(tree, ctx);
      hookCtx.update(sem);
      hookRunner.runPostAnalyzeHooks(hookCtx, sem.getAllRootTasks());
    } else {
      sem = SemanticAnalyzerFactory.get(queryState, tree);
      openTransaction();
      sem.analyze(tree, ctx);
    }
    LOG.info("Semantic Analysis Completed");
    // Retrieve information about cache usage for the query.
    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED)) {
      cacheUsage = sem.getCacheUsage();
    }
    // Validate the plan.
    sem.validate();
    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
    checkInterrupted("after analyzing query.", null, null);
    // Get the output schema.
    schema = getSchema(sem, conf);
    plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
        queryState.getHiveOperation(), schema);
    conf.set("mapreduce.workflow.id", "hive_" + queryId);
    conf.set("mapreduce.workflow.name", queryStr);
    // Initialize the FetchTask right here.
    if (plan.getFetchTask() != null) {
      plan.getFetchTask().initialize(queryState, plan, null, ctx.getOpContext());
    }
    // Do the authorization check.
    if (!sem.skipAuthorization() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
      try {
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
        doAuthorization(queryState.getHiveOperation(), sem, command);
      } catch (AuthorizationException authExp) {
        console.printError("Authorization failed: " + authExp.getMessage() + ". Use SHOW GRANT to get more details.");
        errorMessage = authExp.getMessage();
        SQLState = "42000";
        throw createProcessorResponse(403);
      } finally {
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
      }
    }
    if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) {
      String explainOutput = getExplainOutput(sem, plan, tree);
      if (explainOutput != null) {
        LOG.info("EXPLAIN output for queryid " + queryId + " : " + explainOutput);
        if (conf.isWebUiQueryInfoCacheEnabled()) {
          queryDisplay.setExplainPlan(explainOutput);
        }
      }
    }
  } catch (CommandProcessorResponse cpr) {
    throw cpr;
  } catch (Exception e) {
    checkInterrupted("during query compilation: " + e.getMessage(), null, null);
    compileError = true;
    ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
    errorMessage = "FAILED: " + e.getClass().getSimpleName();
    if (error != ErrorMsg.GENERIC_ERROR) {
      errorMessage += " [Error " + error.getErrorCode() + "]:";
    }
    // HIVE-4889
    if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) {
      errorMessage += " " + e.getCause().getMessage();
    } else {
      errorMessage += " " + e.getMessage();
    }
    if (error == ErrorMsg.TXNMGR_NOT_ACID) {
      errorMessage += ". Failed command: " + queryStr;
    }
    SQLState = error.getSQLState();
    downstreamError = e;
    console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
    throw createProcessorResponse(error.getErrorCode());
  } finally {
    // Trigger the post-compilation hook. Note that if compilation fails here,
    // the before/after execution hooks will never be executed.
    if (!parseError) {
      try {
        hookRunner.runAfterCompilationHook(command, compileError);
      } catch (Exception e) {
        LOG.warn("Failed when invoking query after-compilation hook.", e);
      }
    }
    double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE) / 1000.00;
    ImmutableMap<String, Long> compileHMSTimings = dumpMetaCallTimingWithoutEx("compilation");
    queryDisplay.setHmsTimings(QueryDisplay.Phase.COMPILATION, compileHMSTimings);
    boolean isInterrupted = lDrvState.isAborted();
    if (isInterrupted && !deferClose) {
      closeInProcess(true);
    }
    lDrvState.stateLock.lock();
    try {
      if (isInterrupted) {
        lDrvState.driverState = deferClose ? DriverState.EXECUTING : DriverState.ERROR;
      } else {
        lDrvState.driverState = compileError ? DriverState.ERROR : DriverState.COMPILED;
      }
    } finally {
      lDrvState.stateLock.unlock();
    }
    if (isInterrupted) {
      LOG.info("Compiling command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
    } else {
      LOG.info("Completed compiling command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
    }
  }
}
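One detail worth pulling out of compile is how driver state transitions are guarded: the lock is held only for the transition itself, never across the long-running parse/analyze work, so a concurrent kill request can still observe and flip the state. A simplified sketch of that pattern follows; LockedState and Phase are illustrative stand-ins, not Hive classes.

import java.util.concurrent.locks.ReentrantLock;

// Illustrative stand-in for Hive's LockedDriverState / DriverState.
class LockedState {
  enum Phase { INITIALIZED, COMPILING, COMPILED, ERROR }

  final ReentrantLock stateLock = new ReentrantLock();
  volatile Phase phase = Phase.INITIALIZED;

  // Mirrors the pattern in Driver.compile: take the lock only for the
  // transition itself, never around the long-running work.
  void transition(Phase next) {
    stateLock.lock();
    try {
      phase = next;
    } finally {
      stateLock.unlock();
    }
  }
}

public class StateGuardSketch {
  public static void main(String[] args) {
    LockedState state = new LockedState();
    state.transition(LockedState.Phase.COMPILING);
    try {
      // ... long-running parse/analyze work would happen here, outside the lock ...
      state.transition(LockedState.Phase.COMPILED);
    } catch (Exception e) {
      state.transition(LockedState.Phase.ERROR);
    }
    System.out.println("final state: " + state.phase); // final state: COMPILED
  }
}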
Use of org.apache.hadoop.hive.conf.HiveVariableSource in project hive by apache.
The class ColumnStatsSemanticAnalyzer, method genRewrittenQuery.
private String genRewrittenQuery(List<String> colNames, HiveConf conf, Map<String, String> partSpec, boolean isPartitionStats) throws SemanticException {
  StringBuilder rewrittenQueryBuilder = new StringBuilder("select ");
  for (int i = 0; i < colNames.size(); i++) {
    if (i > 0) {
      rewrittenQueryBuilder.append(" , ");
    }
    String func = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_STATS_NDV_ALGO).toLowerCase();
    rewrittenQueryBuilder.append("compute_stats(`");
    rewrittenQueryBuilder.append(escapeBackTicks(colNames.get(i)));
    rewrittenQueryBuilder.append("`, '" + func + "'");
    if ("fm".equals(func)) {
      int numBitVectors = 0;
      try {
        numBitVectors = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
      } catch (Exception e) {
        throw new SemanticException(e.getMessage());
      }
      rewrittenQueryBuilder.append(", " + numBitVectors);
    }
    rewrittenQueryBuilder.append(')');
  }
  if (isPartitionStats) {
    for (FieldSchema fs : tbl.getPartCols()) {
      rewrittenQueryBuilder.append(" , `" + fs.getName() + "`");
    }
  }
  rewrittenQueryBuilder.append(" from `");
  rewrittenQueryBuilder.append(tbl.getDbName());
  rewrittenQueryBuilder.append("`.");
  rewrittenQueryBuilder.append("`" + tbl.getTableName() + "`");
  isRewritten = true;
  // If partition-level statistics are requested, add the partition predicate to the rewritten
  // query.
  if (isPartitionStats) {
    rewrittenQueryBuilder.append(genPartitionClause(partSpec));
  }
  String rewrittenQuery = rewrittenQueryBuilder.toString();
  rewrittenQuery = new VariableSubstitution(new HiveVariableSource() {
    @Override
    public Map<String, String> getHiveVariable() {
      return SessionState.get().getHiveVariables();
    }
  }).substitute(conf, rewrittenQuery);
  return rewrittenQuery;
}
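For intuition, here is a standalone sketch of the string this builder emits; the database, table, and column names are made up, and backtick escaping plus the 'fm' bit-vector branch are omitted.

import java.util.Arrays;
import java.util.List;

public class RewriteSketch {
  public static void main(String[] args) {
    List<String> colNames = Arrays.asList("id", "price");
    String func = "hll"; // value of hive.stats.ndv.algo, lower-cased
    StringBuilder sb = new StringBuilder("select ");
    for (int i = 0; i < colNames.size(); i++) {
      if (i > 0) {
        sb.append(" , ");
      }
      sb.append("compute_stats(`").append(colNames.get(i)).append("`, '").append(func).append("')");
    }
    sb.append(" from `sales`.`orders`");
    // Prints: select compute_stats(`id`, 'hll') , compute_stats(`price`, 'hll') from `sales`.`orders`
    System.out.println(sb);
  }
}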
Use of org.apache.hadoop.hive.conf.HiveVariableSource in project hive by apache.
The class CliDriver, method processCmd1.
public CommandProcessorResponse processCmd1(String cmd) throws CommandProcessorException {
  CliSessionState ss = (CliSessionState) SessionState.get();
  String cmd_trimmed = HiveStringUtils.removeComments(cmd).trim();
  String[] tokens = tokenizeCmd(cmd_trimmed);
  CommandProcessorResponse response = new CommandProcessorResponse();
  if (cmd_trimmed.toLowerCase().equals("quit") || cmd_trimmed.toLowerCase().equals("exit")) {
    // If we have come this far, either the previous commands were all successful or this
    // is the command line; in either case this counts as a successful run.
    ss.close();
    System.exit(0);
  } else if (tokens[0].equalsIgnoreCase("source")) {
    String cmd_1 = getFirstCmd(cmd_trimmed, tokens[0].length());
    cmd_1 = new VariableSubstitution(new HiveVariableSource() {
      @Override
      public Map<String, String> getHiveVariable() {
        return SessionState.get().getHiveVariables();
      }
    }).substitute(ss.getConf(), cmd_1);
    File sourceFile = new File(cmd_1);
    if (!sourceFile.isFile()) {
      console.printError("File: " + cmd_1 + " is not a file.");
      throw new CommandProcessorException(1);
    } else {
      try {
        response = processFile(cmd_1);
      } catch (IOException e) {
        console.printError("Failed processing file " + cmd_1 + " " + e.getLocalizedMessage(), stringifyException(e));
        throw new CommandProcessorException(1);
      }
    }
  } else if (cmd_trimmed.startsWith("!")) {
    // For shell commands, use the unstripped command.
    String shell_cmd = cmd.trim().substring(1);
    shell_cmd = new VariableSubstitution(new HiveVariableSource() {
      @Override
      public Map<String, String> getHiveVariable() {
        return SessionState.get().getHiveVariables();
      }
    }).substitute(ss.getConf(), shell_cmd);
    // shell_cmd = "/bin/bash -c \'" + shell_cmd + "\'";
    try {
      ShellCmdExecutor executor = new ShellCmdExecutor(shell_cmd, ss.out, ss.err);
      int responseCode = executor.execute();
      if (responseCode != 0) {
        console.printError("Command failed with exit code = " + responseCode);
        ss.resetThreadName();
        throw new CommandProcessorException(responseCode);
      }
      response = new CommandProcessorResponse();
    } catch (CommandProcessorException e) {
      throw e;
    } catch (Exception e) {
      console.printError("Exception raised from Shell command " + e.getLocalizedMessage(), stringifyException(e));
      throw new CommandProcessorException(1);
    }
  } else {
    // Local mode.
    try {
      try (CommandProcessor proc = CommandProcessorFactory.get(tokens, (HiveConf) conf)) {
        if (proc instanceof IDriver) {
          // Let Driver strip comments using the SQL parser.
          response = processLocalCmd(cmd, proc, ss);
        } else {
          response = processLocalCmd(cmd_trimmed, proc, ss);
        }
      }
    } catch (SQLException e) {
      console.printError("Failed processing command " + tokens[0] + " " + e.getLocalizedMessage(), org.apache.hadoop.util.StringUtils.stringifyException(e));
      throw new CommandProcessorException(1);
    } catch (CommandProcessorException e) {
      throw e;
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
  return response;
}
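Stripped of the substitution and error handling, processCmd1 is a dispatcher on the shape of the first token. A standalone approximation of just that routing follows; tokenizeCmd is replaced with a plain whitespace split, and the Route names are made up for the sketch.

public class DispatchSketch {
  enum Route { QUIT, SOURCE_FILE, SHELL, HIVE_SQL }

  // Approximation of CliDriver's routing: quit/exit, `source <file>`,
  // `!<shell command>`, and everything else goes to the SQL path.
  static Route route(String cmd) {
    String trimmed = cmd.trim();
    String[] tokens = trimmed.split("\\s+");
    if (trimmed.equalsIgnoreCase("quit") || trimmed.equalsIgnoreCase("exit")) {
      return Route.QUIT;
    } else if (tokens[0].equalsIgnoreCase("source")) {
      return Route.SOURCE_FILE;
    } else if (trimmed.startsWith("!")) {
      return Route.SHELL;
    }
    return Route.HIVE_SQL;
  }

  public static void main(String[] args) {
    System.out.println(route("exit"));               // QUIT
    System.out.println(route("source /tmp/q.hql"));  // SOURCE_FILE
    System.out.println(route("!ls /tmp"));           // SHELL
    System.out.println(route("select 1"));           // HIVE_SQL
  }
}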
Use of org.apache.hadoop.hive.conf.HiveVariableSource in project hive by apache.
The class AddResourceProcessor, method run.
@Override
public CommandProcessorResponse run(String command) throws CommandProcessorException {
  SessionState ss = SessionState.get();
  command = new VariableSubstitution(new HiveVariableSource() {
    @Override
    public Map<String, String> getHiveVariable() {
      return SessionState.get().getHiveVariables();
    }
  }).substitute(ss.getConf(), command);
  String[] tokens = command.split("\\s+");
  SessionState.ResourceType t;
  if (tokens.length < 2 || (t = SessionState.find_resource_type(tokens[0])) == null) {
    console.printError("Usage: add [" + StringUtils.join(SessionState.ResourceType.values(), "|") + "] <value> [<value>]*");
    throw new CommandProcessorException(1);
  }
  CommandProcessorResponse authErrResp = CommandUtil.authorizeCommand(ss, HiveOperationType.ADD, Arrays.asList(tokens));
  if (authErrResp != null) {
    // There was an authorization issue.
    return authErrResp;
  }
  try {
    ss.add_resources(t, Arrays.asList(Arrays.copyOfRange(tokens, 1, tokens.length)));
  } catch (Exception e) {
    throw new CommandProcessorException(e);
  }
  return new CommandProcessorResponse();
}
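Note that by the time run is invoked, the leading ADD keyword has already been consumed by the command dispatcher, so tokens[0] is the resource type itself (FILE, JAR, or ARCHIVE). A small sketch of that parse step, with made-up paths:

import java.util.Arrays;
import java.util.List;

public class AddResourceParseSketch {
  public static void main(String[] args) {
    // What run() sees for the user command `add jar /tmp/a.jar /tmp/b.jar`:
    // the "add" keyword is gone, only the resource type and values remain.
    String command = "jar /tmp/a.jar /tmp/b.jar";
    String[] tokens = command.split("\\s+");

    String resourceType = tokens[0]; // "jar"
    List<String> values = Arrays.asList(Arrays.copyOfRange(tokens, 1, tokens.length));

    System.out.println(resourceType + " -> " + values); // jar -> [/tmp/a.jar, /tmp/b.jar]
  }
}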