
Example 66 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class CheckTableAccessHook method run.

public void run(HookContext hookContext) {
    HiveConf conf = hookContext.getConf();
    if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS)) {
        return;
    }
    QueryPlan plan = hookContext.getQueryPlan();
    if (plan == null) {
        return;
    }
    TableAccessInfo tableAccessInfo = plan.getTableAccessInfo();
    if (tableAccessInfo == null || tableAccessInfo.getOperatorToTableAccessMap() == null || tableAccessInfo.getOperatorToTableAccessMap().isEmpty()) {
        return;
    }
    LogHelper console = SessionState.getConsole();
    Map<Operator<? extends OperatorDesc>, Map<String, List<String>>> operatorToTableAccessMap = tableAccessInfo.getOperatorToTableAccessMap();
    // Must be deterministic order map for consistent q-test output across Java versions
    Map<String, String> outputOrderedMap = new LinkedHashMap<String, String>();
    for (Map.Entry<Operator<? extends OperatorDesc>, Map<String, List<String>>> tableAccess : operatorToTableAccessMap.entrySet()) {
        StringBuilder perOperatorInfo = new StringBuilder();
        perOperatorInfo.append("Operator:").append(tableAccess.getKey().getOperatorId()).append("\n");
        for (Map.Entry<String, List<String>> entry : tableAccess.getValue().entrySet()) {
            perOperatorInfo.append("Table:").append(entry.getKey()).append("\n");
            perOperatorInfo.append("Keys:").append(StringUtils.join(entry.getValue(), ',')).append("\n");
        }
        outputOrderedMap.put(tableAccess.getKey().getOperatorId(), perOperatorInfo.toString());
    }
    for (String perOperatorInfo : outputOrderedMap.values()) {
        console.printError(perOperatorInfo);
    }
}
Also used : Operator(org.apache.hadoop.hive.ql.exec.Operator) LogHelper(org.apache.hadoop.hive.ql.session.SessionState.LogHelper) QueryPlan(org.apache.hadoop.hive.ql.QueryPlan) LinkedHashMap(java.util.LinkedHashMap) TableAccessInfo(org.apache.hadoop.hive.ql.parse.TableAccessInfo) HiveConf(org.apache.hadoop.hive.conf.HiveConf) List(java.util.List) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) Map(java.util.Map)
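
The hook only produces output when table-key stats collection is enabled and the hook itself is wired into query execution. A minimal sketch of that wiring (assuming the hook lives in org.apache.hadoop.hive.ql.hooks and is registered as a pre-execution hook, as Hive's q-tests do; the values are illustrative):

HiveConf conf = new HiveConf();
// Without this flag the hook returns immediately (see the guard at the top of run()).
conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS, true);
// Register the hook so the driver invokes run(HookContext) before query execution.
conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, "org.apache.hadoop.hive.ql.hooks.CheckTableAccessHook");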

Example 67 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class CommandProcessorFactory method getForHiveCommandInternal.

public static CommandProcessor getForHiveCommandInternal(String[] cmd, HiveConf conf, boolean testOnly) throws SQLException {
    HiveCommand hiveCommand = HiveCommand.find(cmd, testOnly);
    if (hiveCommand == null || isBlank(cmd[0])) {
        return null;
    }
    if (conf == null) {
        conf = new HiveConf();
    }
    Set<String> availableCommands = new HashSet<String>();
    for (String availableCommand : conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST).split(",")) {
        availableCommands.add(availableCommand.toLowerCase().trim());
    }
    if (!availableCommands.contains(cmd[0].trim().toLowerCase())) {
        throw new SQLException("Insufficient privileges to execute " + cmd[0], "42000");
    }
    if (cmd.length > 1 && "reload".equalsIgnoreCase(cmd[0]) && "function".equalsIgnoreCase(cmd[1])) {
        // special handling for SQL "reload function"
        return null;
    }
    switch(hiveCommand) {
        case SET:
            return new SetProcessor();
        case RESET:
            return new ResetProcessor();
        case DFS:
            SessionState ss = SessionState.get();
            return new DfsProcessor(ss.getConf());
        case ADD:
            return new AddResourceProcessor();
        case LIST:
            return new ListResourceProcessor();
        case DELETE:
            return new DeleteResourceProcessor();
        case COMPILE:
            return new CompileProcessor();
        case RELOAD:
            return new ReloadProcessor();
        case CRYPTO:
            try {
                return new CryptoProcessor(SessionState.get().getHdfsEncryptionShim(), conf);
            } catch (HiveException e) {
                throw new SQLException("Fail to start the command processor due to the exception: ", e);
            }
        default:
            throw new AssertionError("Unknown HiveCommand " + hiveCommand);
    }
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) SQLException(java.sql.SQLException) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HashSet(java.util.HashSet)
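
Callers normally reach this method through the public CommandProcessorFactory.get entry point, and any command missing from hive.security.command.whitelist fails with SQLState 42000. A minimal sketch (assuming the two-argument get(String[], HiveConf) overload of this Hive version; the whitelist value is illustrative):

HiveConf conf = new HiveConf();
// Commands absent from this list hit the "Insufficient privileges" SQLException above.
conf.setVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST, "set,reset,dfs,add,list,delete,reload,compile");
// Returns a SetProcessor here; a null return means the input is not a Hive command.
CommandProcessor processor = CommandProcessorFactory.get(new String[] { "set" }, conf);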

Example 68 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class SetProcessor method setConf.

/**
   * @return A console message (e.g. a deprecation warning) worth surfacing but not severe enough to fail the command, or null if there is nothing to report.
   */
static String setConf(String varname, String key, String varvalue, boolean register) throws IllegalArgumentException {
    String result = null;
    HiveConf conf = SessionState.get().getConf();
    String value = new VariableSubstitution(new HiveVariableSource() {

        @Override
        public Map<String, String> getHiveVariable() {
            return SessionState.get().getHiveVariables();
        }
    }).substitute(conf, varvalue);
    if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
        HiveConf.ConfVars confVars = HiveConf.getConfVars(key);
        if (confVars != null) {
            if (!confVars.isType(value)) {
                StringBuilder message = new StringBuilder();
                message.append("'SET ").append(varname).append('=').append(varvalue);
                message.append("' FAILED because ").append(key).append(" expects ");
                message.append(confVars.typeString()).append(" type value.");
                throw new IllegalArgumentException(message.toString());
            }
            String fail = confVars.validate(value);
            if (fail != null) {
                StringBuilder message = new StringBuilder();
                message.append("'SET ").append(varname).append('=').append(varvalue);
                message.append("' FAILED in validation : ").append(fail).append('.');
                throw new IllegalArgumentException(message.toString());
            }
        } else if (!removedConfigs.contains(key) && key.startsWith("hive.")) {
            throw new IllegalArgumentException("hive configuration " + key + " does not exists.");
        }
    }
    conf.verifyAndSet(key, value);
    if (HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname.equals(key)) {
        if (!"spark".equals(value)) {
            SessionState.get().closeSparkSession();
        }
        if ("mr".equals(value)) {
            result = HiveConf.generateMrDeprecationWarning();
            LOG.warn(result);
        }
    }
    if (register) {
        SessionState.get().getOverriddenConfigurations().put(key, value);
    }
    return result;
}
Also used : VariableSubstitution(org.apache.hadoop.hive.conf.VariableSubstitution) HiveVariableSource(org.apache.hadoop.hive.conf.HiveVariableSource) HiveConf(org.apache.hadoop.hive.conf.HiveConf) MetadataTypedColumnsetSerDe.defaultNullString(org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.defaultNullString)
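
setConf is normally reached through SetProcessor.run, which splits the key=value pair and surfaces the returned warning on the console. A minimal sketch (assuming a started SessionState in this JVM and an enclosing method that declares throws Exception, since run is checked in some Hive versions; the property is illustrative):

SessionState.start(new HiveConf());
SetProcessor set = new SetProcessor();
// Routed through setConf above: a validation failure raises IllegalArgumentException,
// while switching the execution engine to "mr" only yields a deprecation warning.
set.run("hive.exec.parallel=true");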

Example 69 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class SessionState method createSessionDirs.

/**
   * Create dirs & session paths for this session:
   * 1. HDFS scratch dir
   * 2. Local scratch dir
   * 3. Local downloaded resource dir
   * 4. HDFS session path
   * 5. hold a lock file in the HDFS session dir to indicate that it is in use
   * 6. Local session path
   * 7. HDFS temp table space
   * @param userName the user whose scratch and session directories are created
   * @throws IOException
   */
private void createSessionDirs(String userName) throws IOException {
    HiveConf conf = getConf();
    Path rootHDFSDirPath = createRootHDFSDir(conf);
    // Now create session specific dirs
    String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
    Path path;
    // 1. HDFS scratch dir
    path = new Path(rootHDFSDirPath, userName);
    hdfsScratchDirURIString = path.toUri().toString();
    createPath(conf, path, scratchDirPermission, false, false);
    // 2. Local scratch dir
    path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
    createPath(conf, path, scratchDirPermission, true, false);
    // 3. Download resources dir
    path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
    createPath(conf, path, scratchDirPermission, true, false);
    // Finally, create session paths for this session
    // Local & non-local tmp location is configurable. however it is the same across
    // all external file systems
    String sessionId = getSessionId();
    // 4. HDFS session path
    hdfsSessionPath = new Path(hdfsScratchDirURIString, sessionId);
    createPath(conf, hdfsSessionPath, scratchDirPermission, false, true);
    conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
    // 5. hold a lock file in the HDFS session dir to indicate that it is in use
    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK)) {
        FileSystem fs = hdfsSessionPath.getFileSystem(conf);
        FSDataOutputStream hdfsSessionPathInfoFile = fs.create(new Path(hdfsSessionPath, INFO_FILE_NAME), true);
        hdfsSessionPathInfoFile.writeUTF("process: " + ManagementFactory.getRuntimeMXBean().getName() + "\n");
        hdfsSessionPathInfoFile.close();
        hdfsSessionPathLockFile = fs.create(new Path(hdfsSessionPath, LOCK_FILE_NAME), true);
    }
    // 6. Local session path
    localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId);
    createPath(conf, localSessionPath, scratchDirPermission, true, true);
    conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
    // 7. HDFS temp table space
    hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
    // This is a sub-dir under the hdfsSessionPath. Will be removed along with that dir.
    // Don't register with deleteOnExit
    createPath(conf, hdfsTmpTableSpace, scratchDirPermission, false, false);
    conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) HiveConf(org.apache.hadoop.hive.conf.HiveConf) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
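
Every path created above derives from a handful of HiveConf settings, so the layout is controllable per session. A sketch of the relevant knobs (values are illustrative; starting a session is the usual trigger for this method):

HiveConf conf = new HiveConf();
conf.setVar(HiveConf.ConfVars.SCRATCHDIR, "/tmp/hive");            // root of the HDFS scratch dir (step 1)
conf.setVar(HiveConf.ConfVars.LOCALSCRATCHDIR, "/tmp/hive-local"); // local scratch and local session path (steps 2 and 6)
conf.setVar(HiveConf.ConfVars.SCRATCHDIRPERMISSION, "700");        // permission applied by createPath
conf.setBoolVar(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK, true);    // write the info and lock files (step 5)
SessionState.start(conf);                                          // session startup creates the dirs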

Example 70 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class TestHBaseStoreBitVector method init.

@Before
public void init() throws IOException {
    MockitoAnnotations.initMocks(this);
    HiveConf conf = new HiveConf();
    conf.setBoolean(HBaseReadWrite.NO_CACHE_CONF, true);
    store = MockUtils.init(conf, htable, rows);
}
Also used : HiveConf(org.apache.hadoop.hive.conf.HiveConf) Before(org.junit.Before)
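
Because HiveConf extends Hadoop's Configuration, tests can mix raw string-keyed setters (as with NO_CACHE_CONF above) with the typed ConfVars accessors. A brief sketch (the raw key below is made up for illustration):

HiveConf conf = new HiveConf();
conf.setBoolean("example.raw.flag", true);             // raw Configuration API; hypothetical key
conf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); // typed HiveConf accessor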

Aggregations

HiveConf (org.apache.hadoop.hive.conf.HiveConf): 404 usages
BeforeClass (org.junit.BeforeClass): 73 usages
Test (org.junit.Test): 66 usages
Path (org.apache.hadoop.fs.Path): 54 usages
Before (org.junit.Before): 50 usages
Driver (org.apache.hadoop.hive.ql.Driver): 46 usages
CliSessionState (org.apache.hadoop.hive.cli.CliSessionState): 44 usages
IOException (java.io.IOException): 39 usages
ArrayList (java.util.ArrayList): 37 usages
File (java.io.File): 31 usages
HashMap (java.util.HashMap): 26 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 26 usages
SessionState (org.apache.hadoop.hive.ql.session.SessionState): 22 usages
LinkedHashMap (java.util.LinkedHashMap): 17 usages
List (java.util.List): 16 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 15 usages
MiniHS2 (org.apache.hive.jdbc.miniHS2.MiniHS2): 14 usages
Map (java.util.Map): 12 usages
HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient): 12 usages
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 12 usages