
Example 86 with SessionState

Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.

Class TestAcidOnTez, method setUp:

@Before
public void setUp() throws Exception {
    hiveConf = new HiveConf(this.getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
    hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
    hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    TxnDbUtil.setConfValues(hiveConf);
    hiveConf.setInt(MRJobConfig.MAP_MEMORY_MB, 1024);
    hiveConf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 1024);
    TxnDbUtil.prepDb(hiveConf);
    File f = new File(TEST_WAREHOUSE_DIR);
    if (f.exists()) {
        FileUtil.fullyDelete(f);
    }
    if (!(new File(TEST_WAREHOUSE_DIR).mkdirs())) {
        throw new RuntimeException("Could not create " + TEST_WAREHOUSE_DIR);
    }
    SessionState.start(new SessionState(hiveConf));
    d = DriverFactory.newDriver(hiveConf);
    dropTables();
    runStatementOnDriver("create table " + Table.ACIDTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc " + getTblProperties());
    runStatementOnDriver("create table " + Table.ACIDTBLPART + "(a int, b int) partitioned by (p string) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc " + getTblProperties());
    runStatementOnDriver("create table " + Table.NONACIDORCTBL + "(a int, b int) clustered by (a) into " + BUCKET_COUNT + " buckets stored as orc ");
    runStatementOnDriver("create table " + Table.NONACIDPART + "(a int, b int) partitioned by (p string) stored as orc ");
    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(1,2)");
    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(3,4)");
    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(5,6)");
    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(7,8)");
    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(9,10)");
    runStatementOnDriver("insert into " + Table.NONACIDORCTBL + "(a,b) values(1,2),(3,4),(5,6),(7,8),(9,10)");
}
Also used : HiveInputFormat(org.apache.hadoop.hive.ql.io.HiveInputFormat) SessionState(org.apache.hadoop.hive.ql.session.SessionState) HiveConf(org.apache.hadoop.hive.conf.HiveConf) File(java.io.File) Before(org.junit.Before)
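
Distilled from the setUp above, the essential ordering is: build a HiveConf, start a SessionState bound to it, and only then create a driver to run SQL, since the driver reads the thread-local session. The sketch below is a minimal, hedged illustration of that ordering, assuming Hive 3.x APIs (DriverFactory.newDriver returning an IDriver); the class name, table name, and settings here are illustrative, not taken from the test.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.DriverFactory;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionBootstrapSketch {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // Relax strict mode the same way the test does; warehouse/metastore settings omitted.
        conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
        // SessionState is thread-local and must be started before a driver is created.
        SessionState.start(new SessionState(conf));
        IDriver driver = DriverFactory.newDriver(conf);
        try {
            driver.run("create table sketch_t (a int, b int) stored as orc"); // hypothetical table
            driver.run("insert into sketch_t values (1, 2)");
        } finally {
            driver.close();
            driver.destroy();
            SessionState.get().close();
        }
    }
}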

Example 87 with SessionState

Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.

Class Hive, method createTable:

/**
 * Creates the table with the given objects. It takes additional arguments for
 * primary keys and foreign keys associated with the table.
 *
 * @param tbl
 *          a table object
 * @param ifNotExists
 *          if true, ignore AlreadyExistsException
 * @param primaryKeys
 *          primary key columns associated with the table
 * @param foreignKeys
 *          foreign key columns associated with the table
 * @param uniqueConstraints
 *          UNIQUE constraints associated with the table
 * @param notNullConstraints
 *          NOT NULL constraints associated with the table
 * @param defaultConstraints
 *          DEFAULT constraints associated with the table
 * @param checkConstraints
 *          CHECK constraints associated with the table
 * @throws HiveException
 */
public void createTable(Table tbl, boolean ifNotExists, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints) throws HiveException {
    try {
        if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) {
            tbl.setDbName(SessionState.get().getCurrentDatabase());
        }
        if (tbl.getCols().size() == 0 || tbl.getSd().getColsSize() == 0) {
            tbl.setFields(HiveMetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), tbl.getDeserializer()));
        }
        tbl.checkValidity(conf);
        if (tbl.getParameters() != null) {
            tbl.getParameters().remove(hive_metastoreConstants.DDL_TIME);
        }
        org.apache.hadoop.hive.metastore.api.Table tTbl = tbl.getTTable();
        PrincipalPrivilegeSet principalPrivs = new PrincipalPrivilegeSet();
        SessionState ss = SessionState.get();
        if (ss != null) {
            CreateTableAutomaticGrant grants = ss.getCreateTableGrants();
            if (grants != null) {
                principalPrivs.setUserPrivileges(grants.getUserGrants());
                principalPrivs.setGroupPrivileges(grants.getGroupGrants());
                principalPrivs.setRolePrivileges(grants.getRoleGrants());
                tTbl.setPrivileges(principalPrivs);
            }
        }
        if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) {
            getMSC().createTable(tTbl);
        } else {
            getMSC().createTableWithConstraints(tTbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
        }
    } catch (AlreadyExistsException e) {
        if (!ifNotExists) {
            throw new HiveException(e);
        }
    } catch (Exception e) {
        throw new HiveException(e);
    }
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) PrincipalPrivilegeSet(org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet) CreateTableAutomaticGrant(org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) TException(org.apache.thrift.TException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) FileNotFoundException(java.io.FileNotFoundException) JDODataStoreException(javax.jdo.JDODataStoreException)
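
A detail worth isolating from the method above: when no database name is given, it is resolved from the thread-local session via SessionState.get().getCurrentDatabase(). A minimal sketch of that fallback, where the class and the qualifyName() helper are hypothetical names used only for illustration:

import org.apache.hadoop.hive.ql.session.SessionState;

// Sketch of the default-database fallback used in createTable above.
// qualifyName() is a made-up helper, not a Hive API.
public class CurrentDatabaseSketch {
    static String qualifyName(String dbName, String tableName) {
        if (dbName == null || dbName.trim().isEmpty()) {
            SessionState ss = SessionState.get();                // thread-local session, may be null
            dbName = (ss != null) ? ss.getCurrentDatabase() : "default";
        }
        return dbName + "." + tableName;
    }
}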

Example 88 with SessionState

Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.

Class Hive, method loadDynamicPartitions:

/**
 * Given a source directory name of the load path, load all dynamically generated partitions
 * into the specified table and return a list of strings that represent the dynamic partition
 * paths.
 * @param loadPath source directory containing the dynamically generated partition directories
 * @param tableName name of the table to load the partitions into
 * @param partSpec static part of the partition specification; the dynamic partition columns are
 *          derived from the directory names under loadPath
 * @param loadFileType how files already present in the target partitions are treated
 *          (replace, keep existing, or overwrite existing)
 * @param numDP number of dynamic partitions
 * @param numLB number of list-bucketing levels (0 if the table is not list-bucketed)
 * @param isAcid true if this is an ACID operation
 * @param writeId writeId, can be 0 unless isAcid == true
 * @param stmtId statement ID of the statement within the transaction
 * @param hasFollowingStatsTask true if a stats gathering task will run after the load
 * @param operation the ACID operation type, reported to the metastore for ACID tables
 * @param isInsertOverwrite true if the load originates from an INSERT OVERWRITE statement
 * @return partition map details (PartitionSpec and Partition)
 * @throws HiveException
 */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath, final String tableName, final Map<String, String> partSpec, final LoadFileType loadFileType, final int numDP, final int numLB, final boolean isAcid, final long writeId, final int stmtId, final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, boolean isInsertOverwrite) throws HiveException {
    final Map<Map<String, String>, Partition> partitionsMap = Collections.synchronizedMap(new LinkedHashMap<Map<String, String>, Partition>());
    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
    final ExecutorService pool = Executors.newFixedThreadPool(poolSize, new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build());
    // Get all valid partition paths and existing partitions for them (if any)
    final Table tbl = getTable(tableName);
    final Set<Path> validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, writeId, stmtId, AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite);
    final int partsToLoad = validPartitions.size();
    final AtomicInteger partitionsLoaded = new AtomicInteger(0);
    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0 && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
    final SessionState parentSession = SessionState.get();
    final List<Future<Void>> futures = Lists.newLinkedList();
    try {
        // for each dynamically created DP directory, construct a full partition spec
        // and load the partition based on that
        final Map<Long, RawStore> rawStoreMap = new ConcurrentHashMap<>();
        for (final Path partPath : validPartitions) {
            // generate a full partition specification
            final LinkedHashMap<String, String> fullPartSpec = Maps.newLinkedHashMap(partSpec);
            if (!Warehouse.makeSpecFromName(fullPartSpec, partPath, new HashSet<String>(partSpec.keySet()))) {
                Utilities.FILE_OP_LOGGER.warn("Ignoring invalid DP directory " + partPath);
                continue;
            }
            futures.add(pool.submit(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    try {
                        // move file would require session details (needCopy() invokes SessionState.get)
                        SessionState.setCurrentSessionState(parentSession);
                        LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
                        // load the partition
                        Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, loadFileType, true, numLB > 0, false, isAcid, hasFollowingStatsTask, writeId, stmtId);
                        partitionsMap.put(fullPartSpec, newPartition);
                        if (inPlaceEligible) {
                            synchronized (ps) {
                                InPlaceUpdate.rePositionCursor(ps);
                                partitionsLoaded.incrementAndGet();
                                InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/" + partsToLoad + " partitions.");
                            }
                        }
                        // Add embedded rawstore, so we can cleanup later to avoid memory leak
                        if (getMSC().isLocalMetaStore()) {
                            if (!rawStoreMap.containsKey(Thread.currentThread().getId())) {
                                rawStoreMap.put(Thread.currentThread().getId(), HiveMetaStore.HMSHandler.getRawStore());
                            }
                        }
                        return null;
                    } catch (Exception t) {
                        LOG.error("Exception when loading partition with parameters " + " partPath=" + partPath + ", " + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", " + " loadFileType=" + loadFileType.toString() + ", " + " listBucketingLevel=" + numLB + ", " + " isAcid=" + isAcid + ", " + " hasFollowingStatsTask=" + hasFollowingStatsTask, t);
                        throw t;
                    }
                }
            }));
        }
        pool.shutdown();
        LOG.debug("Number of partitions to be added is " + futures.size());
        for (Future future : futures) {
            future.get();
        }
        rawStoreMap.forEach((k, rs) -> rs.shutdown());
    } catch (InterruptedException | ExecutionException e) {
        LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
        // cancel other futures
        for (Future future : futures) {
            future.cancel(true);
        }
        throw new HiveException("Exception when loading " + partsToLoad + " in table " + tbl.getTableName() + " with loadPath=" + loadPath, e);
    }
    try {
        if (isAcid) {
            List<String> partNames = new ArrayList<>(partitionsMap.size());
            for (Partition p : partitionsMap.values()) {
                partNames.add(p.getName());
            }
            getMSC().addDynamicPartitions(parentSession.getTxnMgr().getCurrentTxnId(), writeId, tbl.getDbName(), tbl.getTableName(), partNames, AcidUtils.toDataOperationType(operation));
        }
        LOG.info("Loaded " + partitionsMap.size() + " partitions");
        return partitionsMap;
    } catch (TException te) {
        throw new HiveException("Exception updating metastore for acid table " + tableName + " with partitions " + partitionsMap.values(), te);
    }
}
Also used : TException(org.apache.thrift.TException) SessionState(org.apache.hadoop.hive.ql.session.SessionState) ArrayList(java.util.ArrayList) RawStore(org.apache.hadoop.hive.metastore.RawStore) Callable(java.util.concurrent.Callable) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ExecutionException(java.util.concurrent.ExecutionException) LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) PrintStream(java.io.PrintStream) RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) IOException(java.io.IOException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) FileNotFoundException(java.io.FileNotFoundException) JDODataStoreException(javax.jdo.JDODataStoreException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
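
The SessionState-specific point in this example is the hand-off to worker threads: the session is thread-local, so the parent session is captured on the submitting thread and re-attached in each Callable with SessionState.setCurrentSessionState before anything that calls SessionState.get() runs. A hedged sketch of just that pattern follows; the class name, pool size, thread-name format, and the Runnable placeholder are illustrative.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hive.ql.session.SessionState;

// Sketch: SessionState is thread-local, so each pool worker re-attaches the
// parent session before doing work that relies on SessionState.get().
public class SessionPropagationSketch {
    static void runWithParentSession(Runnable work) throws Exception {   // `work` is a placeholder
        final SessionState parentSession = SessionState.get();           // capture on the caller's thread
        ExecutorService pool = Executors.newFixedThreadPool(4,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-%d").build());
        Callable<Void> task = () -> {
            SessionState.setCurrentSessionState(parentSession);          // re-attach in the worker
            work.run();
            return null;
        };
        Future<Void> future = pool.submit(task);
        future.get();                                                    // surfaces worker exceptions
        pool.shutdown();
    }
}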

Example 89 with SessionState

Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.

Class Hive, method moveFile:

// It is assumed that the parent directory of destf already exists when this method is called.
// When replace is true and destf is a directory, this method behaves a little differently from
// the mv command: it replaces destf instead of moving the source under it. In that case the
// replaced destf still preserves the original destf's permissions.
public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf, boolean replace, boolean isSrcLocal) throws HiveException {
    final FileSystem srcFs, destFs;
    try {
        destFs = destf.getFileSystem(conf);
    } catch (IOException e) {
        LOG.error("Failed to get dest fs", e);
        throw new HiveException(e.getMessage(), e);
    }
    try {
        srcFs = srcf.getFileSystem(conf);
    } catch (IOException e) {
        LOG.error("Failed to get src fs", e);
        throw new HiveException(e.getMessage(), e);
    }
    HdfsUtils.HadoopFileStatus destStatus = null;
    // If source path is a subdirectory of the destination path (or the other way around):
    // ex: INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
    // where the staging directory is a subdirectory of the destination directory
    // (1) Do not delete the dest dir before doing the move operation.
    // (2) It is assumed that subdir and dir are in same encryption zone.
    // (3) Move individual files from src dir to dest dir.
    boolean srcIsSubDirOfDest = isSubDir(srcf, destf, srcFs, destFs, isSrcLocal), destIsSubDirOfSrc = isSubDir(destf, srcf, destFs, srcFs, false);
    final String msg = "Unable to move source " + srcf + " to destination " + destf;
    try {
        if (replace) {
            try {
                destStatus = new HdfsUtils.HadoopFileStatus(conf, destFs, destf);
                // to delete the file first
                if (replace && !srcIsSubDirOfDest) {
                    destFs.delete(destf, true);
                    LOG.debug("The path " + destf.toString() + " is deleted");
                }
            } catch (FileNotFoundException ignore) {
            }
        }
        final HdfsUtils.HadoopFileStatus desiredStatus = destStatus;
        final SessionState parentSession = SessionState.get();
        if (isSrcLocal) {
            // For local src file, copy to hdfs
            destFs.copyFromLocalFile(srcf, destf);
            return true;
        } else {
            if (needToCopy(srcf, destf, srcFs, destFs)) {
                // copy if across file system or encryption zones.
                LOG.debug("Copying source " + srcf + " to " + destf + " because HDFS encryption zones are different.");
                return FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf, // delete source
                true, // overwrite destination
                replace, conf);
            } else {
                if (srcIsSubDirOfDest || destIsSubDirOfSrc) {
                    FileStatus[] srcs = destFs.listStatus(srcf, FileUtils.HIDDEN_FILES_PATH_FILTER);
                    List<Future<Void>> futures = new LinkedList<>();
                    final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0 ? Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25), new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build()) : null;
                    if (destIsSubDirOfSrc && !destFs.exists(destf)) {
                        if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
                            Utilities.FILE_OP_LOGGER.trace("Creating " + destf);
                        }
                        destFs.mkdirs(destf);
                    }
                    /* Move files one by one because source is a subdirectory of destination */
                    for (final FileStatus srcStatus : srcs) {
                        final Path destFile = new Path(destf, srcStatus.getPath().getName());
                        final String poolMsg = "Unable to move source " + srcStatus.getPath() + " to destination " + destFile;
                        if (null == pool) {
                            boolean success = false;
                            if (destFs instanceof DistributedFileSystem) {
                                ((DistributedFileSystem) destFs).rename(srcStatus.getPath(), destFile, Options.Rename.OVERWRITE);
                                success = true;
                            } else {
                                destFs.delete(destFile, false);
                                success = destFs.rename(srcStatus.getPath(), destFile);
                            }
                            if (!success) {
                                throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest:" + destf + " returned false");
                            }
                        } else {
                            futures.add(pool.submit(new Callable<Void>() {

                                @Override
                                public Void call() throws HiveException {
                                    SessionState.setCurrentSessionState(parentSession);
                                    final String group = srcStatus.getGroup();
                                    try {
                                        boolean success = false;
                                        if (destFs instanceof DistributedFileSystem) {
                                            ((DistributedFileSystem) destFs).rename(srcStatus.getPath(), destFile, Options.Rename.OVERWRITE);
                                            success = true;
                                        } else {
                                            destFs.delete(destFile, false);
                                            success = destFs.rename(srcStatus.getPath(), destFile);
                                        }
                                        if (!success) {
                                            throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest path:" + destFile + " returned false");
                                        }
                                    } catch (Exception e) {
                                        throw getHiveException(e, poolMsg);
                                    }
                                    return null;
                                }
                            }));
                        }
                    }
                    if (null != pool) {
                        pool.shutdown();
                        for (Future<Void> future : futures) {
                            try {
                                future.get();
                            } catch (Exception e) {
                                throw handlePoolException(pool, e);
                            }
                        }
                    }
                    return true;
                } else {
                    if (destFs.rename(srcf, destf)) {
                        return true;
                    }
                    return false;
                }
            }
        }
    } catch (Exception e) {
        throw getHiveException(e, msg);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SessionState(org.apache.hadoop.hive.ql.session.SessionState) FileStatus(org.apache.hadoop.fs.FileStatus) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LinkedList(java.util.LinkedList) Callable(java.util.concurrent.Callable) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) TException(org.apache.thrift.TException) ExecutionException(java.util.concurrent.ExecutionException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) JDODataStoreException(javax.jdo.JDODataStoreException) FileSystem(org.apache.hadoop.fs.FileSystem) ExecutorService(java.util.concurrent.ExecutorService) HdfsUtils(org.apache.hadoop.hive.io.HdfsUtils) Future(java.util.concurrent.Future) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder)
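
The per-file rename branch above hinges on the file system type: DistributedFileSystem exposes a rename variant that overwrites the destination in one call, while the generic FileSystem API needs an explicit delete first and reports failure by returning false. A small sketch of just that decision, with class and method names chosen here only for illustration:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the overwrite-aware rename used above: HDFS can overwrite in one
// rename call, other file systems need the destination removed first.
public class RenameOverwriteSketch {
    static void renameOverwrite(FileSystem destFs, Path src, Path dest) throws IOException {
        if (destFs instanceof DistributedFileSystem) {
            ((DistributedFileSystem) destFs).rename(src, dest, Options.Rename.OVERWRITE);
        } else {
            destFs.delete(dest, false);                  // best-effort removal of a stale copy
            if (!destFs.rename(src, dest)) {             // plain rename signals failure by returning false
                throw new IOException("rename from " + src + " to " + dest + " returned false");
            }
        }
    }
}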

Example 90 with SessionState

Use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.

Class SessionHiveMetaStoreClient, method updateTempTableColumnStats:

private boolean updateTempTableColumnStats(String dbName, String tableName, ColumnStatistics colStats) throws MetaException {
    SessionState ss = SessionState.get();
    if (ss == null) {
        throw new MetaException("No current SessionState, cannot update temporary table stats for " + StatsUtils.getFullyQualifiedTableName(dbName, tableName));
    }
    Map<String, ColumnStatisticsObj> ssTableColStats = getTempTableColumnStatsForTable(dbName, tableName);
    if (ssTableColStats == null) {
        // Add new entry for this table
        ssTableColStats = new HashMap<String, ColumnStatisticsObj>();
        ss.getTempTableColStats().put(StatsUtils.getFullyQualifiedTableName(dbName, tableName), ssTableColStats);
    }
    mergeColumnStats(ssTableColStats, colStats);
    List<String> colNames = new ArrayList<>();
    for (ColumnStatisticsObj obj : colStats.getStatsObj()) {
        colNames.add(obj.getColName());
    }
    org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName);
    StatsSetupConst.setColumnStatsState(table.getParameters(), colNames);
    return true;
}
Also used : SessionState(org.apache.hadoop.hive.ql.session.SessionState) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) ArrayList(java.util.ArrayList) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
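
Temporary-table column statistics never reach the metastore; they live in a per-session map keyed by the fully qualified table name. A minimal sketch of reading that map with the same guard used above; the class and method names are illustrative, not Hive APIs.

import java.util.Map;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.stats.StatsUtils;

// Sketch: temp-table column stats are held in the SessionState, not the metastore.
public class TempTableStatsSketch {
    static Map<String, ColumnStatisticsObj> lookupTempStats(String dbName, String tableName)
            throws MetaException {
        SessionState ss = SessionState.get();
        if (ss == null) {
            throw new MetaException("No current SessionState");    // same guard as the method above
        }
        String fqName = StatsUtils.getFullyQualifiedTableName(dbName, tableName);
        return ss.getTempTableColStats().get(fqName);               // null if no stats recorded yet
    }
}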

Aggregations

SessionState (org.apache.hadoop.hive.ql.session.SessionState)112 IOException (java.io.IOException)28 HiveConf (org.apache.hadoop.hive.conf.HiveConf)22 ArrayList (java.util.ArrayList)14 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)14 Path (org.apache.hadoop.fs.Path)13 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)13 LinkedList (java.util.LinkedList)12 CliSessionState (org.apache.hadoop.hive.cli.CliSessionState)12 File (java.io.File)11 FileNotFoundException (java.io.FileNotFoundException)11 Map (java.util.Map)11 Test (org.junit.Test)10 PrintStream (java.io.PrintStream)9 ExecutionException (java.util.concurrent.ExecutionException)9 HashMap (java.util.HashMap)8 LinkedHashMap (java.util.LinkedHashMap)7 SerDeException (org.apache.hadoop.hive.serde2.SerDeException)7 ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder)6 Callable (java.util.concurrent.Callable)6