
Example 91 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class ZooKeeperHiveLockManager method setContext.

/**
   * @param ctx  The lock manager context (containing the Hive configuration file)
   * Start the ZooKeeper client based on the zookeeper cluster specified in the conf.
   **/
@Override
public void setContext(HiveLockManagerCtx ctx) throws LockException {
    this.ctx = ctx;
    HiveConf conf = ctx.getConf();
    sleepTime = conf.getTimeVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
    numRetriesForLock = conf.getIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES);
    numRetriesForUnLock = conf.getIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES);
    try {
        curatorFramework = CuratorFrameworkSingleton.getInstance(conf);
        parent = conf.getVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_NAMESPACE);
        try {
            curatorFramework.create().withMode(CreateMode.PERSISTENT).forPath("/" + parent, new byte[0]);
        } catch (Exception e) {
            // ignore if the parent already exists
            if (!(e instanceof KeeperException) || ((KeeperException) e).code() != KeeperException.Code.NODEEXISTS) {
                LOG.warn("Unexpected ZK exception when creating parent node /" + parent, e);
            }
        }
    } catch (Exception e) {
        LOG.error("Failed to create curatorFramework object: ", e);
        throw new LockException(ErrorMsg.ZOOKEEPER_CLIENT_COULD_NOT_BE_INITIALIZED.getMsg());
    }
}
Also used : HiveConf(org.apache.hadoop.hive.conf.HiveConf) KeeperException(org.apache.zookeeper.KeeperException)
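
The setContext method above pulls all of its tuning from the HiveConf it is handed. A minimal, hedged sketch of wiring that up (fragment only, imports and exception handling omitted; the no-arg ZooKeeperHiveLockManager constructor and the HiveLockManagerCtx(HiveConf) constructor are assumed, and the values are illustrative):

HiveConf conf = new HiveConf();
// Settings read back by setContext via getTimeVar/getIntVar/getVar.
conf.setTimeVar(HiveConf.ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, 1, TimeUnit.SECONDS);
conf.setIntVar(HiveConf.ConfVars.HIVE_LOCK_NUMRETRIES, 100);
conf.setIntVar(HiveConf.ConfVars.HIVE_UNLOCK_NUMRETRIES, 10);
conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_NAMESPACE, "hive_zookeeper_namespace");
ZooKeeperHiveLockManager lockMgr = new ZooKeeperHiveLockManager();
// setContext reads the four settings above and starts the ZooKeeper client.
lockMgr.setContext(new HiveLockManagerCtx(conf));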

Example 92 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class MapJoinProcessor method generateMapJoinOperator.

public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator op, int mapJoinPos) throws SemanticException {
    HiveConf hiveConf = pctx.getConf();
    boolean noCheckOuterJoin = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN) && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN);
    MapJoinOperator mapJoinOp = convertMapJoin(pctx.getConf(), op, op.getConf().isLeftInputJoin(), op.getConf().getBaseSrc(), op.getConf().getMapAliases(), mapJoinPos, noCheckOuterJoin, true);
    // create a dummy select to select all columns
    genSelectPlan(pctx, mapJoinOp);
    return mapJoinOp;
}
Also used : MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) HiveConf(org.apache.hadoop.hive.conf.HiveConf)
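
Note that generateMapJoinOperator only skips the outer-join check when both bucket-map-join flags are enabled. A short, hedged sketch of the same HiveConf lookup in isolation (values chosen for illustration only):

HiveConf conf = new HiveConf();
conf.setBoolVar(HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN, true);
conf.setBoolVar(HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN, true);
// Mirrors the check in the method above: both flags must be true.
boolean noCheckOuterJoin = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN)
        && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN);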

Example 93 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class TableSizeBasedBigTableSelectorForAutoSMJ method getBigTablePosition.

public int getBigTablePosition(ParseContext parseCtx, JoinOperator joinOp, Set<Integer> bigTableCandidates) throws SemanticException {
    int bigTablePos = -1;
    long maxSize = -1;
    HiveConf conf = parseCtx.getConf();
    try {
        List<TableScanOperator> topOps = new ArrayList<TableScanOperator>();
        getListTopOps(joinOp, topOps);
        int currentPos = 0;
        for (TableScanOperator topOp : topOps) {
            if (topOp == null) {
                return -1;
            }
            if (!bigTableCandidates.contains(currentPos)) {
                currentPos++;
                continue;
            }
            Table table = topOp.getConf().getTableMetadata();
            long currentSize = 0;
            if (!table.isPartitioned()) {
                currentSize = getSize(conf, table);
            } else {
                // For partitioned tables, get the size of all the partitions
                PrunedPartitionList partsList = PartitionPruner.prune(topOp, parseCtx, null);
                for (Partition part : partsList.getNotDeniedPartns()) {
                    currentSize += getSize(conf, part);
                }
            }
            if (currentSize > maxSize) {
                maxSize = currentSize;
                bigTablePos = currentPos;
            }
            currentPos++;
        }
    } catch (HiveException e) {
        throw new SemanticException(e.getMessage());
    }
    return bigTablePos;
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Table(org.apache.hadoop.hive.ql.metadata.Table) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) ArrayList(java.util.ArrayList) HiveConf(org.apache.hadoop.hive.conf.HiveConf) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
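
The selection rule in getBigTablePosition is: among the positions listed as big-table candidates, pick the one with the largest total size (partition sizes summed for partitioned tables). A self-contained, hedged illustration of that rule with made-up sizes (java.util imports omitted):

long maxSize = -1;
int bigTablePos = -1;
long[] sizes = { 10_000L, 50_000L, 250_000L };                        // hypothetical per-table sizes
Set<Integer> bigTableCandidates = new HashSet<>(Arrays.asList(0, 1)); // position 2 is not a candidate
for (int pos = 0; pos < sizes.length; pos++) {
    if (!bigTableCandidates.contains(pos)) {
        continue;                                                     // mirrors the candidate check above
    }
    if (sizes[pos] > maxSize) {
        maxSize = sizes[pos];
        bigTablePos = pos;
    }
}
// bigTablePos == 1: position 2 holds the largest table but is not a candidate.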

Example 94 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class MetaDataExportListener method export_meta_data.

/** Export the metadata to a given path, and then move it to the user's trash */
private void export_meta_data(PreDropTableEvent tableEvent) throws MetaException {
    FileSystem fs = null;
    Table tbl = tableEvent.getTable();
    String name = tbl.getTableName();
    org.apache.hadoop.hive.ql.metadata.Table mTbl = new org.apache.hadoop.hive.ql.metadata.Table(tbl);
    HMSHandler handler = tableEvent.getHandler();
    HiveConf hiveconf = handler.getHiveConf();
    Warehouse wh = new Warehouse(hiveconf);
    Path tblPath = new Path(tbl.getSd().getLocation());
    fs = wh.getFs(tblPath);
    Date now = new Date();
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
    String dateString = sdf.format(now);
    String exportPathString = hiveconf.getVar(HiveConf.ConfVars.METADATA_EXPORT_LOCATION);
    boolean moveMetadataToTrash = hiveconf.getBoolVar(HiveConf.ConfVars.MOVE_EXPORTED_METADATA_TO_TRASH);
    Path exportPath = null;
    if (exportPathString != null && exportPathString.length() == 0) {
        exportPath = fs.getHomeDirectory();
    } else {
        exportPath = new Path(exportPathString);
    }
    Path metaPath = new Path(exportPath, name + "." + dateString);
    LOG.info("Exporting the metadata of table " + tbl.toString() + " to path " + metaPath.toString());
    try {
        fs.mkdirs(metaPath);
    } catch (IOException e) {
        throw new MetaException(e.getMessage());
    }
    Path outFile = new Path(metaPath, name + EximUtil.METADATA_NAME);
    try {
        SessionState.getConsole().printInfo("Beginning metadata export");
        EximUtil.createExportDump(fs, outFile, mTbl, null, null);
        if (moveMetadataToTrash == true) {
            wh.deleteDir(metaPath, true);
        }
    } catch (IOException e) {
        throw new MetaException(e.getMessage());
    } catch (SemanticException e) {
        throw new MetaException(e.getMessage());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) Table(org.apache.hadoop.hive.metastore.api.Table) IOException(java.io.IOException) Date(java.util.Date) FileSystem(org.apache.hadoop.fs.FileSystem) HiveConf(org.apache.hadoop.hive.conf.HiveConf) SimpleDateFormat(java.text.SimpleDateFormat) HMSHandler(org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
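
export_meta_data is driven by two HiveConf settings: METADATA_EXPORT_LOCATION (an empty string sends the export to the FileSystem home directory) and MOVE_EXPORTED_METADATA_TO_TRASH. A hedged sketch of setting them programmatically (the path is hypothetical; the same values could equally be set in hive-site.xml):

HiveConf conf = new HiveConf();
// Hypothetical export location; an empty string would route exports to the home directory.
conf.setVar(HiveConf.ConfVars.METADATA_EXPORT_LOCATION, "/user/hive/metadata_exports");
// Keep the exported metadata instead of moving it to the trash after the dump.
conf.setBoolVar(HiveConf.ConfVars.MOVE_EXPORTED_METADATA_TO_TRASH, false);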

Example 95 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class GenSparkUtils method processFileSink.

public void processFileSink(GenSparkProcContext context, FileSinkOperator fileSink) throws SemanticException {
    ParseContext parseContext = context.parseContext;
    // is INSERT OVERWRITE TABLE
    boolean isInsertTable = GenMapRedUtils.isInsertInto(parseContext, fileSink);
    HiveConf hconf = parseContext.getConf();
    boolean chDir = GenMapRedUtils.isMergeRequired(context.moveTask, hconf, fileSink, context.currentTask, isInsertTable);
    // Set stats config for FileSinkOperators which are cloned from the fileSink
    List<FileSinkOperator> fileSinkList = context.fileSinkMap.get(fileSink);
    if (fileSinkList != null) {
        for (FileSinkOperator fsOp : fileSinkList) {
            fsOp.getConf().setGatherStats(fileSink.getConf().isGatherStats());
            fsOp.getConf().setStatsReliable(fileSink.getConf().isStatsReliable());
        }
    }
    Path finalName = GenMapRedUtils.createMoveTask(context.currentTask, chDir, fileSink, parseContext, context.moveTask, hconf, context.dependencyTask);
    if (chDir) {
        // Merge the files in the destination table/partitions by creating Map-only merge job
        // If underlying data is RCFile a RCFileBlockMerge task would be created.
        LOG.info("using CombineHiveInputformat for the merge job");
        GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName, context.dependencyTask, context.moveTask, hconf, context.currentTask);
    }
    FetchTask fetchTask = parseContext.getFetchTask();
    if (fetchTask != null && context.currentTask.getNumChild() == 0) {
        if (fetchTask.isFetchFrom(fileSink.getConf())) {
            context.currentTask.setFetchSource(true);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) HiveConf(org.apache.hadoop.hive.conf.HiveConf) FetchTask(org.apache.hadoop.hive.ql.exec.FetchTask)
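
Whether processFileSink adds a file-merge stage is decided by GenMapRedUtils.isMergeRequired against the HiveConf pulled from the ParseContext. A hedged sketch of toggling the Spark small-file merge flag that this path is expected to consult (HIVEMERGESPARKFILES is assumed to be the relevant ConfVars entry for hive.merge.sparkfiles):

HiveConf hconf = new HiveConf();
// Assumed ConfVars name; enables merging of small files produced by Spark tasks.
hconf.setBoolVar(HiveConf.ConfVars.HIVEMERGESPARKFILES, true);
boolean mergeSmallFiles = hconf.getBoolVar(HiveConf.ConfVars.HIVEMERGESPARKFILES);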

Aggregations

HiveConf (org.apache.hadoop.hive.conf.HiveConf): 404 usages
BeforeClass (org.junit.BeforeClass): 73 usages
Test (org.junit.Test): 66 usages
Path (org.apache.hadoop.fs.Path): 54 usages
Before (org.junit.Before): 50 usages
Driver (org.apache.hadoop.hive.ql.Driver): 46 usages
CliSessionState (org.apache.hadoop.hive.cli.CliSessionState): 44 usages
IOException (java.io.IOException): 39 usages
ArrayList (java.util.ArrayList): 37 usages
File (java.io.File): 31 usages
HashMap (java.util.HashMap): 26 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 26 usages
SessionState (org.apache.hadoop.hive.ql.session.SessionState): 22 usages
LinkedHashMap (java.util.LinkedHashMap): 17 usages
List (java.util.List): 16 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 15 usages
MiniHS2 (org.apache.hive.jdbc.miniHS2.MiniHS2): 14 usages
Map (java.util.Map): 12 usages
HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient): 12 usages
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 12 usages