
Example 6 with StatsWork

use of org.apache.hadoop.hive.ql.plan.StatsWork in project hive by apache.

the class LoadSemanticAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode ast) throws SemanticException {
    boolean isLocal = false;
    boolean isOverWrite = false;
    Tree fromTree = ast.getChild(0);
    Tree tableTree = ast.getChild(1);
    if (ast.getChildCount() == 4) {
        isLocal = true;
        isOverWrite = true;
    }
    if (ast.getChildCount() == 3) {
        if (ast.getChild(2).getText().toLowerCase().equals("local")) {
            isLocal = true;
        } else {
            isOverWrite = true;
        }
    }
    // initialize load path
    URI fromURI;
    try {
        String fromPath = stripQuotes(fromTree.getText());
        fromURI = initializeFromURI(fromPath, isLocal);
    } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(fromTree, e.getMessage()), e);
    } catch (URISyntaxException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(fromTree, e.getMessage()), e);
    }
    // initialize destination table/partition
    TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree);
    if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
        throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
    }
    if (ts.tableHandle.isNonNative()) {
        throw new SemanticException(ErrorMsg.LOAD_INTO_NON_NATIVE.getMsg());
    }
    if (ts.tableHandle.isStoredAsSubDirectories()) {
        throw new SemanticException(ErrorMsg.LOAD_INTO_STORED_AS_DIR.getMsg());
    }
    List<FieldSchema> parts = ts.tableHandle.getPartitionKeys();
    if ((parts != null && parts.size() > 0) && (ts.partSpec == null || ts.partSpec.size() == 0)) {
        throw new SemanticException(ErrorMsg.NEED_PARTITION_ERROR.getMsg());
    }
    List<String> bucketCols = ts.tableHandle.getBucketCols();
    if (bucketCols != null && !bucketCols.isEmpty()) {
        String error = StrictChecks.checkBucketing(conf);
        if (error != null)
            throw new SemanticException("Please load into an intermediate table" + " and use 'insert... select' to allow Hive to enforce bucketing. " + error);
    }
    // make sure the arguments make sense
    List<FileStatus> files = applyConstraintsAndGetFiles(fromURI, fromTree, isLocal);
    // for managed tables, make sure the file formats match
    if (TableType.MANAGED_TABLE.equals(ts.tableHandle.getTableType()) && conf.getBoolVar(HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
        ensureFileFormatsMatch(ts, files, fromURI);
    }
    inputs.add(toReadEntity(new Path(fromURI)));
    Task<? extends Serializable> rTask = null;
    // create final load/move work
    boolean preservePartitionSpecs = false;
    Map<String, String> partSpec = ts.getPartSpec();
    if (partSpec == null) {
        partSpec = new LinkedHashMap<String, String>();
        outputs.add(new WriteEntity(ts.tableHandle, (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
    } else {
        try {
            Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
            if (part != null) {
                if (isOverWrite) {
                    outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE));
                } else {
                    outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT));
                    // If partition already exists and we aren't overwriting it, then respect
                    // its current location info rather than picking it from the parent TableDesc
                    preservePartitionSpecs = true;
                }
            } else {
                outputs.add(new WriteEntity(ts.tableHandle, (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
            }
        } catch (HiveException e) {
            throw new SemanticException(e);
        }
    }
    LoadTableDesc loadTableWork;
    loadTableWork = new LoadTableDesc(new Path(fromURI), Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite);
    if (preservePartitionSpecs) {
        // Note: preservePartitionSpecs=true implies inheritTableSpecs=false, but
        // preservePartitionSpecs=false (the default) does not by itself give enough
        // information to set inheritTableSpecs=true
        loadTableWork.setInheritTableSpecs(false);
    }
    Task<? extends Serializable> childTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true, isLocal), conf);
    if (rTask != null) {
        rTask.addDependentTask(childTask);
    } else {
        rTask = childTask;
    }
    rootTasks.add(rTask);
    // The user asked for stats to be collected.
    // Some stats, like the number of rows, require a scan of the data;
    // others, like the number of files, do not require a complete scan.
    // Update only the stats which do not require a complete scan.
    Task<? extends Serializable> statTask = null;
    if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
        StatsWork statDesc = new StatsWork(loadTableWork);
        statDesc.setNoStatsAggregator(true);
        statDesc.setClearAggregatorStats(true);
        statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
        statTask = TaskFactory.get(statDesc, conf);
    }
    // HIVE-3334 has been filed for load file with index auto update
    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) {
        IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, getInputs(), conf);
        try {
            List<Task<? extends Serializable>> indexUpdateTasks = indexUpdater.generateUpdateTasks();
            for (Task<? extends Serializable> updateTask : indexUpdateTasks) {
                // LOAD DATA will either have a copy & move or just a move;
                // we always want the update to be dependent on the move
                childTask.addDependentTask(updateTask);
                if (statTask != null) {
                    updateTask.addDependentTask(statTask);
                }
            }
        } catch (HiveException e) {
            console.printInfo("WARNING: could not auto-update stale indexes, indexes are not out of sync");
        }
    } else if (statTask != null) {
        childTask.addDependentTask(statTask);
    }
}
Also used : MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) Task(org.apache.hadoop.hive.ql.exec.Task) Serializable(java.io.Serializable) FileStatus(org.apache.hadoop.fs.FileStatus) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) URISyntaxException(java.net.URISyntaxException) URI(java.net.URI) StatsWork(org.apache.hadoop.hive.ql.plan.StatsWork) Tree(org.antlr.runtime.tree.Tree) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) Path(org.apache.hadoop.fs.Path) Partition(org.apache.hadoop.hive.ql.metadata.Partition) IOException(java.io.IOException) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc)
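
The tail of analyzeInternal is where StatsWork enters the picture: when hive.stats.autogather is on, a scan-free StatsWork is chained behind the move task. Below is a minimal, self-contained sketch of just that wiring, using the same Hive calls as the method above; the wrapper class and the helper name chainAutoStats are hypothetical, not part of Hive.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
import org.apache.hadoop.hive.ql.plan.StatsWork;

public class AutoStatsWiring {

    // Hypothetical helper condensing the auto-gather wiring above. A LOAD moves
    // files without scanning them, so only scan-free stats (file count, raw size)
    // can be refreshed; the StatsWork therefore runs without a stats aggregator
    // and clears any previously aggregated stats that a scan would have produced.
    public static void chainAutoStats(Task<?> moveTask, LoadTableDesc loadTableWork, HiveConf conf) {
        if (!conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
            // auto-gather disabled: the move task is the whole plan
            return;
        }
        StatsWork statsWork = new StatsWork(loadTableWork);
        statsWork.setNoStatsAggregator(true);
        statsWork.setClearAggregatorStats(true);
        statsWork.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
        // the stats refresh runs only after the files have actually been moved
        moveTask.addDependentTask(TaskFactory.get(statsWork, conf));
    }
}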

Example 7 with StatsWork

use of org.apache.hadoop.hive.ql.plan.StatsWork in project hive by apache.

the class ProcessAnalyzeTable method process.

@SuppressWarnings("unchecked")
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs) throws SemanticException {
    GenTezProcContext context = (GenTezProcContext) procContext;
    TableScanOperator tableScan = (TableScanOperator) nd;
    ParseContext parseContext = context.parseContext;
    Class<? extends InputFormat> inputFormat = tableScan.getConf().getTableMetadata().getInputFormatClass();
    if (parseContext.getQueryProperties().isAnalyzeCommand()) {
        assert tableScan.getChildOperators() == null || tableScan.getChildOperators().size() == 0;
        String alias = null;
        for (String a : parseContext.getTopOps().keySet()) {
            if (tableScan == parseContext.getTopOps().get(a)) {
                alias = a;
            }
        }
        assert alias != null;
        TezWork tezWork = context.currentTask.getWork();
        if (inputFormat.equals(OrcInputFormat.class)) {
            // For ORC, all the following statements are the same
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS partialscan;
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
            // There will not be any Tez job above this task
            StatsNoJobWork snjWork = new StatsNoJobWork(tableScan.getConf().getTableMetadata().getTableSpec());
            snjWork.setStatsReliable(parseContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
            // If partition is specified, get pruned partition list
            Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
            if (confirmedParts.size() > 0) {
                Table source = tableScan.getConf().getTableMetadata();
                List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
                PrunedPartitionList partList = new PrunedPartitionList(source, confirmedParts, partCols, false);
                snjWork.setPrunedPartitionList(partList);
            }
            Task<StatsNoJobWork> snjTask = TaskFactory.get(snjWork, parseContext.getConf());
            snjTask.setParentTasks(null);
            context.rootTasks.remove(context.currentTask);
            context.rootTasks.add(snjTask);
            return true;
        } else {
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS;
            // The plan consists of a simple TezTask followed by a StatsTask.
            // The Tez task is just a simple TableScanOperator
            StatsWork statsWork = new StatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
            statsWork.setAggKey(tableScan.getConf().getStatsAggPrefix());
            statsWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir());
            statsWork.setSourceTask(context.currentTask);
            statsWork.setStatsReliable(parseContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
            Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseContext.getConf());
            context.currentTask.addDependentTask(statsTask);
            // The plan consists of a StatsTask only.
            if (parseContext.getQueryProperties().isNoScanAnalyzeCommand()) {
                statsTask.setParentTasks(null);
                statsWork.setNoScanAnalyzeCommand(true);
                context.rootTasks.remove(context.currentTask);
                context.rootTasks.add(statsTask);
            }
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS partialscan;
            if (parseContext.getQueryProperties().isPartialScanAnalyzeCommand()) {
                handlePartialScanCommand(tableScan, parseContext, statsWork, context, statsTask);
            }
            // NOTE: here we should use the new partition predicate pushdown API to
            // get the list of pruned partitions,
            // and pass it to setTaskPlan as the last parameter
            Set<Partition> confirmedPartns = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
            PrunedPartitionList partitions = null;
            if (confirmedPartns.size() > 0) {
                Table source = tableScan.getConf().getTableMetadata();
                List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
                partitions = new PrunedPartitionList(source, confirmedPartns, partCols, false);
            }
            MapWork w = utils.createMapWork(context, tableScan, tezWork, partitions);
            w.setGatheringStats(true);
            return true;
        }
    } else if (parseContext.getAnalyzeRewrite() != null) {
        // we need to collect table stats while collecting column stats.
        try {
            context.currentTask.addDependentTask(genTableStats(context, tableScan));
        } catch (HiveException e) {
            throw new SemanticException(e);
        }
    }
    return null;
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) StatsWork(org.apache.hadoop.hive.ql.plan.StatsWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) StatsNoJobWork(org.apache.hadoop.hive.ql.plan.StatsNoJobWork) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
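
The ORC special case above is worth isolating: ORC files carry enough footer metadata to answer basic stats without running a job, so the analyzer emits a StatsNoJobWork; any other format needs a scan, so it emits a StatsWork hanging off the Tez task. A condensed sketch of just that decision follows; the wrapper class and the helper name statsTaskFor are hypothetical, while the Hive calls are the ones used in the method above.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
import org.apache.hadoop.hive.ql.plan.StatsNoJobWork;
import org.apache.hadoop.hive.ql.plan.StatsWork;

public class AnalyzeStatsTaskChooser {

    // Hypothetical helper condensing the branch above: ORC answers basic stats
    // from file footers (no Tez job needed), every other format needs a scan
    // followed by stats aggregation under the given aggregation key.
    public static Task<?> statsTaskFor(Class<?> inputFormat, TableSpec tableSpec,
            String aggKey, HiveConf conf) {
        boolean reliable = conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE);
        if (OrcInputFormat.class.equals(inputFormat)) {
            // footer-based stats: this becomes a root task with no parents
            StatsNoJobWork snjWork = new StatsNoJobWork(tableSpec);
            snjWork.setStatsReliable(reliable);
            return TaskFactory.get(snjWork, conf);
        }
        // scan-based stats: the caller hangs this off the TezTask doing the scan
        StatsWork statsWork = new StatsWork(tableSpec);
        statsWork.setAggKey(aggKey);
        statsWork.setStatsReliable(reliable);
        return TaskFactory.get(statsWork, conf);
    }
}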

Example 8 with StatsWork

use of org.apache.hadoop.hive.ql.plan.StatsWork in project hive by apache.

the class ProcessAnalyzeTable method genTableStats.

private Task<?> genTableStats(GenTezProcContext context, TableScanOperator tableScan) throws HiveException {
    Class<? extends InputFormat> inputFormat = tableScan.getConf().getTableMetadata().getInputFormatClass();
    ParseContext parseContext = context.parseContext;
    Table table = tableScan.getConf().getTableMetadata();
    List<Partition> partitions = new ArrayList<>();
    if (table.isPartitioned()) {
        partitions.addAll(parseContext.getPrunedPartitions(tableScan).getPartitions());
        for (Partition partn : partitions) {
            LOG.debug("XXX: adding part: " + partn);
            context.outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK));
        }
    }
    TableSpec tableSpec = new TableSpec(table, partitions);
    tableScan.getConf().getTableMetadata().setTableSpec(tableSpec);
    if (inputFormat.equals(OrcInputFormat.class)) {
        // For ORC, there is no Tez Job for table stats.
        StatsNoJobWork snjWork = new StatsNoJobWork(tableScan.getConf().getTableMetadata().getTableSpec());
        snjWork.setStatsReliable(parseContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
        // If partition is specified, get pruned partition list
        if (partitions.size() > 0) {
            snjWork.setPrunedPartitionList(parseContext.getPrunedPartitions(tableScan));
        }
        return TaskFactory.get(snjWork, parseContext.getConf());
    } else {
        StatsWork statsWork = new StatsWork(tableScan.getConf().getTableMetadata().getTableSpec());
        statsWork.setAggKey(tableScan.getConf().getStatsAggPrefix());
        statsWork.setStatsTmpDir(tableScan.getConf().getTmpStatsDir());
        statsWork.setSourceTask(context.currentTask);
        statsWork.setStatsReliable(parseContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
        return TaskFactory.get(statsWork, parseContext.getConf());
    }
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) TableSpec(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec) Table(org.apache.hadoop.hive.ql.metadata.Table) StatsWork(org.apache.hadoop.hive.ql.plan.StatsWork) ArrayList(java.util.ArrayList) StatsNoJobWork(org.apache.hadoop.hive.ql.plan.StatsNoJobWork) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity)
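
Before choosing between the two work types, genTableStats also pins down exactly which partitions the stats update covers and registers each one as a lock-free write so post-execution hooks see it. A minimal sketch of that bookkeeping step, under the same Hive APIs; the wrapper class and the name specWithOutputs are hypothetical.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;

public class StatsPartitionBookkeeping {

    // Hypothetical helper mirroring the first half of genTableStats: collect the
    // pruned partitions, register each as a DDL_NO_LOCK write (a stats refresh
    // only touches metastore stats, so no table lock is needed), and build the
    // TableSpec that scopes the eventual stats task.
    public static TableSpec specWithOutputs(Table table, List<Partition> prunedPartitions,
            Set<WriteEntity> outputs) {
        List<Partition> partitions = new ArrayList<>();
        if (table.isPartitioned()) {
            partitions.addAll(prunedPartitions);
            for (Partition partn : partitions) {
                outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK));
            }
        }
        // for an unpartitioned table the partition list is simply empty
        return new TableSpec(table, partitions);
    }
}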

Aggregations

StatsWork (org.apache.hadoop.hive.ql.plan.StatsWork): 8
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 7
Table (org.apache.hadoop.hive.ql.metadata.Table): 6
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 4
MoveWork (org.apache.hadoop.hive.ql.plan.MoveWork): 4
StatsNoJobWork (org.apache.hadoop.hive.ql.plan.StatsNoJobWork): 4
Path (org.apache.hadoop.fs.Path): 3
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 3
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 3
OrcInputFormat (org.apache.hadoop.hive.ql.io.orc.OrcInputFormat): 3
LoadTableDesc (org.apache.hadoop.hive.ql.plan.LoadTableDesc): 3
URISyntaxException (java.net.URISyntaxException): 2
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
LinkedHashMap (java.util.LinkedHashMap): 2
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 2
Index (org.apache.hadoop.hive.metastore.api.Index): 2
HiveIndex (org.apache.hadoop.hive.ql.index.HiveIndex): 2
ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext): 2
PrunedPartitionList (org.apache.hadoop.hive.ql.parse.PrunedPartitionList): 2