Example 26 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class MergeJoinProc method process.

@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
    GenTezProcContext context = (GenTezProcContext) procCtx;
    CommonMergeJoinOperator mergeJoinOp = (CommonMergeJoinOperator) nd;
    if (stack.size() < 2) {
        // safety check before reading parentOp from the stack below; it is very unlikely that
        // the stack size is less than 2, i.e., that the MergeJoinOperator is the only operator on the stack.
        context.currentMergeJoinOperator = mergeJoinOp;
        return null;
    }
    TezWork tezWork = context.currentTask.getWork();
    @SuppressWarnings("unchecked") Operator<? extends OperatorDesc> parentOp = (Operator<? extends OperatorDesc>) ((stack.get(stack.size() - 2)));
    // we need to set the merge work that has been created as part of the dummy store walk. If a
    // merge work already exists for this merge join operator, add the dummy store work to the
    // merge work. Else create a merge work, add above work to the merge work
    MergeJoinWork mergeWork = null;
    if (context.opMergeJoinWorkMap.containsKey(mergeJoinOp)) {
        // we already have the merge work corresponding to this merge join operator
        mergeWork = context.opMergeJoinWorkMap.get(mergeJoinOp);
    } else {
        mergeWork = new MergeJoinWork();
        tezWork.add(mergeWork);
        context.opMergeJoinWorkMap.put(mergeJoinOp, mergeWork);
    }
    if (!(stack.get(stack.size() - 2) instanceof DummyStoreOperator)) {
        /* this may happen in one of the following cases:
      TS[0], FIL[26], SEL[2], DUMMY_STORE[30], MERGEJOIN[29]
                                              /
      TS[3], FIL[27], SEL[5], ---------------
      */
        context.currentMergeJoinOperator = mergeJoinOp;
        mergeWork.setTag(mergeJoinOp.getTagForOperator(parentOp));
        return null;
    }
    // Guaranteed to be just 1 because each DummyStoreOperator can be part of only one work.
    BaseWork parentWork = context.childToWorkMap.get(parentOp).get(0);
    mergeWork.addMergedWork(null, parentWork, context.leafOperatorToFollowingWork);
    mergeWork.setMergeJoinOperator(mergeJoinOp);
    tezWork.setVertexType(mergeWork, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES);
    for (BaseWork grandParentWork : tezWork.getParents(parentWork)) {
        TezEdgeProperty edgeProp = tezWork.getEdgeProperty(grandParentWork, parentWork);
        tezWork.disconnect(grandParentWork, parentWork);
        tezWork.connect(grandParentWork, mergeWork, edgeProp);
    }
    for (BaseWork childWork : tezWork.getChildren(parentWork)) {
        TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, childWork);
        tezWork.disconnect(parentWork, childWork);
        tezWork.connect(mergeWork, childWork, edgeProp);
    }
    tezWork.remove(parentWork);
    DummyStoreOperator dummyOp = (DummyStoreOperator) (stack.get(stack.size() - 2));
    parentWork.setTag(mergeJoinOp.getTagForOperator(dummyOp));
    mergeJoinOp.getParentOperators().remove(dummyOp);
    dummyOp.getChildOperators().clear();
    return true;
}
Also used : CommonMergeJoinOperator(org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) DummyStoreOperator(org.apache.hadoop.hive.ql.exec.DummyStoreOperator) MergeJoinWork(org.apache.hadoop.hive.ql.plan.MergeJoinWork) TezEdgeProperty(org.apache.hadoop.hive.ql.plan.TezEdgeProperty) GenTezProcContext(org.apache.hadoop.hive.ql.parse.GenTezProcContext) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
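
Processors such as MergeJoinProc are not invoked directly; the Tez compiler registers them with a rule dispatcher and walks the operator tree, calling process() whenever a rule matches. The sketch below shows that general registration pattern. It assumes the pre-Hive-4 org.apache.hadoop.hive.ql.lib names (NodeProcessor, Rule, Dispatcher; later renamed to Semantic* variants), the "MERGEJOIN%" operator-name pattern, and the hypothetical class/method names MergeJoinRuleWiring and walkTezPlan; the real wiring lives in Hive's Tez compiler and uses its own specialized walker.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.GenTezProcContext;
import org.apache.hadoop.hive.ql.parse.MergeJoinProc;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public final class MergeJoinRuleWiring {
    // Hypothetical helper: register MergeJoinProc under a rule that fires on
    // CommonMergeJoinOperator nodes (the "MERGEJOIN%" pattern is assumed here)
    // and walk the plan starting from the given root operators.
    public static void walkTezPlan(List<Operator<?>> rootOps, GenTezProcContext procCtx)
            throws SemanticException {
        Map<Rule, NodeProcessor> rules = new LinkedHashMap<>();
        rules.put(new RuleRegExp("Handle merge joins", "MERGEJOIN%"), new MergeJoinProc());

        // no default processor; only matching rules fire
        Dispatcher dispatcher = new DefaultRuleDispatcher(null, rules, procCtx);
        GraphWalker walker = new DefaultGraphWalker(dispatcher);

        List<Node> topNodes = new ArrayList<>(rootOps);
        walker.startWalking(topNodes, new HashMap<Node, Object>());
    }
}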

Example 27 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class ReduceSinkMapJoinProc method processReduceSinkToHashJoin.

public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
    // remove the tag for in-memory side of mapjoin
    parentRS.getConf().setSkipTag(true);
    parentRS.setSkipTag(true);
    // Mark this small table as being processed
    if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
    }
    List<BaseWork> mapJoinWork = null;
    /*
     *  If there is pre-existing work generated for the big-table side of the mapjoin,
     *  we need to hook the work generated for the RS (associated with the RS-MJ pattern)
     *  into that pre-existing work.
     *
     *  Otherwise, we need to remember that the mapjoin op
     *  still has to be linked to the RS work (associated with the RS-MJ pattern).
     */
    mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
    BaseWork parentWork = getMapJoinParentWork(context, parentRS);
    // set the link between mapjoin and parent vertex
    int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
    if (pos == -1) {
        throw new SemanticException("Cannot find position of parent in mapjoin");
    }
    MapJoinDesc joinConf = mapJoinOp.getConf();
    long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
    long tableSize = Long.MAX_VALUE;
    Statistics stats = parentRS.getStatistics();
    if (stats != null) {
        keyCount = rowCount = stats.getNumRows();
        if (keyCount <= 0) {
            keyCount = rowCount = Long.MAX_VALUE;
        }
        tableSize = stats.getDataSize();
        List<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
        if (keyCols != null && !keyCols.isEmpty()) {
            // See if we can arrive at a smaller number using distinct stats from key columns.
            long maxKeyCount = 1;
            String prefix = Utilities.ReduceField.KEY.toString();
            for (String keyCol : keyCols) {
                ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
                ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
                if (cs == null || cs.getCountDistint() <= 0) {
                    maxKeyCount = Long.MAX_VALUE;
                    break;
                }
                maxKeyCount *= cs.getCountDistint();
                if (maxKeyCount >= keyCount) {
                    break;
                }
            }
            keyCount = Math.min(maxKeyCount, keyCount);
        }
        if (joinConf.isBucketMapJoin()) {
            OpTraits opTraits = mapJoinOp.getOpTraits();
            bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
            if (bucketCount > 0) {
                // We cannot obtain a better estimate without CustomPartitionVertex providing it
                // to us somehow; in which case using statistics would be completely unnecessary.
                keyCount /= bucketCount;
                tableSize /= bucketCount;
            }
        } else if (joinConf.isDynamicPartitionHashJoin()) {
            // For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
            bucketCount = parentRS.getConf().getNumReducers();
            keyCount /= bucketCount;
            tableSize /= bucketCount;
        }
    }
    if (keyCount == 0) {
        keyCount = 1;
    }
    if (tableSize == 0) {
        tableSize = 1;
    }
    LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
    joinConf.getParentToInput().put(pos, parentWork.getName());
    if (keyCount != Long.MAX_VALUE) {
        joinConf.getParentKeyCounts().put(pos, keyCount);
    }
    joinConf.getParentDataSizes().put(pos, tableSize);
    int numBuckets = -1;
    EdgeType edgeType = EdgeType.BROADCAST_EDGE;
    if (joinConf.isBucketMapJoin()) {
        numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
        /*
       * Here, we can be in one of 4 states.
       *
       * 1. If map join work is null implies that we have not yet traversed the big table side. We
       * just need to see if we can find a reduce sink operator in the big table side. This would
       * imply a reduce side operation.
       *
       * 2. If we don't find a reduce sink in 1, it has to be the case that it is a map side operation.
       *
       * 3. If we have already created a work item for the big table side, we need to see if we can
       * find a table scan operator in the big table side. This would imply a map side operation.
       *
       * 4. If we don't find a table scan operator, it has to be a reduce side operation.
       */
        if (mapJoinWork == null) {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
            if (rootOp == null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        } else {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
            if (rootOp != null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        }
    } else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        if (parentRS.getConf().isForwarding()) {
            edgeType = EdgeType.ONE_TO_ONE_EDGE;
        } else {
            edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
        }
    }
    if (edgeType == EdgeType.CUSTOM_EDGE) {
        // disable auto parallelism for bucket map joins
        parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
    }
    TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            // link the work with the work associated with the reduce sink that triggered this rule
            TezWork tezWork = context.currentTask.getWork();
            LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
            tezWork.connect(parentWork, myWork, edgeProp);
            if (edgeType == EdgeType.CUSTOM_EDGE) {
                tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
            }
            ReduceSinkOperator r = null;
            if (context.connectedReduceSinks.contains(parentRS)) {
                LOG.debug("Cloning reduce sink " + parentRS + " for multi-child broadcast edge");
                // we've already set this one up. Need to clone for the next work.
                r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
                context.clonedReduceSinks.add(r);
            } else {
                r = parentRS;
            }
            // remember the output name of the reduce sink
            r.getConf().setOutputName(myWork.getName());
            context.connectedReduceSinks.add(r);
        }
    }
    // remember in case we need to connect additional work later
    Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
    if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
        linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
    } else {
        linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
    }
    linkWorkMap.put(parentWork, edgeProp);
    context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
    List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
    if (reduceSinks == null) {
        reduceSinks = new ArrayList<ReduceSinkOperator>();
    }
    reduceSinks.add(parentRS);
    context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
    // create the dummy operators
    List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
    // create a new operator: HashTableDummyOperator, which shares the table desc
    HashTableDummyDesc desc = new HashTableDummyDesc();
    HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
    TableDesc tbl;
    // need to create the correct table descriptor for key/value
    RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
    tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
    dummyOp.getConf().setTbl(tbl);
    Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
    List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
    StringBuilder keyOrder = new StringBuilder();
    StringBuilder keyNullOrder = new StringBuilder();
    for (ExprNodeDesc k : keyCols) {
        keyOrder.append("+");
        keyNullOrder.append(NullOrdering.defaultNullOrder(context.conf).getSign());
    }
    TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
    mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
    // let the dummy op be the parent of mapjoin op
    mapJoinOp.replaceParent(parentRS, dummyOp);
    List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
    dummyChildren.add(mapJoinOp);
    dummyOp.setChildOperators(dummyChildren);
    dummyOperators.add(dummyOp);
    // cut the operator tree so as to not retain connections from the parent RS downstream
    List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
    int childIndex = childOperators.indexOf(mapJoinOp);
    childOperators.remove(childIndex);
    // the work needs to know about the dummy operators; they have to be separately initialized at task startup
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
            myWork.addDummyOp(dummyOp);
        }
    }
    if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
        for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
            dummyOperators.add(op);
        }
    }
    context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
    return true;
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) OpTraits(org.apache.hadoop.hive.ql.plan.OpTraits) TezEdgeProperty(org.apache.hadoop.hive.ql.plan.TezEdgeProperty) ArrayList(java.util.ArrayList) ColStatistics(org.apache.hadoop.hive.ql.plan.ColStatistics) List(java.util.List) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HashTableDummyDesc(org.apache.hadoop.hive.ql.plan.HashTableDummyDesc) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) Statistics(org.apache.hadoop.hive.ql.plan.Statistics) EdgeType(org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
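
The key-count estimate above is just the product of the join-key NDVs, capped at the table's row count and then divided by the bucket (or reducer) count. A standalone illustration of that arithmetic, using made-up numbers rather than real Hive statistics:

public class KeyCountEstimate {
    public static void main(String[] args) {
        long rowCount = 1_000_000L;              // rows in the small table (from Statistics)
        long[] keyColumnNdvs = {1_000L, 50L};    // hypothetical distinct counts of the join keys
        int bucketCount = 10;                    // e.g. a bucket map join with 10 buckets

        // multiply per-column NDVs, but stop once we exceed the row count
        long maxKeyCount = 1L;
        for (long ndv : keyColumnNdvs) {
            maxKeyCount *= ndv;                  // 1,000 * 50 = 50,000
            if (maxKeyCount >= rowCount) {
                break;
            }
        }

        long keyCount = Math.min(maxKeyCount, rowCount);  // 50,000 distinct keys estimated
        keyCount /= bucketCount;                           // 5,000 keys per bucket/task
        System.out.println("estimated keys per hash table: " + keyCount);
    }
}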

Example 28 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class GenMapRedUtils method finalMapWorkChores.

/**
 * Called at the end of TaskCompiler::compile
 * This currently does the following for each map work
 *   1.  Intern the table descriptors of the partitions
 *   2.  Derive final explain attributes based on previous compilation.
 *
 * The original implementation had 2 functions internTableDesc and deriveFinalExplainAttributes,
 * respectively implementing 1 and 2 mentioned above.  This was done using recursion over the
 * task graph.  The recursion was inefficient in a couple of ways.
 *   - For large graphs the recursion was filling up the stack
 *   - Instead of finding the map works directly, it walked all possible paths from the root,
 *     causing a huge performance problem.
 *
 * This implementation combines internTableDesc and deriveFinalExplainAttributes into 1 call.
 * This can be done because each refers to information within Map Work and performs a specific
 * action.
 *
 * The revised implementation generates all the map works from all MapReduce tasks (getMRTasks),
 * Spark tasks (getSparkTasks) and Tez tasks (getTezTasks).  Then, for each of those map works,
 * it invokes the respective call.  getMRTasks, getSparkTasks and getTezTasks iteratively walk
 * the task graph to find the respective map works.
 *
 * The iterative implementation of these functions was done as part of HIVE-17195.  Before
 * HIVE-17195, these functions were recursive and had the same issue.  So, picking this patch
 * for an older release will also require picking HIVE-17195 at the least.
 */
public static void finalMapWorkChores(List<Task<?>> tasks, Configuration conf, Interner<TableDesc> interner) {
    List<ExecDriver> mrTasks = Utilities.getMRTasks(tasks);
    if (!mrTasks.isEmpty()) {
        for (ExecDriver execDriver : mrTasks) {
            execDriver.getWork().getMapWork().internTable(interner);
            execDriver.getWork().getMapWork().deriveLlap(conf, true);
        }
    }
    List<TezTask> tezTasks = Utilities.getTezTasks(tasks);
    if (!tezTasks.isEmpty()) {
        for (TezTask tezTask : tezTasks) {
            if (tezTask.getWork() instanceof TezWork) {
                TezWork work = tezTask.getWork();
                for (BaseWork w : work.getAllWorkUnsorted()) {
                    if (w instanceof MapWork) {
                        ((MapWork) w).internTable(interner);
                        ((MapWork) w).deriveLlap(conf, false);
                    }
                }
            }
        }
    }
    List<SparkTask> sparkTasks = Utilities.getSparkTasks(tasks);
    if (!sparkTasks.isEmpty()) {
        for (SparkTask sparkTask : sparkTasks) {
            SparkWork work = sparkTask.getWork();
            for (BaseWork w : work.getAllWorkUnsorted()) {
                if (w instanceof MapWork) {
                    ((MapWork) w).internTable(interner);
                    ((MapWork) w).deriveLlap(conf, false);
                }
            }
        }
    }
}
Also used : MapWork(org.apache.hadoop.hive.ql.plan.MapWork) SparkTask(org.apache.hadoop.hive.ql.exec.spark.SparkTask) ExecDriver(org.apache.hadoop.hive.ql.exec.mr.ExecDriver) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
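
The javadoc above credits HIVE-17195 with replacing recursion by an iterative walk over the task graph. Below is a generic sketch of that pattern, not the actual Utilities.getTezTasks implementation; the class and method names are hypothetical, and it assumes Task.getDependentTasks() is the accessor for downstream tasks.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;

public final class TaskGraphWalk {
    // Breadth-first walk from the root tasks, collecting every TezTask exactly once.
    public static List<TezTask> collectTezTasks(List<Task<?>> rootTasks) {
        List<TezTask> found = new ArrayList<>();
        Set<Task<?>> visited = new HashSet<>();
        Deque<Task<?>> queue = new ArrayDeque<>(rootTasks);
        while (!queue.isEmpty()) {
            Task<?> task = queue.poll();
            if (!visited.add(task)) {
                continue;                      // shared subgraphs are visited only once
            }
            if (task instanceof TezTask) {
                found.add((TezTask) task);
            }
            if (task.getDependentTasks() != null) {
                queue.addAll(task.getDependentTasks());
            }
        }
        return found;
    }
}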

Example 29 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class GenMapRedUtils method isMergeRequired.

/**
 * Returns true iff the fsOp requires a merge
 */
public static boolean isMergeRequired(List<Task<MoveWork>> mvTasks, HiveConf hconf, FileSinkOperator fsOp, Task<?> currTask, boolean isInsertTable) {
    // Has the user enabled merging of files for map-only jobs or for all jobs
    if (mvTasks == null || mvTasks.isEmpty()) {
        return false;
    }
    // no need of merging if the move is to a local file system
    // We are looking based on the original FSOP, so use the original path as is.
    MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTaskForFsopOutput(mvTasks, fsOp.getConf().getFinalDirName(), fsOp.getConf().isMmTable(), fsOp.getConf().isDirectInsert(), fsOp.getConf().getMoveTaskId(), fsOp.getConf().getAcidOperation());
    // TODO: wtf?!! why is this in this method? This has nothing to do with anything.
    if (isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && !fsOp.getConf().isMaterialization()) {
        // mark the MapredWork and FileSinkOperator for gathering stats
        fsOp.getConf().setGatherStats(true);
        fsOp.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE));
        if (mvTask != null && !mvTask.hasFollowingStatsTask()) {
            GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf);
        }
    }
    if (mvTask == null || mvTask.isLocal() || !fsOp.getConf().canBeMerged()) {
        return false;
    }
    if (currTask.getWork() instanceof TezWork) {
        // tez blurs the boundary between map and reduce, thus it has its own config
        return hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES);
    } else if (currTask.getWork() instanceof SparkWork) {
        // spark has its own config for merging
        return hconf.getBoolVar(ConfVars.HIVEMERGESPARKFILES);
    }
    return isMergeRequiredForMr(hconf, fsOp, currTask);
}
Also used : MoveTask(org.apache.hadoop.hive.ql.exec.MoveTask) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
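
For Tez and Spark the merge decision therefore hinges on the two HiveConf flags checked above. A small sketch of toggling and reading them programmatically (equivalent to SET hive.merge.tezfiles=true; in a session); the class name is hypothetical.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class MergeFlagsExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();

        // enable small-file merging for Tez DAGs (hive.merge.tezfiles)
        conf.setBoolVar(ConfVars.HIVEMERGETEZFILES, true);
        // Spark has its own flag (hive.merge.sparkfiles)
        conf.setBoolVar(ConfVars.HIVEMERGESPARKFILES, false);

        // this is the check isMergeRequired performs for a Tez task
        boolean mergeForTez = conf.getBoolVar(ConfVars.HIVEMERGETEZFILES);
        System.out.println("merge Tez output files: " + mergeForTez);
    }
}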

Example 30 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class ProcessAnalyzeTable method process.

@SuppressWarnings("unchecked")
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs) throws SemanticException {
    GenTezProcContext context = (GenTezProcContext) procContext;
    TableScanOperator tableScan = (TableScanOperator) nd;
    ParseContext parseContext = context.parseContext;
    Table table = tableScan.getConf().getTableMetadata();
    Class<? extends InputFormat> inputFormat = table.getInputFormatClass();
    if (parseContext.getQueryProperties().isAnalyzeCommand()) {
        assert tableScan.getChildOperators() == null || tableScan.getChildOperators().size() == 0;
        String alias = null;
        for (String a : parseContext.getTopOps().keySet()) {
            if (tableScan == parseContext.getTopOps().get(a)) {
                alias = a;
            }
        }
        assert alias != null;
        TezWork tezWork = context.currentTask.getWork();
        if (BasicStatsNoJobTask.canUseBasicStats(table, inputFormat)) {
            // For ORC, Parquet and Iceberg tables, all the following statements are the same
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
            // There will not be any Tez job above this task
            StatsWork statWork = new StatsWork(table, parseContext.getConf());
            statWork.setFooterScan();
            // If partition is specified, get pruned partition list
            Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
            if (confirmedParts.size() > 0) {
                List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
                PrunedPartitionList partList = new PrunedPartitionList(table, confirmedParts, partCols, false);
                statWork.addInputPartitions(partList.getPartitions());
            }
            Task<StatsWork> snjTask = TaskFactory.get(statWork);
            snjTask.setParentTasks(null);
            context.rootTasks.remove(context.currentTask);
            context.rootTasks.add(snjTask);
            return true;
        } else {
            // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS;
            // The plan consists of a simple TezTask followed by a StatsTask.
            // The Tez task is just a simple TableScanOperator
            BasicStatsWork basicStatsWork = new BasicStatsWork(table.getTableSpec());
            basicStatsWork.setIsExplicitAnalyze(true);
            basicStatsWork.setNoScanAnalyzeCommand(parseContext.getQueryProperties().isNoScanAnalyzeCommand());
            StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, parseContext.getConf());
            columnStatsWork.collectStatsFromAggregator(tableScan.getConf());
            columnStatsWork.setSourceTask(context.currentTask);
            Task<StatsWork> statsTask = TaskFactory.get(columnStatsWork);
            context.currentTask.addDependentTask(statsTask);
            // The plan consists of a StatsTask only.
            if (parseContext.getQueryProperties().isNoScanAnalyzeCommand()) {
                statsTask.setParentTasks(null);
                context.rootTasks.remove(context.currentTask);
                context.rootTasks.add(statsTask);
            }
            // NOTE: here we should use the new partition predicate pushdown API to
            // get a pruned partition list,
            // and pass it to setTaskPlan as the last parameter
            Set<Partition> confirmedPartns = GenMapRedUtils.getConfirmedPartitionsForScan(tableScan);
            PrunedPartitionList partitions = null;
            if (confirmedPartns.size() > 0) {
                List<String> partCols = GenMapRedUtils.getPartitionColumns(tableScan);
                partitions = new PrunedPartitionList(table, confirmedPartns, partCols, false);
            }
            MapWork w = utils.createMapWork(context, tableScan, tezWork, partitions);
            w.setGatheringStats(true);
            return true;
        }
    }
    return null;
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Table(org.apache.hadoop.hive.ql.metadata.Table) StatsWork(org.apache.hadoop.hive.ql.plan.StatsWork) BasicStatsWork(org.apache.hadoop.hive.ql.plan.BasicStatsWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) BasicStatsWork(org.apache.hadoop.hive.ql.plan.BasicStatsWork) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
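
The two code paths above correspond to the ANALYZE statements listed in the comments. A minimal sketch of submitting such statements over HiveServer2 JDBC; the connection URL, credentials, table name and partition spec are all hypothetical.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class AnalyzeTableExample {
    public static void main(String[] args) throws SQLException {
        // hypothetical HiveServer2 endpoint and table
        String url = "jdbc:hive2://localhost:10000/default";
        try (Connection conn = DriverManager.getConnection(url, "hive", "");
             Statement stmt = conn.createStatement()) {
            // may be satisfied from file footers when basic stats can be read from metadata
            stmt.execute("ANALYZE TABLE t PARTITION (ds='2024-01-01') COMPUTE STATISTICS NOSCAN");
            // otherwise a Tez task scans the table and a StatsTask publishes the stats
            stmt.execute("ANALYZE TABLE t COMPUTE STATISTICS");
        }
    }
}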

Aggregations

TezWork (org.apache.hadoop.hive.ql.plan.TezWork): 31
BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork): 16
MapWork (org.apache.hadoop.hive.ql.plan.MapWork): 13
TezTask (org.apache.hadoop.hive.ql.exec.tez.TezTask): 11
Task (org.apache.hadoop.hive.ql.exec.Task): 9
ArrayList (java.util.ArrayList): 8
SparkWork (org.apache.hadoop.hive.ql.plan.SparkWork): 8
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 7
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 7
MapredWork (org.apache.hadoop.hive.ql.plan.MapredWork): 7
List (java.util.List): 6
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 6
ConditionalTask (org.apache.hadoop.hive.ql.exec.ConditionalTask): 6
Operator (org.apache.hadoop.hive.ql.exec.Operator): 6
ReduceWork (org.apache.hadoop.hive.ql.plan.ReduceWork): 6
Serializable (java.io.Serializable): 5
LinkedList (java.util.LinkedList): 5
Path (org.apache.hadoop.fs.Path): 5
MoveWork (org.apache.hadoop.hive.ql.plan.MoveWork): 5
IOException (java.io.IOException): 4