Example 1 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class GenMapRedUtils method createMRWorkForMergingFiles.

/**
   * @param fsInput The FileSink operator.
   * @param finalName the final destination path the merge job should output.
   * @param dependencyTask
   * @param mvTasks
   * @param conf
   * @param currTask
   * @throws SemanticException

   * create a Map-only merge job using CombineHiveInputFormat for all partitions with the
   * following operators:
   *          MR job J0:
   *          ...
   *          |
   *          v
   *          FileSinkOperator_1 (fsInput)
   *          |
   *          v
   *          Merge job J1:
   *          |
   *          v
   *          TableScan (using CombineHiveInputFormat) (tsMerge)
   *          |
   *          v
   *          FileSinkOperator (fsMerge)
   *
   *          Here the pathToPartitionInfo & pathToAlias will remain the same, which means the
   *          paths do not contain the dynamic partitions (their parent). So after the dynamic
   *          partitions are created (after the first job finishes, before the moveTask or
   *          ConditionalTask starts), we need to change the pathToPartitionInfo & pathToAlias
   *          to include the dynamic partition directories.
   *
   */
public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, Path finalName, DependencyCollectionTask dependencyTask, List<Task<MoveWork>> mvTasks, HiveConf conf, Task<? extends Serializable> currTask) throws SemanticException {
    //
    // 1. create the operator tree
    //
    FileSinkDesc fsInputDesc = fsInput.getConf();
    // Create a TableScan operator
    RowSchema inputRS = fsInput.getSchema();
    TableScanOperator tsMerge = GenMapRedUtils.createTemporaryTableScanOperator(fsInput.getCompilationOpContext(), inputRS);
    // Create a FileSink operator
    TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone();
    FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT));
    FileSinkOperator fsOutput = (FileSinkOperator) OperatorFactory.getAndMakeChild(fsOutputDesc, inputRS, tsMerge);
    // If the input FileSinkOperator has dynamic partitioning enabled, the tsMerge input schema
    // needs to include the partition columns, and the fsOutput should have
    // a DynamicPartitionCtx to indicate that it needs to be dynamically partitioned.
    DynamicPartitionCtx dpCtx = fsInputDesc.getDynPartCtx();
    if (dpCtx != null && dpCtx.getNumDPCols() > 0) {
        // adding DP ColumnInfo to the RowSchema signature
        ArrayList<ColumnInfo> signature = inputRS.getSignature();
        String tblAlias = fsInputDesc.getTableInfo().getTableName();
        for (String dpCol : dpCtx.getDPColNames()) {
            // all partition column types should be string; partition columns are virtual columns
            ColumnInfo colInfo = new ColumnInfo(dpCol, TypeInfoFactory.stringTypeInfo, tblAlias, true);
            signature.add(colInfo);
        }
        inputRS.setSignature(signature);
        // create another DynamicPartitionCtx, which has a different input-to-DP column mapping
        DynamicPartitionCtx dpCtx2 = new DynamicPartitionCtx(dpCtx);
        fsOutputDesc.setDynPartCtx(dpCtx2);
        // update the FileSinkOperator to include partition columns
        usePartitionColumns(fsInputDesc.getTableInfo().getProperties(), dpCtx.getDPColNames());
    } else {
        // non-partitioned table
        fsInputDesc.getTableInfo().getProperties().remove(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS);
    }
    //
    // 2. Constructing a conditional task consisting of a move task and a map reduce task
    //
    MoveWork dummyMv = new MoveWork(null, null, null, new LoadFileDesc(fsInputDesc.getFinalDirName(), finalName, true, null, null), false);
    MapWork cplan;
    Serializable work;
    if ((conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) || (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class))) {
        cplan = GenMapRedUtils.createMergeTask(fsInputDesc, finalName, dpCtx != null && dpCtx.getNumDPCols() > 0, fsInput.getCompilationOpContext());
        if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
            work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf);
            cplan.setName("File Merge");
            ((TezWork) work).add(cplan);
        } else if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
            work = new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
            cplan.setName("Spark Merge File Work");
            ((SparkWork) work).add(cplan);
        } else {
            work = cplan;
        }
    } else {
        cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc);
        if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
            work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf);
            cplan.setName("File Merge");
            ((TezWork) work).add(cplan);
        } else if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
            work = new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
            cplan.setName("Spark Merge File Work");
            ((SparkWork) work).add(cplan);
        } else {
            work = new MapredWork();
            ((MapredWork) work).setMapWork(cplan);
        }
    }
    // use CombineHiveInputFormat for map-only merging
    cplan.setInputformat("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat");
    // NOTE: we should gather stats in MR1 rather than MR2 of the merge job, since we don't
    // know whether merge MR2 will be triggered at execution time
    Task<MoveWork> mvTask = GenMapRedUtils.findMoveTask(mvTasks, fsOutput);
    ConditionalTask cndTsk = GenMapRedUtils.createCondTask(conf, currTask, dummyMv, work, fsInputDesc.getFinalDirName(), finalName, mvTask, dependencyTask);
    // keep the dynamic partition context in conditional task resolver context
    ConditionalResolverMergeFilesCtx mrCtx = (ConditionalResolverMergeFilesCtx) cndTsk.getResolverCtx();
    mrCtx.setDPCtx(fsInputDesc.getDynPartCtx());
    mrCtx.setLbCtx(fsInputDesc.getLbCtx());
}
Also used : MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc) Serializable(java.io.Serializable) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) DynamicPartitionCtx(org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) ConditionalResolverMergeFilesCtx(org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) RCFileInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) ConditionalTask(org.apache.hadoop.hive.ql.exec.ConditionalTask) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
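
Both branches above follow the same engine-dispatch pattern: build a MapWork, then wrap it in the container the configured execution engine expects. Below is a minimal, self-contained sketch of that pattern. All types here are simplified stand-ins for illustration, not the real Hive classes; note that in the block-merge branch the MR fallback returns the bare MapWork, while this sketch follows the second branch and wraps it in a MapredWork.

// Minimal stand-ins for the Hive plan classes; illustrative only.
import java.util.ArrayList;
import java.util.List;

interface Work { }

class MapWork implements Work {
    String name;
    void setName(String n) { this.name = n; }
}

class TezWork implements Work {
    final List<MapWork> vertices = new ArrayList<>();
    void add(MapWork w) { vertices.add(w); }
}

class SparkWork implements Work {
    final List<MapWork> units = new ArrayList<>();
    void add(MapWork w) { units.add(w); }
}

class MapredWork implements Work {
    MapWork mapWork;
    void setMapWork(MapWork w) { this.mapWork = w; }
}

public class EngineDispatch {

    // Mirrors the if/else chain above: wrap the map-only plan in the
    // container the configured execution engine expects.
    static Work wrapForEngine(String engine, MapWork cplan) {
        if ("tez".equals(engine)) {
            TezWork tez = new TezWork();
            cplan.setName("File Merge");
            tez.add(cplan);
            return tez;
        } else if ("spark".equals(engine)) {
            SparkWork spark = new SparkWork();
            cplan.setName("Spark Merge File Work");
            spark.add(cplan);
            return spark;
        } else {
            // MR fallback; the block-merge branch returns the bare MapWork instead.
            MapredWork mr = new MapredWork();
            mr.setMapWork(cplan);
            return mr;
        }
    }

    public static void main(String[] args) {
        System.out.println(wrapForEngine("tez", new MapWork()).getClass().getSimpleName());
    }
}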

Example 2 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class CrossProductCheck method dispatch.

@Override
public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
    @SuppressWarnings("unchecked") Task<? extends Serializable> currTask = (Task<? extends Serializable>) nd;
    if (currTask instanceof MapRedTask) {
        MapRedTask mrTsk = (MapRedTask) currTask;
        MapredWork mrWrk = mrTsk.getWork();
        checkMapJoins(mrTsk);
        checkMRReducer(currTask.toString(), mrWrk);
    } else if (currTask instanceof ConditionalTask) {
        List<Task<? extends Serializable>> taskListInConditionalTask = ((ConditionalTask) currTask).getListTasks();
        for (Task<? extends Serializable> tsk : taskListInConditionalTask) {
            dispatch(tsk, stack, nodeOutputs);
        }
    } else if (currTask instanceof TezTask) {
        TezTask tzTask = (TezTask) currTask;
        TezWork tzWrk = tzTask.getWork();
        checkMapJoins(tzWrk);
        checkTezReducer(tzWrk);
    }
    return null;
}
Also used : Task(org.apache.hadoop.hive.ql.exec.Task) MapRedTask(org.apache.hadoop.hive.ql.exec.mr.MapRedTask) ConditionalTask(org.apache.hadoop.hive.ql.exec.ConditionalTask) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) Serializable(java.io.Serializable) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) ArrayList(java.util.ArrayList) List(java.util.List) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
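
The dispatch method is a type-directed visitor: it branches on the concrete task class and recurses into the alternatives of a ConditionalTask. A minimal sketch of that shape, using simplified stand-in task classes rather than the real Hive hierarchy:

// Simplified stand-ins for the Hive task hierarchy; illustrative only.
import java.util.Arrays;
import java.util.List;

abstract class Task { }

class MapRedTask extends Task { }

class TezTask extends Task { }

class ConditionalTask extends Task {
    private final List<Task> listTasks;
    ConditionalTask(Task... tasks) { this.listTasks = Arrays.asList(tasks); }
    List<Task> getListTasks() { return listTasks; }
}

public class TaskDispatch {

    // Mirrors CrossProductCheck.dispatch: branch on the concrete task type
    // and recurse into the alternatives of a ConditionalTask.
    static void dispatch(Task t) {
        if (t instanceof MapRedTask) {
            System.out.println("check map joins and MR reducer");
        } else if (t instanceof ConditionalTask) {
            for (Task branch : ((ConditionalTask) t).getListTasks()) {
                dispatch(branch); // a branch may itself be conditional
            }
        } else if (t instanceof TezTask) {
            System.out.println("check map joins and Tez reducers");
        }
    }

    public static void main(String[] args) {
        dispatch(new ConditionalTask(new MapRedTask(), new TezTask()));
    }
}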

Example 3 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class TezCompiler method setInputFormat.

@Override
protected void setInputFormat(Task<? extends Serializable> task) {
    if (task instanceof TezTask) {
        TezWork work = ((TezTask) task).getWork();
        List<BaseWork> all = work.getAllWork();
        for (BaseWork w : all) {
            if (w instanceof MapWork) {
                MapWork mapWork = (MapWork) w;
                HashMap<String, Operator<? extends OperatorDesc>> opMap = mapWork.getAliasToWork();
                if (!opMap.isEmpty()) {
                    for (Operator<? extends OperatorDesc> op : opMap.values()) {
                        setInputFormat(mapWork, op);
                    }
                }
            }
        }
    } else if (task instanceof ConditionalTask) {
        List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task).getListTasks();
        for (Task<? extends Serializable> tsk : listTasks) {
            setInputFormat(tsk);
        }
    }
    if (task.getChildTasks() != null) {
        for (Task<? extends Serializable> childTask : task.getChildTasks()) {
            setInputFormat(childTask);
        }
    }
}
Also used : CommonMergeJoinOperator(org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) TezDummyStoreOperator(org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator) AppMasterEventOperator(org.apache.hadoop.hive.ql.exec.AppMasterEventOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) DummyStoreOperator(org.apache.hadoop.hive.ql.exec.DummyStoreOperator) ConditionalTask(org.apache.hadoop.hive.ql.exec.ConditionalTask) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) Task(org.apache.hadoop.hive.ql.exec.Task) Serializable(java.io.Serializable) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) List(java.util.List) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
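
setInputFormat descends into both ConditionalTask branches and child tasks. Because a task can be reachable from more than one parent, the plain recursion may visit the same node repeatedly (harmless here, since setting the input format is idempotent). The sketch below shows the same traversal with an explicit visited set added as a guard; the Node type is a stand-in, not a Hive class.

// Stand-in task graph; illustrative only, not the Hive classes.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class Node {
    final String name;
    final List<Node> children = new ArrayList<>();
    Node(String name) { this.name = name; }
}

public class TaskTreeWalk {

    // Same shape as setInputFormat's recursion over getChildTasks(), with an
    // explicit visited set so a task shared by two parents is handled once.
    static void walk(Node task, Set<Node> visited) {
        if (!visited.add(task)) {
            return; // already configured
        }
        System.out.println("setInputFormat on " + task.name);
        for (Node child : task.children) {
            walk(child, visited);
        }
    }

    public static void main(String[] args) {
        Node root = new Node("root"), a = new Node("a"), b = new Node("b");
        Node shared = new Node("shared");
        root.children.addAll(Arrays.asList(a, b));
        a.children.add(shared);
        b.children.add(shared);
        walk(root, new HashSet<>()); // prints "shared" once, not twice
    }
}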

Example 4 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class GenMapRedUtils method createMRWorkForMergingFiles.

/**
 * @param fsInput The FileSink operator.
 * @param finalName the final destination path the merge job should output.
 * @param dependencyTask
 * @param mvTasks
 * @param conf
 * @param currTask
 * @param lineageState
 * @throws SemanticException
 *
 * create a Map-only merge job using CombineHiveInputFormat for all partitions with the
 * following operators:
 *          MR job J0:
 *          ...
 *          |
 *          v
 *          FileSinkOperator_1 (fsInput)
 *          |
 *          v
 *          Merge job J1:
 *          |
 *          v
 *          TableScan (using CombineHiveInputFormat) (tsMerge)
 *          |
 *          v
 *          FileSinkOperator (fsMerge)
 *
 *          Here the pathToPartitionInfo & pathToAlias will remain the same, which means the
 *          paths do not contain the dynamic partitions (their parent). So after the dynamic
 *          partitions are created (after the first job finishes, before the moveTask or
 *          ConditionalTask starts), we need to change the pathToPartitionInfo & pathToAlias
 *          to include the dynamic partition directories.
 */
public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, Path finalName, DependencyCollectionTask dependencyTask, List<Task<MoveWork>> mvTasks, HiveConf conf, Task<? extends Serializable> currTask, LineageState lineageState) throws SemanticException {
    // 
    // 1. create the operator tree
    // 
    FileSinkDesc fsInputDesc = fsInput.getConf();
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
        Utilities.FILE_OP_LOGGER.trace("Creating merge work from " + System.identityHashCode(fsInput) + " with write ID " + (fsInputDesc.isMmTable() ? fsInputDesc.getTableWriteId() : null) + " into " + finalName);
    }
    boolean isBlockMerge = (conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) || (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class));
    RowSchema inputRS = fsInput.getSchema();
    Long srcMmWriteId = fsInputDesc.isMmTable() ? fsInputDesc.getTableWriteId() : null;
    FileSinkDesc fsOutputDesc = null;
    TableScanOperator tsMerge = null;
    if (!isBlockMerge) {
        // Create a TableScan operator
        tsMerge = GenMapRedUtils.createTemporaryTableScanOperator(fsInput.getCompilationOpContext(), inputRS);
        // Create a FileSink operator
        TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone();
        Path mergeDest = srcMmWriteId == null ? finalName : finalName.getParent();
        fsOutputDesc = new FileSinkDesc(mergeDest, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT));
        fsOutputDesc.setMmWriteId(srcMmWriteId);
        fsOutputDesc.setIsMerge(true);
        // Create and attach the filesink for the merge.
        OperatorFactory.getAndMakeChild(fsOutputDesc, inputRS, tsMerge);
    }
    // If the input FileSinkOperator has dynamic partitioning enabled, the tsMerge input schema
    // needs to include the partition columns, and the fsOutput should have
    // a DynamicPartitionCtx to indicate that it needs to be dynamically partitioned.
    DynamicPartitionCtx dpCtx = fsInputDesc.getDynPartCtx();
    if (dpCtx != null && dpCtx.getNumDPCols() > 0) {
        // adding DP ColumnInfo to the RowSchema signature
        ArrayList<ColumnInfo> signature = inputRS.getSignature();
        String tblAlias = fsInputDesc.getTableInfo().getTableName();
        for (String dpCol : dpCtx.getDPColNames()) {
            // all partition column types should be string; partition columns are virtual columns
            ColumnInfo colInfo = new ColumnInfo(dpCol, TypeInfoFactory.stringTypeInfo, tblAlias, true);
            signature.add(colInfo);
        }
        inputRS.setSignature(signature);
        if (!isBlockMerge) {
            // create another DynamicPartitionCtx, which has a different input-to-DP column mapping
            DynamicPartitionCtx dpCtx2 = new DynamicPartitionCtx(dpCtx);
            fsOutputDesc.setDynPartCtx(dpCtx2);
        }
        // update the FileSinkOperator to include partition columns
        usePartitionColumns(fsInputDesc.getTableInfo().getProperties(), dpCtx.getDPColNames());
    } else {
        // non-partitioned table
        fsInputDesc.getTableInfo().getProperties().remove(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS);
    }
    // 
    // 2. Constructing a conditional task consisting of a move task and a map reduce task
    // 
    Path inputDirName = fsInputDesc.getMergeInputDirName();
    MapWork cplan;
    Serializable work;
    if (isBlockMerge) {
        cplan = GenMapRedUtils.createMergeTask(fsInputDesc, finalName, dpCtx != null && dpCtx.getNumDPCols() > 0, fsInput.getCompilationOpContext());
        if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
            work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf);
            cplan.setName("File Merge");
            ((TezWork) work).add(cplan);
        } else if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
            work = new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
            cplan.setName("Spark Merge File Work");
            ((SparkWork) work).add(cplan);
        } else {
            work = cplan;
        }
    } else {
        cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc);
        if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
            work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf);
            cplan.setName("File Merge");
            ((TezWork) work).add(cplan);
        } else if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
            work = new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
            cplan.setName("Spark Merge File Work");
            ((SparkWork) work).add(cplan);
        } else {
            work = new MapredWork();
            ((MapredWork) work).setMapWork(cplan);
        }
    }
    // use CombineHiveInputFormat for map-only merging
    cplan.setInputformat("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat");
    // NOTE: we should gather stats in MR1 rather than MR2 of the merge job, since we don't
    // know whether merge MR2 will be triggered at execution time
    MoveWork dummyMv = null;
    if (srcMmWriteId == null) {
        // Only create the MoveWork for non-MM tables; no action is needed for an MM table.
        dummyMv = new MoveWork(null, null, null, new LoadFileDesc(inputDirName, finalName, true, null, null, false), false);
    }
    // Use the original fsOp path here in case of MM - while the new FSOP merges files inside the
    // MM directory, the original MoveTask still commits based on the parent. Note that this path
    // can only be triggered for a merge that is part of an insert for now; MM tables do not
    // support concatenate. The old temp-directory logic is kept for non-MM tables.
    Path fsopPath = srcMmWriteId != null ? fsInputDesc.getFinalDirName() : finalName;
    Task<MoveWork> mvTask = GenMapRedUtils.findMoveTaskForFsopOutput(mvTasks, fsopPath, fsInputDesc.isMmTable());
    ConditionalTask cndTsk = GenMapRedUtils.createCondTask(conf, currTask, dummyMv, work, fsInputDesc.getMergeInputDirName(), finalName, mvTask, dependencyTask, lineageState);
    // keep the dynamic partition context in conditional task resolver context
    ConditionalResolverMergeFilesCtx mrCtx = (ConditionalResolverMergeFilesCtx) cndTsk.getResolverCtx();
    mrCtx.setDPCtx(fsInputDesc.getDynPartCtx());
    mrCtx.setLbCtx(fsInputDesc.getLbCtx());
}
Also used : Path(org.apache.hadoop.fs.Path) MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Serializable(java.io.Serializable) LoadFileDesc(org.apache.hadoop.hive.ql.plan.LoadFileDesc) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) DynamicPartitionCtx(org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) ConditionalResolverMergeFilesCtx(org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) RCFileInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) ConditionalTask(org.apache.hadoop.hive.ql.exec.ConditionalTask) LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
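
Compared with Example 1, this version adds handling for MM (insert-only transactional) tables: both the merge destination and the dummy MoveWork depend on whether srcMmWriteId is set. A small sketch of just that decision, using java.nio.file.Path as a stand-in for org.apache.hadoop.fs.Path:

// Uses java.nio.file.Path as a stand-in for org.apache.hadoop.fs.Path.
import java.nio.file.Path;
import java.nio.file.Paths;

public class MmMergeDest {

    // Mirrors: Path mergeDest = srcMmWriteId == null ? finalName : finalName.getParent();
    // For an MM table the merge FSOP writes inside the write-id directory,
    // so the destination is the parent of finalName.
    static Path mergeDest(Long srcMmWriteId, Path finalName) {
        return srcMmWriteId == null ? finalName : finalName.getParent();
    }

    // Mirrors the dummyMv creation: a placeholder MoveWork is only needed
    // for non-MM tables; for MM tables the original MoveTask already commits.
    static boolean needsDummyMoveWork(Long srcMmWriteId) {
        return srcMmWriteId == null;
    }

    public static void main(String[] args) {
        Path finalName = Paths.get("/warehouse/t/delta_0000005_0000005/part-0");
        System.out.println(mergeDest(null, finalName)); // non-MM: unchanged
        System.out.println(mergeDest(5L, finalName));   // MM: parent directory
        System.out.println(needsDummyMoveWork(5L));     // false
    }
}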

Example 5 with TezWork

use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

the class GenMapRedUtils method isMergeRequired.

/**
 * Returns true iff the fsOp requires a merge
 */
public static boolean isMergeRequired(List<Task<MoveWork>> mvTasks, HiveConf hconf, FileSinkOperator fsOp, Task<? extends Serializable> currTask, boolean isInsertTable) {
    // Has the user enabled merging of files for map-only jobs or for all jobs
    if (mvTasks == null || mvTasks.isEmpty()) {
        return false;
    }
    // no need for merging if the move is to a local file system
    // We are looking based on the original FSOP, so use the original path as is.
    MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTaskForFsopOutput(mvTasks, fsOp.getConf().getFinalDirName(), fsOp.getConf().isMmTable());
    // TODO: this stats-gathering logic is unrelated to the merge decision and probably belongs elsewhere.
    if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && !fsOp.getConf().isMaterialization()) {
        // mark the MapredWork and FileSinkOperator for gathering stats
        fsOp.getConf().setGatherStats(true);
        fsOp.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE));
        if (!mvTask.hasFollowingStatsTask()) {
            GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf);
        }
    }
    if (mvTask == null || mvTask.isLocal() || !fsOp.getConf().canBeMerged()) {
        return false;
    }
    if (currTask.getWork() instanceof TezWork) {
        // tez blurs the boundary between map and reduce, thus it has its own config
        return hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES);
    } else if (currTask.getWork() instanceof SparkWork) {
        // spark has its own config for merging
        return hconf.getBoolVar(ConfVars.HIVEMERGESPARKFILES);
    }
    return isMergeRequiredForMr(hconf, fsOp, currTask);
}
Also used : MoveTask(org.apache.hadoop.hive.ql.exec.MoveTask) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
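
The tail of isMergeRequired branches on the type of work the current task carries, because each engine exposes its own merge flag. A self-contained sketch of that decision; the Conf fields are illustrative stand-ins for the real ConfVars entries, not the actual HiveConf API:

public class MergeDecision {

    // Illustrative stand-in for the HiveConf flags; the field names are not
    // the real ConfVars constants.
    static class Conf {
        boolean mergeTezFiles;   // ~ hive.merge.tezfiles
        boolean mergeSparkFiles; // ~ hive.merge.sparkfiles
        boolean mergeMrFiles;    // ~ the MR-specific checks
    }

    enum Engine { TEZ, SPARK, MR }

    // Mirrors the tail of isMergeRequired: Tez and Spark blur the map/reduce
    // boundary, so each consults its own merge flag.
    static boolean isMergeRequired(Engine engine, Conf conf) {
        switch (engine) {
            case TEZ:   return conf.mergeTezFiles;
            case SPARK: return conf.mergeSparkFiles;
            default:    return conf.mergeMrFiles;
        }
    }

    public static void main(String[] args) {
        Conf c = new Conf();
        c.mergeTezFiles = true;
        System.out.println(isMergeRequired(Engine.TEZ, c)); // true
    }
}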

Aggregations

TezWork (org.apache.hadoop.hive.ql.plan.TezWork): 31
BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork): 16
MapWork (org.apache.hadoop.hive.ql.plan.MapWork): 13
TezTask (org.apache.hadoop.hive.ql.exec.tez.TezTask): 11
Task (org.apache.hadoop.hive.ql.exec.Task): 9
ArrayList (java.util.ArrayList): 8
SparkWork (org.apache.hadoop.hive.ql.plan.SparkWork): 8
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 7
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 7
MapredWork (org.apache.hadoop.hive.ql.plan.MapredWork): 7
List (java.util.List): 6
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 6
ConditionalTask (org.apache.hadoop.hive.ql.exec.ConditionalTask): 6
Operator (org.apache.hadoop.hive.ql.exec.Operator): 6
ReduceWork (org.apache.hadoop.hive.ql.plan.ReduceWork): 6
Serializable (java.io.Serializable): 5
LinkedList (java.util.LinkedList): 5
Path (org.apache.hadoop.fs.Path): 5
MoveWork (org.apache.hadoop.hive.ql.plan.MoveWork): 5
IOException (java.io.IOException): 4