
Example 6 with GenMapRedCtx

use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.

The class GenMRRedSink3, method process.

/**
   * Reduce sink encountered.
   *
   * @param nd
   *          the reduce sink operator encountered
   * @param opProcCtx
   *          context
   */
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx, Object... nodeOutputs) throws SemanticException {
    ReduceSinkOperator op = (ReduceSinkOperator) nd;
    GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
    // the union consists of a bunch of map-reduce jobs, and it has been
    // split at the union
    Operator<? extends OperatorDesc> reducer = op.getChildOperators().get(0);
    UnionOperator union = Utils.findNode(stack, UnionOperator.class);
    assert union != null;
    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
    GenMapRedCtx mapredCtx = mapCurrCtx.get(union);
    // Prefer the task already recorded for the union; otherwise fall back to
    // the current task.
    Task<? extends Serializable> unionTask = null;
    if (mapredCtx != null) {
        unionTask = mapredCtx.getCurrTask();
    } else {
        unionTask = ctx.getCurrTask();
    }
    MapredWork plan = (MapredWork) unionTask.getWork();
    HashMap<Operator<? extends OperatorDesc>, Task<? extends Serializable>> opTaskMap = ctx.getOpTaskMap();
    Task<? extends Serializable> reducerTask = opTaskMap.get(reducer);
    ctx.setCurrTask(unionTask);
    // If the plan for this reducer does not exist, initialize the plan
    if (reducerTask == null) {
        // When the reducer is encountered for the first time
        if (plan.getReduceWork() == null) {
            GenMapRedUtils.initUnionPlan(op, union, ctx, unionTask);
        } else {
            // When the union is followed by a multi-table insert
            GenMapRedUtils.splitPlan(op, ctx);
        }
    } else if (plan.getReduceWork() != null && plan.getReduceWork().getReducer() == reducer) {
        // The union is already initialized. However, the union is walked from
        // another input
        // initUnionPlan is idempotent
        GenMapRedUtils.initUnionPlan(op, union, ctx, unionTask);
    } else {
        GenMapRedUtils.joinUnionPlan(ctx, union, unionTask, reducerTask, false);
        ctx.setCurrTask(reducerTask);
    }
    mapCurrCtx.put(op, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));
    // the union operator has been processed
    ctx.setCurrUnionOp(null);
    return true;
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) Task(org.apache.hadoop.hive.ql.exec.Task) Serializable(java.io.Serializable) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) GenMapRedCtx(org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc)
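
GenMapRedCtx itself is never shown on this page. A minimal sketch of the class, inferred purely from the constructor calls and getters in these examples, looks like the following; the field names are assumptions, and the real class is a static inner class of GenMRProcContext.

import java.io.Serializable;
import org.apache.hadoop.hive.ql.exec.Task;

// Sketch only: reconstructed from usage above, not copied from Hive sources.
public class GenMapRedCtx {

    // task under which the current operator's plan is being assembled
    private final Task<? extends Serializable> currTask;

    // alias id of the table/sub-query currently feeding the operator
    private final String currAliasId;

    public GenMapRedCtx(Task<? extends Serializable> currTask, String currAliasId) {
        this.currTask = currTask;
        this.currAliasId = currAliasId;
    }

    public Task<? extends Serializable> getCurrTask() {
        return currTask;
    }

    public String getCurrAliasId() {
        return currAliasId;
    }
}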

Example 7 with GenMapRedCtx

use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.

The class GenMRUnion1, method processMapOnlyUnion.

/**
   * Process the union if all sub-queries are map-only.
   *
   * @return true, to signal that the union has been handled
   * @throws SemanticException
   */
private Object processMapOnlyUnion(UnionOperator union, Stack<Node> stack, GenMRProcContext ctx, UnionProcContext uCtx) throws SemanticException {
    // merge currTask from multiple topOps
    GenMRUnionCtx uCtxTask = ctx.getUnionTask(union);
    if (uCtxTask != null) {
        // get the task already associated with this union
        Task<? extends Serializable> uTask = uCtxTask.getUTask();
        if (uTask != null) {
            if (ctx.getCurrTask() != null && ctx.getCurrTask() != uTask) {
                // if ctx.getCurrTask() is in rootTasks, should be removed
                ctx.getRootTasks().remove(ctx.getCurrTask());
            }
            ctx.setCurrTask(uTask);
        }
    }
    UnionParseContext uPrsCtx = uCtx.getUnionParseContext(union);
    ctx.getMapCurrCtx().put(union, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));
    // if the union is seen for the first time, remember the current task in a new GenMRUnionCtx
    uCtxTask = ctx.getUnionTask(union);
    if (uCtxTask == null) {
        uCtxTask = new GenMRUnionCtx(ctx.getCurrTask());
        ctx.setUnionTask(union, uCtxTask);
    }
    Task<? extends Serializable> uTask = ctx.getCurrTask();
    if (uTask.getParentTasks() == null || uTask.getParentTasks().isEmpty()) {
        if (!ctx.getRootTasks().contains(uTask)) {
            ctx.getRootTasks().add(uTask);
        }
    }
    return true;
}
Also used : UnionParseContext(org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext.UnionParseContext) GenMapRedCtx(org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx) GenMRUnionCtx(org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRUnionCtx)
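
The tail of this method repeats a bookkeeping rule that several of these processors share: a task belongs in rootTasks exactly when nothing feeds it and it is not already registered. A hypothetical helper distilling that rule (addAsRootIfOrphan is not a Hive API, just an illustration):

import java.io.Serializable;
import java.util.List;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;

final class RootTaskUtil {
    // Illustration only: the same check appears inline in processMapOnlyUnion above.
    static void addAsRootIfOrphan(Task<? extends Serializable> task, GenMRProcContext ctx) {
        List<Task<? extends Serializable>> parents = task.getParentTasks();
        // a root task is one with no parents that is not yet listed in rootTasks
        if ((parents == null || parents.isEmpty()) && !ctx.getRootTasks().contains(task)) {
            ctx.getRootTasks().add(task);
        }
    }
}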

Example 8 with GenMapRedCtx

use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.

The class GenMRUnion1, method process.

/**
   * Union Operator encountered. Currently, the algorithm is pretty simple: If
   * all the sub-queries are map-only, don't do anything. Otherwise, insert a
   * FileSink on top of all the sub-queries.
   *
   * This can be optimized later on.
   *
   * @param nd
   *          the union operator encountered
   * @param opProcCtx
   *          context
   */
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx, Object... nodeOutputs) throws SemanticException {
    UnionOperator union = (UnionOperator) nd;
    GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
    ParseContext parseCtx = ctx.getParseCtx();
    UnionProcContext uCtx = parseCtx.getUCtx();
    // Map-only subqueries can be optimized in the future to not write to a file
    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
    if (union.getConf().isAllInputsInSameReducer()) {
        // All inputs of this UnionOperator are in the same Reducer.
        // We do not need to break the operator tree.
        mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(ctx.getCurrTask(), ctx.getCurrAliasId()));
        return null;
    }
    UnionParseContext uPrsCtx = uCtx.getUnionParseContext(union);
    ctx.setCurrUnionOp(union);
    assert uPrsCtx != null;
    // The plan needs to be broken only if one of the sub-queries involves a
    // map-reduce job
    if (uPrsCtx.allMapOnlySubQ()) {
        return processMapOnlyUnion(union, stack, ctx, uCtx);
    }
    Task<? extends Serializable> currTask = ctx.getCurrTask();
    int pos = UnionProcFactory.getPositionParent(union, stack);
    Task<? extends Serializable> uTask = null;
    MapredWork uPlan = null;
    // union is encountered for the first time
    GenMRUnionCtx uCtxTask = ctx.getUnionTask(union);
    if (uCtxTask == null) {
        uPlan = GenMapRedUtils.getMapRedWork(parseCtx);
        uTask = TaskFactory.get(uPlan, parseCtx.getConf());
        uCtxTask = new GenMRUnionCtx(uTask);
        ctx.setUnionTask(union, uCtxTask);
    } else {
        uTask = uCtxTask.getUTask();
    }
    // Copy into the current union task plan if this sub-query is a map-only root task
    if (uPrsCtx.getMapOnlySubq(pos) && uPrsCtx.getRootTask(pos)) {
        processSubQueryUnionMerge(ctx, uCtxTask, union, stack);
        if (ctx.getRootTasks().contains(currTask)) {
            ctx.getRootTasks().remove(currTask);
        }
    } else {
        // If it is a map-reduce job, create a temporary file
        // is the current task a root task
        if (shouldBeRootTask(currTask) && !ctx.getRootTasks().contains(currTask) && (currTask.getParentTasks() == null || currTask.getParentTasks().isEmpty())) {
            ctx.getRootTasks().add(currTask);
        }
        processSubQueryUnionCreateIntermediate(union.getParentOperators().get(pos), union, uTask, ctx, uCtxTask);
        // the currAliasId and CurrTopOp is not valid any more
        ctx.setCurrAliasId(null);
        ctx.setCurrTopOp(null);
        ctx.getOpTaskMap().put(null, uTask);
    }
    ctx.setCurrTask(uTask);
    mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(ctx.getCurrTask(), null));
    return true;
}
Also used : TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) UnionProcContext(org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext) UnionParseContext(org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext.UnionParseContext) GenMRUnionCtx(org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRUnionCtx) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) GenMapRedCtx(org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc)
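
None of these processors is invoked directly: Hive's MapReduce compiler registers them against operator-name rules and walks the operator DAG, dispatching to the processor whose rule matches and falling back to GenMROperator (Example 9, below) when none does. A condensed sketch of that wiring, loosely after MapReduceCompiler; the rule names and the two rules shown are illustrative, not the full rule set.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.optimizer.GenMROperator;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext;
import org.apache.hadoop.hive.ql.optimizer.GenMRTableScan1;
import org.apache.hadoop.hive.ql.optimizer.GenMRUnion1;
import org.apache.hadoop.hive.ql.parse.GenMapRedWalker;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;

final class TaskTreeSketch {
    // Condensed: the real compiler registers many more rules (reduce sinks,
    // file sinks, map joins, ...).
    static void walk(ParseContext parseCtx, GenMRProcContext procCtx) throws SemanticException {
        Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
        opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), new GenMRTableScan1());
        opRules.put(new RuleRegExp("R2", UnionOperator.getOperatorName() + "%"), new GenMRUnion1());
        // GenMROperator is the default: it just propagates the parent's GenMapRedCtx.
        Dispatcher disp = new DefaultRuleDispatcher(new GenMROperator(), opRules, procCtx);
        GraphWalker ogw = new GenMapRedWalker(disp);
        ArrayList<Node> topNodes = new ArrayList<Node>();
        topNodes.addAll(parseCtx.getTopOps().values());
        ogw.startWalking(topNodes, null);
    }
}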

Example 9 with GenMapRedCtx

use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.

The class GenMROperator, method process.

/**
   * Generic operator encountered: inherit the map-reduce context from the
   * parent operator on the walk stack.
   *
   * @param nd
   *          the operator encountered
   * @param procCtx
   *          context
   */
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
    GenMRProcContext ctx = (GenMRProcContext) procCtx;
    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
    // The walker keeps the path from a root operator to the current node on the
    // stack, so size() - 2 is the immediate parent: inherit its task and alias.
    GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2));
    mapCurrCtx.put((Operator<? extends OperatorDesc>) nd, new GenMapRedCtx(mapredCtx.getCurrTask(), mapredCtx.getCurrAliasId()));
    return true;
}
Also used : Operator(org.apache.hadoop.hive.ql.exec.Operator) GenMapRedCtx(org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc)
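
The one subtle line here is the stack lookup: during the walk, the stack holds the full path from a root operator down to the node being processed, with that node on top, so index size() - 2 is its immediate parent. A tiny self-contained illustration (operators stand in as plain strings):

import java.util.Stack;

public class StackParentDemo {
    public static void main(String[] args) {
        // Path while a walk of TS -> FIL -> SEL is processing SEL:
        Stack<String> path = new Stack<String>();
        path.push("TS");   // root table scan
        path.push("FIL");  // filter
        path.push("SEL");  // node currently being processed, on top
        // same lookup as in GenMROperator.process above
        String parent = path.get(path.size() - 2);
        System.out.println(parent);  // prints FIL
    }
}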

Example 10 with GenMapRedCtx

use of org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx in project hive by apache.

The class GenMRTableScan1, method process.

/**
   * Table scan encountered.
   * @param nd
   *          the table scan operator encountered
   * @param opProcCtx
   *          context
   */
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx opProcCtx, Object... nodeOutputs) throws SemanticException {
    TableScanOperator op = (TableScanOperator) nd;
    GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
    ParseContext parseCtx = ctx.getParseCtx();
    Class<? extends InputFormat> inputFormat = op.getConf().getTableMetadata().getInputFormatClass();
    Map<Operator<? extends OperatorDesc>, GenMapRedCtx> mapCurrCtx = ctx.getMapCurrCtx();
    // create a dummy MapReduce task
    MapredWork currWork = GenMapRedUtils.getMapRedWork(parseCtx);
    MapRedTask currTask = (MapRedTask) TaskFactory.get(currWork, parseCtx.getConf());
    ctx.setCurrTask(currTask);
    ctx.setCurrTopOp(op);
    for (String alias : parseCtx.getTopOps().keySet()) {
        Operator<? extends OperatorDesc> currOp = parseCtx.getTopOps().get(alias);
        if (currOp == op) {
            String currAliasId = alias;
            ctx.setCurrAliasId(currAliasId);
            mapCurrCtx.put(op, new GenMapRedCtx(currTask, currAliasId));
            if (parseCtx.getQueryProperties().isAnalyzeCommand()) {
                boolean partialScan = parseCtx.getQueryProperties().isPartialScanAnalyzeCommand();
                boolean noScan = parseCtx.getQueryProperties().isNoScanAnalyzeCommand();
                if (OrcInputFormat.class.isAssignableFrom(inputFormat) || MapredParquetInputFormat.class.isAssignableFrom(inputFormat)) {
                    // For ORC and Parquet, all of the following statements are the same:
                    // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS;
                    // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS partialscan;
                    // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS noscan;
                    // There will not be any MR or Tez job above this task
                    StatsNoJobWork snjWork = new StatsNoJobWork(op.getConf().getTableMetadata().getTableSpec());
                    snjWork.setStatsReliable(parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
                    // If partition is specified, get pruned partition list
                    Set<Partition> confirmedParts = GenMapRedUtils.getConfirmedPartitionsForScan(op);
                    if (confirmedParts.size() > 0) {
                        Table source = op.getConf().getTableMetadata();
                        List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
                        PrunedPartitionList partList = new PrunedPartitionList(source, confirmedParts, partCols, false);
                        snjWork.setPrunedPartitionList(partList);
                    }
                    Task<StatsNoJobWork> snjTask = TaskFactory.get(snjWork, parseCtx.getConf());
                    ctx.setCurrTask(snjTask);
                    ctx.setCurrTopOp(null);
                    ctx.getRootTasks().clear();
                    ctx.getRootTasks().add(snjTask);
                } else {
                    // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS;
                    // The plan consists of a simple MapRedTask followed by a StatsTask.
                    // The MR task is just a simple TableScanOperator
                    StatsWork statsWork = new StatsWork(op.getConf().getTableMetadata().getTableSpec());
                    statsWork.setAggKey(op.getConf().getStatsAggPrefix());
                    statsWork.setStatsTmpDir(op.getConf().getTmpStatsDir());
                    statsWork.setSourceTask(currTask);
                    statsWork.setStatsReliable(parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
                    Task<StatsWork> statsTask = TaskFactory.get(statsWork, parseCtx.getConf());
                    currTask.addDependentTask(statsTask);
                    if (!ctx.getRootTasks().contains(currTask)) {
                        ctx.getRootTasks().add(currTask);
                    }
                    // The plan consists of a StatsTask only.
                    if (noScan) {
                        statsTask.setParentTasks(null);
                        statsWork.setNoScanAnalyzeCommand(true);
                        ctx.getRootTasks().remove(currTask);
                        ctx.getRootTasks().add(statsTask);
                    }
                    // ANALYZE TABLE T [PARTITION (...)] COMPUTE STATISTICS partialscan;
                    if (partialScan) {
                        handlePartialScanCommand(op, ctx, parseCtx, currTask, statsWork, statsTask);
                    }
                    currWork.getMapWork().setGatheringStats(true);
                    if (currWork.getReduceWork() != null) {
                        currWork.getReduceWork().setGatheringStats(true);
                    }
                    // NOTE: here we should use the new partition predicate pushdown API to get
                    // the pruned partition list, and pass it to setTaskPlan as the last parameter
                    Set<Partition> confirmedPartns = GenMapRedUtils.getConfirmedPartitionsForScan(op);
                    if (confirmedPartns.size() > 0) {
                        Table source = op.getConf().getTableMetadata();
                        List<String> partCols = GenMapRedUtils.getPartitionColumns(op);
                        PrunedPartitionList partList = new PrunedPartitionList(source, confirmedPartns, partCols, false);
                        GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx, partList);
                    } else {
                        // non-partitioned table
                        GenMapRedUtils.setTaskPlan(currAliasId, op, currTask, false, ctx);
                    }
                }
            }
            return true;
        }
    }
    assert false;
    return null;
}
Also used : TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) Partition(org.apache.hadoop.hive.ql.metadata.Partition) Table(org.apache.hadoop.hive.ql.metadata.Table) MapRedTask(org.apache.hadoop.hive.ql.exec.mr.MapRedTask) MapredParquetInputFormat(org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) StatsWork(org.apache.hadoop.hive.ql.plan.StatsWork) OrcInputFormat(org.apache.hadoop.hive.ql.io.orc.OrcInputFormat) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) GenMapRedCtx(org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx) StatsNoJobWork(org.apache.hadoop.hive.ql.plan.StatsNoJobWork) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc)
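
The pivotal branch in this example is the input-format check: ORC and Parquet record row counts and raw sizes in their file footers, so every ANALYZE variant can be answered by a footer read (StatsNoJobWork) with no MapReduce job, while other formats need a real scan. A distilled version of that check; statsAvailableFromFooter is a hypothetical name, not Hive API:

import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
import org.apache.hadoop.mapred.InputFormat;

final class FooterStats {
    // Mirrors the branch in GenMRTableScan1.process above: footer-based formats
    // get a StatsNoJobWork, everything else gets a MapRedTask plus StatsTask.
    static boolean statsAvailableFromFooter(Class<? extends InputFormat> fmt) {
        return OrcInputFormat.class.isAssignableFrom(fmt)
            || MapredParquetInputFormat.class.isAssignableFrom(fmt);
    }
}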

Aggregations

GenMapRedCtx (org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx) 10
Operator (org.apache.hadoop.hive.ql.exec.Operator) 9
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc) 9
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) 6
MapredWork (org.apache.hadoop.hive.ql.plan.MapredWork) 6
Serializable (java.io.Serializable) 5
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator) 5
Task (org.apache.hadoop.hive.ql.exec.Task) 5
UnionOperator (org.apache.hadoop.hive.ql.exec.UnionOperator) 5
MapRedTask (org.apache.hadoop.hive.ql.exec.mr.MapRedTask) 4
ConditionalTask (org.apache.hadoop.hive.ql.exec.ConditionalTask) 3
FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator) 3
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator) 3
ParseContext (org.apache.hadoop.hive.ql.parse.ParseContext) 3
DemuxOperator (org.apache.hadoop.hive.ql.exec.DemuxOperator) 2
DependencyCollectionTask (org.apache.hadoop.hive.ql.exec.DependencyCollectionTask) 2
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator) 2
MoveTask (org.apache.hadoop.hive.ql.exec.MoveTask) 2
SMBMapJoinOperator (org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) 2
SparkTask (org.apache.hadoop.hive.ql.exec.spark.SparkTask) 2