
Example 21 with ReduceSinkOperator

use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.

the class ConvertJoinMapJoin method removeCycleCreatingSemiJoinOps.

// Remove any semijoin branch associated with hashjoin's parent's operator
// pipeline which can cause a cycle after hashjoin optimization.
private void removeCycleCreatingSemiJoinOps(MapJoinOperator mapjoinOp, Operator<?> parentSelectOpOfBigTable, ParseContext parseContext) throws SemanticException {
    Map<ReduceSinkOperator, TableScanOperator> semiJoinMap = new HashMap<ReduceSinkOperator, TableScanOperator>();
    for (Operator<?> op : parentSelectOpOfBigTable.getChildOperators()) {
        if (!(op instanceof SelectOperator)) {
            continue;
        }
        while (op.getChildOperators().size() > 0) {
            op = op.getChildOperators().get(0);
        }
        // If not ReduceSink Op, skip
        if (!(op instanceof ReduceSinkOperator)) {
            continue;
        }
        ReduceSinkOperator rs = (ReduceSinkOperator) op;
        TableScanOperator ts = parseContext.getRsOpToTsOpMap().get(rs);
        if (ts == null) {
            // skip, no semijoin branch
            continue;
        }
        // Found a semijoin branch.
        for (Operator<?> parent : mapjoinOp.getParentOperators()) {
            if (!(parent instanceof ReduceSinkOperator)) {
                continue;
            }
            Set<TableScanOperator> tsOps = OperatorUtils.findOperatorsUpstream(parent, TableScanOperator.class);
            for (TableScanOperator parentTS : tsOps) {
                // If the parent is same as the ts, then we have a cycle.
                if (ts == parentTS) {
                    semiJoinMap.put(rs, ts);
                    break;
                }
            }
        }
    }
    if (semiJoinMap.size() > 0) {
        for (ReduceSinkOperator rs : semiJoinMap.keySet()) {
            GenTezUtils.removeBranch(rs);
            GenTezUtils.removeSemiJoinOperator(parseContext, rs, semiJoinMap.get(rs));
        }
    }
}
Also used : TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) HashMap(java.util.HashMap) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator)
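
For reference, the cycle test above reduces to a single question: does the TableScanOperator targeted by the semijoin branch also feed the map join through one of its ReduceSink parents? A minimal sketch of that check as a standalone helper (the class and method names are hypothetical; it relies only on OperatorUtils.findOperatorsUpstream as used above):

import java.util.Set;

import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.OperatorUtils;
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;

public final class SemiJoinCycleCheck {

    // Returns true if the semijoin's target table scan also appears upstream of one of
    // the map join's ReduceSink parents, i.e. keeping the semijoin branch would create
    // a cycle after the hash-join optimization.
    static boolean createsCycle(MapJoinOperator mapJoinOp, TableScanOperator semiJoinTarget) {
        for (Operator<?> parent : mapJoinOp.getParentOperators()) {
            if (!(parent instanceof ReduceSinkOperator)) {
                continue;
            }
            Set<TableScanOperator> upstreamScans =
                OperatorUtils.findOperatorsUpstream(parent, TableScanOperator.class);
            if (upstreamScans.contains(semiJoinTarget)) {
                return true;
            }
        }
        return false;
    }
}

Assuming Operator does not override equals, the contains check is identity-based and so matches the ts == parentTS comparison in the original loop.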

Example 22 with ReduceSinkOperator

use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.

the class SparkReduceSinkMapJoinProc method process.

/* (non-Javadoc)
   * This processor addresses the RS-MJ case that occurs in Spark on the small/hash
   * table side of things. The work that the RS will be a part of must be connected
   * to the MJ work via a broadcast edge.
   * We should not walk down the tree when we encounter this pattern, because the
   * type of work (map work or reduce work) has to be determined on the basis of the
   * big table side: it may be map work (no need for shuffle) or reduce work.
   */
@SuppressWarnings("unchecked")
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs) throws SemanticException {
    GenSparkProcContext context = (GenSparkProcContext) procContext;
    if (!nd.getClass().equals(MapJoinOperator.class)) {
        return null;
    }
    MapJoinOperator mapJoinOp = (MapJoinOperator) nd;
    if (stack.size() < 2 || !(stack.get(stack.size() - 2) instanceof ReduceSinkOperator)) {
        context.currentMapJoinOperators.add(mapJoinOp);
        return null;
    }
    context.preceedingWork = null;
    context.currentRootOperator = null;
    ReduceSinkOperator parentRS = (ReduceSinkOperator) stack.get(stack.size() - 2);
    // remove the tag for in-memory side of mapjoin
    parentRS.getConf().setSkipTag(true);
    parentRS.setSkipTag(true);
    // remember the original parent list before we start modifying it.
    if (!context.mapJoinParentMap.containsKey(mapJoinOp)) {
        List<Operator<?>> parents = new ArrayList<Operator<?>>(mapJoinOp.getParentOperators());
        context.mapJoinParentMap.put(mapJoinOp, parents);
    }
    List<BaseWork> mapJoinWork;
    /*
     *  If there was a pre-existing work generated for the big-table mapjoin side,
     *  we need to hook the work generated for the RS (associated with the RS-MJ pattern)
     *  with the pre-existing work.
     *
     *  Otherwise, we need to remember that the mapjoin op has to be linked
     *  to the RS work (associated with the RS-MJ pattern).
     *
     */
    mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
    int workMapSize = context.childToWorkMap.get(parentRS).size();
    Preconditions.checkArgument(workMapSize == 1, "AssertionError: expected context.childToWorkMap.get(parentRS).size() to be 1, but was " + workMapSize);
    BaseWork parentWork = context.childToWorkMap.get(parentRS).get(0);
    // set the link between mapjoin and parent vertex
    int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
    if (pos == -1) {
        throw new SemanticException("Cannot find position of parent in mapjoin");
    }
    LOG.debug("Mapjoin " + mapJoinOp + ", pos: " + pos + " --> " + parentWork.getName());
    mapJoinOp.getConf().getParentToInput().put(pos, parentWork.getName());
    SparkEdgeProperty edgeProp = new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE);
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            // link the work with the work associated with the reduce sink that triggered this rule
            SparkWork sparkWork = context.currentTask.getWork();
            LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
            sparkWork.connect(parentWork, myWork, edgeProp);
        }
    }
    // remember in case we need to connect additional work later
    Map<BaseWork, SparkEdgeProperty> linkWorkMap = null;
    if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
        linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
    } else {
        linkWorkMap = new HashMap<BaseWork, SparkEdgeProperty>();
    }
    linkWorkMap.put(parentWork, edgeProp);
    context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
    List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
    if (reduceSinks == null) {
        reduceSinks = new ArrayList<ReduceSinkOperator>();
    }
    reduceSinks.add(parentRS);
    context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
    // create the dummy operators
    List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
    // create a new operator: HashTableDummyOperator, which shares the table desc
    HashTableDummyDesc desc = new HashTableDummyDesc();
    HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(mapJoinOp.getCompilationOpContext(), desc);
    TableDesc tbl;
    // need to create the correct table descriptor for key/value
    RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
    tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
    dummyOp.getConf().setTbl(tbl);
    Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
    List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
    StringBuilder keyOrder = new StringBuilder();
    StringBuilder keyNullOrder = new StringBuilder();
    for (int i = 0; i < keyCols.size(); i++) {
        keyOrder.append("+");
        keyNullOrder.append("a");
    }
    TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
    mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
    // let the dummy op be the parent of mapjoin op
    mapJoinOp.replaceParent(parentRS, dummyOp);
    List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
    dummyChildren.add(mapJoinOp);
    dummyOp.setChildOperators(dummyChildren);
    dummyOperators.add(dummyOp);
    // cut the operator tree so as to not retain connections from the parent RS downstream
    List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
    int childIndex = childOperators.indexOf(mapJoinOp);
    childOperators.remove(childIndex);
    // the work needs to know about the dummy operators; they have to be
    // separately initialized at task startup
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            myWork.addDummyOp(dummyOp);
        }
    }
    if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
        for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
            dummyOperators.add(op);
        }
    }
    context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
    // replace ReduceSinkOp with HashTableSinkOp for the RSops which are parents of MJop
    MapJoinDesc mjDesc = mapJoinOp.getConf();
    HiveConf conf = context.conf;
    // Unlike in MR, we may call this method multiple times, for each
    // small table HTS. But, since it's idempotent, it should be OK.
    mjDesc.resetOrder();
    float hashtableMemoryUsage;
    if (hasGroupBy(mapJoinOp, context)) {
        hashtableMemoryUsage = conf.getFloatVar(HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE);
    } else {
        hashtableMemoryUsage = conf.getFloatVar(HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE);
    }
    mjDesc.setHashTableMemoryUsage(hashtableMemoryUsage);
    SparkHashTableSinkDesc hashTableSinkDesc = new SparkHashTableSinkDesc(mjDesc);
    SparkHashTableSinkOperator hashTableSinkOp = (SparkHashTableSinkOperator) OperatorFactory.get(mapJoinOp.getCompilationOpContext(), hashTableSinkDesc);
    byte tag = (byte) pos;
    int[] valueIndex = mjDesc.getValueIndex(tag);
    if (valueIndex != null) {
        List<ExprNodeDesc> newValues = new ArrayList<ExprNodeDesc>();
        List<ExprNodeDesc> values = hashTableSinkDesc.getExprs().get(tag);
        for (int index = 0; index < values.size(); index++) {
            if (valueIndex[index] < 0) {
                newValues.add(values.get(index));
            }
        }
        hashTableSinkDesc.getExprs().put(tag, newValues);
    }
    // get all parents of reduce sink
    List<Operator<? extends OperatorDesc>> rsParentOps = parentRS.getParentOperators();
    for (Operator<? extends OperatorDesc> parent : rsParentOps) {
        parent.replaceChild(parentRS, hashTableSinkOp);
    }
    hashTableSinkOp.setParentOperators(rsParentOps);
    hashTableSinkOp.getConf().setTag(tag);
    return true;
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) SparkHashTableSinkOperator(org.apache.hadoop.hive.ql.exec.SparkHashTableSinkOperator) ArrayList(java.util.ArrayList) List(java.util.List) HiveConf(org.apache.hadoop.hive.conf.HiveConf) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) SparkHashTableSinkDesc(org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc) HashTableDummyDesc(org.apache.hadoop.hive.ql.plan.HashTableDummyDesc) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) SparkEdgeProperty(org.apache.hadoop.hive.ql.plan.SparkEdgeProperty) GenSparkProcContext(org.apache.hadoop.hive.ql.parse.spark.GenSparkProcContext) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc)
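
Stripped of the operator-tree surgery, the structural effect of this processor on the work graph is small: the work containing the small-table ReduceSink is connected to every work that holds the map join with a SHUFFLE_NONE edge, which is how Spark models the broadcast of the hash-table side. A condensed sketch of just that step, assuming parentWork and mapJoinWork have already been resolved as in the code above (the helper name is hypothetical):

import java.util.List;

import org.apache.hadoop.hive.ql.plan.BaseWork;
import org.apache.hadoop.hive.ql.plan.SparkEdgeProperty;
import org.apache.hadoop.hive.ql.plan.SparkWork;

public final class BroadcastEdgeSketch {

    // Connect the small-table side work to every work holding the map join with a
    // no-shuffle edge, mirroring the loop in SparkReduceSinkMapJoinProc above.
    static void linkSmallTableWork(SparkWork sparkWork, BaseWork parentWork, List<BaseWork> mapJoinWork) {
        SparkEdgeProperty edgeProp = new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE);
        if (mapJoinWork != null) {
            for (BaseWork myWork : mapJoinWork) {
                sparkWork.connect(parentWork, myWork, edgeProp);
            }
        }
    }
}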

Example 23 with ReduceSinkOperator

use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.

the class SparkSkewJoinProcFactory method splitTask.

/**
   * If the join is not in a leaf ReduceWork, the spark task has to be split into 2 tasks.
   */
private static void splitTask(SparkTask currentTask, ReduceWork reduceWork, ParseContext parseContext) throws SemanticException {
    SparkWork currentWork = currentTask.getWork();
    Set<Operator<?>> reduceSinkSet = SparkMapJoinResolver.getOp(reduceWork, ReduceSinkOperator.class);
    if (currentWork.getChildren(reduceWork).size() == 1 && canSplit(currentWork) && reduceSinkSet.size() == 1) {
        ReduceSinkOperator reduceSink = (ReduceSinkOperator) reduceSinkSet.iterator().next();
        BaseWork childWork = currentWork.getChildren(reduceWork).get(0);
        SparkEdgeProperty originEdge = currentWork.getEdgeProperty(reduceWork, childWork);
        // disconnect the reduce work from its child; this should produce two isolated subgraphs
        currentWork.disconnect(reduceWork, childWork);
        // move works following the current reduce work into a new spark work
        SparkWork newWork = new SparkWork(parseContext.getConf().getVar(HiveConf.ConfVars.HIVEQUERYID));
        newWork.add(childWork);
        copyWorkGraph(currentWork, newWork, childWork);
        // remove them from current spark work
        for (BaseWork baseWork : newWork.getAllWorkUnsorted()) {
            currentWork.remove(baseWork);
            currentWork.getCloneToWork().remove(baseWork);
        }
        // create TS to read intermediate data
        Context baseCtx = parseContext.getContext();
        Path taskTmpDir = baseCtx.getMRTmpPath();
        Operator<? extends OperatorDesc> rsParent = reduceSink.getParentOperators().get(0);
        TableDesc tableDesc = PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rsParent.getSchema(), "temporarycol"));
        // this will insert FS and TS between the RS and its parent
        TableScanOperator tableScanOp = GenMapRedUtils.createTemporaryFile(rsParent, reduceSink, taskTmpDir, tableDesc, parseContext);
        // create new MapWork
        MapWork mapWork = PlanUtils.getMapRedWork().getMapWork();
        mapWork.setName("Map " + GenSparkUtils.getUtils().getNextSeqNumber());
        newWork.add(mapWork);
        newWork.connect(mapWork, childWork, originEdge);
        // setup the new map work
        String streamDesc = taskTmpDir.toUri().toString();
        if (GenMapRedUtils.needsTagging((ReduceWork) childWork)) {
            Operator<? extends OperatorDesc> childReducer = ((ReduceWork) childWork).getReducer();
            String id = null;
            if (childReducer instanceof JoinOperator) {
                if (parseContext.getJoinOps().contains(childReducer)) {
                    id = ((JoinOperator) childReducer).getConf().getId();
                }
            } else if (childReducer instanceof MapJoinOperator) {
                if (parseContext.getMapJoinOps().contains(childReducer)) {
                    id = ((MapJoinOperator) childReducer).getConf().getId();
                }
            } else if (childReducer instanceof SMBMapJoinOperator) {
                if (parseContext.getSmbMapJoinOps().contains(childReducer)) {
                    id = ((SMBMapJoinOperator) childReducer).getConf().getId();
                }
            }
            if (id != null) {
                streamDesc = id + ":$INTNAME";
            } else {
                streamDesc = "$INTNAME";
            }
            String origStreamDesc = streamDesc;
            int pos = 0;
            while (mapWork.getAliasToWork().get(streamDesc) != null) {
                streamDesc = origStreamDesc.concat(String.valueOf(++pos));
            }
        }
        GenMapRedUtils.setTaskPlan(taskTmpDir, streamDesc, tableScanOp, mapWork, false, tableDesc);
        // insert the new task between current task and its child
        @SuppressWarnings("unchecked") Task<? extends Serializable> newTask = TaskFactory.get(newWork, parseContext.getConf());
        List<Task<? extends Serializable>> childTasks = currentTask.getChildTasks();
        // must have at most one child
        if (childTasks != null && childTasks.size() > 0) {
            Task<? extends Serializable> childTask = childTasks.get(0);
            currentTask.removeDependentTask(childTask);
            newTask.addDependentTask(childTask);
        }
        currentTask.addDependentTask(newTask);
        newTask.setFetchSource(currentTask.isFetchSource());
    }
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) CommonJoinOperator(org.apache.hadoop.hive.ql.exec.CommonJoinOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) Context(org.apache.hadoop.hive.ql.Context) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) Path(org.apache.hadoop.fs.Path) SparkTask(org.apache.hadoop.hive.ql.exec.spark.SparkTask) Task(org.apache.hadoop.hive.ql.exec.Task) Serializable(java.io.Serializable) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) ReduceWork(org.apache.hadoop.hive.ql.plan.ReduceWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) SparkEdgeProperty(org.apache.hadoop.hive.ql.plan.SparkEdgeProperty) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork)
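
The task re-wiring at the end generalizes to "splice a new task between a task and its first child". A minimal sketch of that step using the same Task API calls as above (the helper name is hypothetical, and, like the original, it assumes only the first child task matters):

import java.io.Serializable;
import java.util.List;

import org.apache.hadoop.hive.ql.exec.Task;

public final class TaskChainSketch {

    // Splice newTask between currentTask and its first child task, as done at the
    // end of splitTask above, preserving the fetch-source flag.
    static void insertBetween(Task<? extends Serializable> currentTask,
                              Task<? extends Serializable> newTask) {
        List<Task<? extends Serializable>> childTasks = currentTask.getChildTasks();
        if (childTasks != null && !childTasks.isEmpty()) {
            Task<? extends Serializable> childTask = childTasks.get(0);
            currentTask.removeDependentTask(childTask);
            newTask.addDependentTask(childTask);
        }
        currentTask.addDependentTask(newTask);
        newTask.setFetchSource(currentTask.isFetchSource());
    }
}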

Example 24 with ReduceSinkOperator

use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.

the class SparkMapJoinOptimizer method convertJoinMapJoin.

/*
   * Once we have decided on the map join, the tree would transform from
   *
   *        |                   |
   *       Join               MapJoin
   *       / \                /   \
   *     RS   RS   --->     RS    TS (big table)
   *    /      \           /
   *   TS       TS        TS (small table)
   *
   * for spark.
   */
public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeSparkProcContext context, int bigTablePosition) throws SemanticException {
    // bail out if any parent is a MuxOperator, because the mux operator currently masks
    // the emit keys of the constituent reduce sinks.
    for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
        if (parentOp instanceof MuxOperator) {
            return null;
        }
    }
    // can safely convert the join to a map join.
    MapJoinOperator mapJoinOp = MapJoinProcessor.convertJoinOpMapJoinOp(context.getConf(), joinOp, joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(), joinOp.getConf().getMapAliases(), bigTablePosition, true);
    Operator<? extends OperatorDesc> parentBigTableOp = mapJoinOp.getParentOperators().get(bigTablePosition);
    if (parentBigTableOp instanceof ReduceSinkOperator) {
        mapJoinOp.getParentOperators().remove(bigTablePosition);
        if (!(mapJoinOp.getParentOperators().contains(parentBigTableOp.getParentOperators().get(0)))) {
            mapJoinOp.getParentOperators().add(bigTablePosition, parentBigTableOp.getParentOperators().get(0));
        }
        parentBigTableOp.getParentOperators().get(0).removeChild(parentBigTableOp);
        for (Operator<? extends OperatorDesc> op : mapJoinOp.getParentOperators()) {
            if (!(op.getChildOperators().contains(mapJoinOp))) {
                op.getChildOperators().add(mapJoinOp);
            }
            op.getChildOperators().remove(joinOp);
        }
    }
    // carry over the QB join tree data structures to the map join
    mapJoinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
    return mapJoinOp;
}
Also used : MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MuxOperator(org.apache.hadoop.hive.ql.exec.MuxOperator)
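
The part worth isolating here is the ReduceSink bypass on the big-table side: the RS is dropped from the map join's parent list and its own parent is wired in directly. A condensed sketch of that rewiring under the same single-parent assumption as the code above (the helper name is hypothetical; the original additionally removes the old JoinOperator from every parent's child list):

import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;

public final class BigTableRsBypassSketch {

    // Remove the big-table ReduceSink and connect its parent directly to the map join,
    // mirroring the rewiring block in SparkMapJoinOptimizer.convertJoinMapJoin above.
    static void bypassBigTableRs(MapJoinOperator mapJoinOp, ReduceSinkOperator bigTableRs,
                                 int bigTablePosition) {
        Operator<? extends OperatorDesc> grandParent = bigTableRs.getParentOperators().get(0);
        mapJoinOp.getParentOperators().remove(bigTablePosition);
        if (!mapJoinOp.getParentOperators().contains(grandParent)) {
            mapJoinOp.getParentOperators().add(bigTablePosition, grandParent);
        }
        grandParent.removeChild(bigTableRs);
        if (!grandParent.getChildOperators().contains(mapJoinOp)) {
            grandParent.getChildOperators().add(mapJoinOp);
        }
    }
}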

Example 25 with ReduceSinkOperator

use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.

the class SemanticAnalyzer method genGroupByPlanReduceSinkOperator.

/**
   * Generate the ReduceSinkOperator for the Group By Query Block
   * (qb.getParseInfo().getXXX(dest)). The new ReduceSinkOperator will be a child
   * of inputOperatorInfo.
   *
   * It will put all Group By keys and the distinct field (if any) in the
   * map-reduce sort key, and all other fields in the map-reduce value.
   *
   * @param numPartitionFields
   *          the number of fields for map-reduce partitioning. This is usually
   *          the number of fields in the Group By keys.
   * @return the new ReduceSinkOperator.
   * @throws SemanticException
   */
@SuppressWarnings("nls")
private ReduceSinkOperator genGroupByPlanReduceSinkOperator(QB qb, String dest, Operator inputOperatorInfo, List<ASTNode> grpByExprs, int numPartitionFields, boolean changeNumPartitionFields, int numReducers, boolean mapAggrDone, boolean groupingSetsPresent) throws SemanticException {
    RowResolver reduceSinkInputRowResolver = opParseCtx.get(inputOperatorInfo).getRowResolver();
    QBParseInfo parseInfo = qb.getParseInfo();
    RowResolver reduceSinkOutputRowResolver = new RowResolver();
    reduceSinkOutputRowResolver.setIsExprResolver(true);
    Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
    // Pre-compute group-by keys and store in reduceKeys
    List<String> outputKeyColumnNames = new ArrayList<String>();
    List<String> outputValueColumnNames = new ArrayList<String>();
    ArrayList<ExprNodeDesc> reduceKeys = getReduceKeysForReduceSink(grpByExprs, dest, reduceSinkInputRowResolver, reduceSinkOutputRowResolver, outputKeyColumnNames, colExprMap);
    int keyLength = reduceKeys.size();
    int numOfColsRmedFromkey = grpByExprs.size() - keyLength;
    // add a key for reduce sink
    if (groupingSetsPresent) {
        // Process grouping set for the reduce sink operator
        processGroupingSetReduceSinkOperator(reduceSinkInputRowResolver, reduceSinkOutputRowResolver, reduceKeys, outputKeyColumnNames, colExprMap);
        if (changeNumPartitionFields) {
            numPartitionFields++;
        }
    }
    List<List<Integer>> distinctColIndices = getDistinctColIndicesForReduceSink(parseInfo, dest, reduceKeys, reduceSinkInputRowResolver, reduceSinkOutputRowResolver, outputKeyColumnNames, colExprMap);
    ArrayList<ExprNodeDesc> reduceValues = new ArrayList<ExprNodeDesc>();
    HashMap<String, ASTNode> aggregationTrees = parseInfo.getAggregationExprsForClause(dest);
    if (!mapAggrDone) {
        getReduceValuesForReduceSinkNoMapAgg(parseInfo, dest, reduceSinkInputRowResolver, reduceSinkOutputRowResolver, outputValueColumnNames, reduceValues, colExprMap);
    } else {
        // Put partial aggregation results in reduceValues
        int inputField = reduceKeys.size() + numOfColsRmedFromkey;
        for (Map.Entry<String, ASTNode> entry : aggregationTrees.entrySet()) {
            TypeInfo type = reduceSinkInputRowResolver.getColumnInfos().get(inputField).getType();
            ExprNodeColumnDesc exprDesc = new ExprNodeColumnDesc(type, getColumnInternalName(inputField), "", false);
            reduceValues.add(exprDesc);
            inputField++;
            String outputColName = getColumnInternalName(reduceValues.size() - 1);
            outputValueColumnNames.add(outputColName);
            String internalName = Utilities.ReduceField.VALUE.toString() + "." + outputColName;
            reduceSinkOutputRowResolver.putExpression(entry.getValue(), new ColumnInfo(internalName, type, null, false));
            colExprMap.put(internalName, exprDesc);
        }
    }
    ReduceSinkOperator rsOp = (ReduceSinkOperator) putOpInsertMap(OperatorFactory.getAndMakeChild(PlanUtils.getReduceSinkDesc(reduceKeys, groupingSetsPresent ? keyLength + 1 : keyLength, reduceValues, distinctColIndices, outputKeyColumnNames, outputValueColumnNames, true, -1, numPartitionFields, numReducers, AcidUtils.Operation.NOT_ACID), new RowSchema(reduceSinkOutputRowResolver.getColumnInfos()), inputOperatorInfo), reduceSinkOutputRowResolver);
    rsOp.setColumnExprMap(colExprMap);
    return rsOp;
}
Also used : RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) PrimitiveTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) LinkedList(java.util.LinkedList) List(java.util.List) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) Map(java.util.Map)
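
The reduce-value handling above relies on a naming convention: each forwarded column is registered in colExprMap under an internal name of the form VALUE._colN, mapped to the ExprNodeColumnDesc it was built from. A small self-contained sketch of that convention (the class name, the double type, and the literal "_col" + index scheme are illustrative assumptions):

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public final class ReduceValueNamingSketch {

    public static void main(String[] args) {
        Map<String, ExprNodeDesc> colExprMap = new LinkedHashMap<String, ExprNodeDesc>();
        // Pretend two partial-aggregation columns are being forwarded as reduce values.
        for (int i = 0; i < 2; i++) {
            String outputColName = "_col" + i; // assumed internal-name scheme
            ExprNodeColumnDesc expr = new ExprNodeColumnDesc(
                TypeInfoFactory.doubleTypeInfo, outputColName, "", false);
            String internalName = Utilities.ReduceField.VALUE.toString() + "." + outputColName;
            colExprMap.put(internalName, expr);
        }
        // Contains VALUE._col0 and VALUE._col1: the names downstream operators use to
        // refer to columns coming out of the reduce sink's value section.
        System.out.println(colExprMap.keySet());
    }
}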

Aggregations

ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 62
Operator (org.apache.hadoop.hive.ql.exec.Operator): 37
ArrayList (java.util.ArrayList): 34
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 29
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 27
HashMap (java.util.HashMap): 23
RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema): 21
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 20
GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator): 18
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 18
List (java.util.List): 17
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 17
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc): 17
FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator): 16
FilterOperator (org.apache.hadoop.hive.ql.exec.FilterOperator): 14
SMBMapJoinOperator (org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator): 14
SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator): 14
UnionOperator (org.apache.hadoop.hive.ql.exec.UnionOperator): 14
LinkedHashMap (java.util.LinkedHashMap): 13
ReduceSinkDesc (org.apache.hadoop.hive.ql.plan.ReduceSinkDesc): 12