
Example 31 with MapJoinOperator

use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.

the class ConvertJoinMapJoin method convertJoinBucketMapJoin.

private boolean convertJoinBucketMapJoin(JoinOperator joinOp, OptimizeTezProcContext context, int bigTablePosition, TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException {
    if (!checkConvertJoinBucketMapJoin(joinOp, bigTablePosition, tezBucketJoinProcCtx)) {
        LOG.info("Check conversion to bucket map join failed.");
        return false;
    }
    // In case the join has extra keys other than the bucketed columns, partition keys need to be updated
    // on small table(s).
    ReduceSinkOperator bigTableRS = (ReduceSinkOperator) joinOp.getParentOperators().get(bigTablePosition);
    OpTraits opTraits = bigTableRS.getOpTraits();
    List<List<String>> listBucketCols = opTraits.getBucketColNames();
    ArrayList<ExprNodeDesc> bigTablePartitionCols = bigTableRS.getConf().getPartitionCols();
    boolean updatePartitionCols = false;
    List<Integer> positions = new ArrayList<>();
    if (listBucketCols.get(0).size() != bigTablePartitionCols.size()) {
        updatePartitionCols = true;
        // Prepare updated partition columns for small table(s).
        // Get the positions of bucketed columns
        int i = 0;
        Map<String, ExprNodeDesc> colExprMap = bigTableRS.getColumnExprMap();
        for (ExprNodeDesc bigTableExpr : bigTablePartitionCols) {
            // It is guaranteed there is only 1 list within listBucketCols.
            for (String colName : listBucketCols.get(0)) {
                if (colExprMap.get(colName).isSame(bigTableExpr)) {
                    positions.add(i++);
                }
            }
        }
    }
    MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, bigTablePosition, true);
    if (mapJoinOp == null) {
        LOG.debug("Conversion to bucket map join failed.");
        return false;
    }
    MapJoinDesc joinDesc = mapJoinOp.getConf();
    joinDesc.setBucketMapJoin(true);
    // we can set the traits for this join operator
    opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), tezBucketJoinProcCtx.getNumBuckets(), null, joinOp.getOpTraits().getNumReduceSinks());
    mapJoinOp.setOpTraits(opTraits);
    mapJoinOp.setStatistics(joinOp.getStatistics());
    setNumberOfBucketsOnChildren(mapJoinOp);
    // Once the conversion is done, we can set the partitioner to bucket cols on the small table
    Map<String, Integer> bigTableBucketNumMapping = new HashMap<String, Integer>();
    bigTableBucketNumMapping.put(joinDesc.getBigTableAlias(), tezBucketJoinProcCtx.getNumBuckets());
    joinDesc.setBigTableBucketNumMapping(bigTableBucketNumMapping);
    // Update the partition columns on the small table side to ensure correct routing of hash tables.
    if (updatePartitionCols) {
        for (Operator<?> op : mapJoinOp.getParentOperators()) {
            if (!(op instanceof ReduceSinkOperator)) {
                continue;
            }
            ReduceSinkOperator rsOp = (ReduceSinkOperator) op;
            ArrayList<ExprNodeDesc> newPartitionCols = new ArrayList<>();
            ArrayList<ExprNodeDesc> partitionCols = rsOp.getConf().getPartitionCols();
            for (Integer position : positions) {
                newPartitionCols.add(partitionCols.get(position));
            }
            rsOp.getConf().setPartitionCols(newPartitionCols);
        }
    }
    // Update the memory monitor info for LLAP.
    MemoryMonitorInfo memoryMonitorInfo = joinDesc.getMemoryMonitorInfo();
    if (memoryMonitorInfo.isLlap()) {
        memoryMonitorInfo.setHashTableInflationFactor(1);
        memoryMonitorInfo.setMemoryOverSubscriptionFactor(0);
    }
    return true;
}
Also used : MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) OpTraits(org.apache.hadoop.hive.ql.plan.OpTraits) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MemoryMonitorInfo(org.apache.hadoop.hive.ql.exec.MemoryMonitorInfo) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) List(java.util.List) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc)
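The two loops above can be isolated from Hive's operator machinery. The following is a minimal, hypothetical sketch on plain Java lists (the names and columns are illustrative, not Hive API): pass 1 records the positions of partition columns that match bucket columns, and pass 2 uses those positions to project a small table's partition columns, just as the loop over mapJoinOp.getParentOperators() does.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PartitionColRemapSketch {
    public static void main(String[] args) {
        // Hypothetical stand-ins for the big table's partition columns and the
        // subset of them that are bucketed columns.
        List<String> bigTablePartitionCols = Arrays.asList("key", "value", "ds");
        List<String> bucketCols = Arrays.asList("key", "ds");

        // Pass 1: record the positions of partition columns that are bucket columns.
        List<Integer> positions = new ArrayList<>();
        for (int i = 0; i < bigTablePartitionCols.size(); i++) {
            if (bucketCols.contains(bigTablePartitionCols.get(i))) {
                positions.add(i);
            }
        }

        // Pass 2: project a small table's partition columns down to those positions.
        List<String> smallTablePartitionCols = Arrays.asList("k", "v", "d");
        List<String> newPartitionCols = new ArrayList<>();
        for (Integer position : positions) {
            newPartitionCols.add(smallTablePartitionCols.get(position));
        }
        System.out.println(newPartitionCols); // prints [k, d]
    }
}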

Example 32 with MapJoinOperator

use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.

the class ConvertJoinMapJoin method convertJoinDynamicPartitionedHashJoin.

private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, OptimizeTezProcContext context, final long maxSize) throws SemanticException {
    // Attempt dynamic partitioned hash join
    // Since we don't have big table index yet, must start with estimate of numReducers
    int numReducers = estimateNumBuckets(joinOp, false);
    LOG.info("Try dynamic partitioned hash join with estimated " + numReducers + " reducers");
    int bigTablePos = getMapJoinConversionPos(joinOp, context, numReducers, false, maxSize, false);
    if (bigTablePos >= 0) {
        // Now that we have the big table index, get real numReducers value based on big table RS
        ReduceSinkOperator bigTableParentRS = (ReduceSinkOperator) (joinOp.getParentOperators().get(bigTablePos));
        numReducers = bigTableParentRS.getConf().getNumReducers();
        LOG.debug("Real big table reducers = " + numReducers);
        MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, bigTablePos, false);
        if (mapJoinOp != null) {
            LOG.info("Selected dynamic partitioned hash join");
            mapJoinOp.getConf().setDynamicPartitionHashJoin(true);
            // Set OpTraits for dynamically partitioned hash join:
            // bucketColNames: Re-use previous joinOp's bucketColNames. Parent operators should be
            // reduce sink, which should have bucket columns based on the join keys.
            // numBuckets: set to number of reducers
            // sortCols: This is an unsorted join - no sort cols
            OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numReducers, null, joinOp.getOpTraits().getNumReduceSinks());
            mapJoinOp.setOpTraits(opTraits);
            mapJoinOp.setStatistics(joinOp.getStatistics());
            // propagate this change till the next RS
            for (Operator<? extends OperatorDesc> childOp : mapJoinOp.getChildOperators()) {
                setAllChildrenTraits(childOp, mapJoinOp.getOpTraits());
            }
            return true;
        }
    }
    return false;
}
Also used : MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) OpTraits(org.apache.hadoop.hive.ql.plan.OpTraits) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator)
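For context, dynamic partitioned hash join is Hive's fallback when the small side is too large to broadcast but can still be hash-joined within each reduce partition (in Hive it is gated by the hive.optimize.dynamic.partition.hashjoin setting). Below is a minimal, hypothetical sketch of that decision chain; the names and thresholds are illustrative, not Hive's actual API.

public class JoinStrategySketch {
    enum Strategy { MAP_JOIN, DYNAMIC_PARTITIONED_HASH_JOIN, SHUFFLE_JOIN }

    // Illustrative decision chain: broadcast if the small side fits in one
    // task's memory budget; otherwise hash-join per reduce partition if
    // enabled; otherwise fall back to a shuffle (merge) join.
    static Strategy choose(long smallTableSize, long memoryBudget, boolean dphjEnabled) {
        if (smallTableSize <= memoryBudget) {
            return Strategy.MAP_JOIN;
        }
        return dphjEnabled ? Strategy.DYNAMIC_PARTITIONED_HASH_JOIN : Strategy.SHUFFLE_JOIN;
    }

    public static void main(String[] args) {
        System.out.println(choose(10L << 20, 100L << 20, true));  // MAP_JOIN
        System.out.println(choose(1L << 30, 100L << 20, true));   // DYNAMIC_PARTITIONED_HASH_JOIN
        System.out.println(choose(1L << 30, 100L << 20, false));  // SHUFFLE_JOIN
    }
}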

Example 33 with MapJoinOperator

use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.

the class MapJoinProcessor method genMapJoinLocalWork.

/**
 * Generate the MapRed Local Work for the given map-join operator
 *
 * @param newWork
 * @param mapJoinOp
 *          map-join operator for which local work needs to be generated.
 * @param bigTablePos
 * @throws SemanticException
 */
private static void genMapJoinLocalWork(MapredWork newWork, MapJoinOperator mapJoinOp, int bigTablePos) throws SemanticException {
    // keep the small table alias to avoid concurrent modification exception
    ArrayList<String> smallTableAliasList = new ArrayList<String>();
    // create a new MapredLocalWork
    MapredLocalWork newLocalWork = new MapredLocalWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>(), new LinkedHashMap<String, FetchWork>());
    for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : newWork.getMapWork().getAliasToWork().entrySet()) {
        String alias = entry.getKey();
        Operator<? extends OperatorDesc> op = entry.getValue();
        // if the table scan is for the big table, skip it
        // tracing down the operator tree from the table scan operator
        Operator<? extends OperatorDesc> parentOp = op;
        Operator<? extends OperatorDesc> childOp = op.getChildOperators().get(0);
        while ((childOp != null) && (!childOp.equals(mapJoinOp))) {
            parentOp = childOp;
            assert parentOp.getChildOperators().size() == 1;
            childOp = parentOp.getChildOperators().get(0);
        }
        if (childOp == null) {
            throw new SemanticException("Cannot find join op by tracing down the table scan operator tree");
        }
        // skip the big table pos
        int i = childOp.getParentOperators().indexOf(parentOp);
        if (i == bigTablePos) {
            continue;
        }
        // set alias to work and put into smallTableAliasList
        newLocalWork.getAliasToWork().put(alias, op);
        smallTableAliasList.add(alias);
        // get input path and remove this alias from pathToAlias
        // because this file will be fetched by fetch operator
        LinkedHashMap<Path, ArrayList<String>> pathToAliases = newWork.getMapWork().getPathToAliases();
        // keep record all the input path for this alias
        HashSet<Path> pathSet = new HashSet<>();
        HashSet<Path> emptyPath = new HashSet<>();
        for (Map.Entry<Path, ArrayList<String>> entry2 : pathToAliases.entrySet()) {
            Path path = entry2.getKey();
            ArrayList<String> list = entry2.getValue();
            if (list.contains(alias)) {
                // add to path set
                pathSet.add(path);
                // remove this alias from the alias list
                list.remove(alias);
                if (list.size() == 0) {
                    emptyPath.add(path);
                }
            }
        }
        // remove the path, with which no alias associates
        for (Path path : emptyPath) {
            newWork.getMapWork().removePathToAlias(path);
        }
        // create fetch work
        FetchWork fetchWork = null;
        List<Path> partDir = new ArrayList<Path>();
        List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
        for (Path tablePath : pathSet) {
            PartitionDesc partitionDesc = newWork.getMapWork().getPathToPartitionInfo().get(tablePath);
            // create fetchwork for non partitioned table
            if (partitionDesc.getPartSpec() == null || partitionDesc.getPartSpec().size() == 0) {
                fetchWork = new FetchWork(tablePath, partitionDesc.getTableDesc());
                break;
            }
            // if the table is partitioned, add partDir and partitionDesc
            partDir.add(tablePath);
            partDesc.add(partitionDesc);
        }
        // create fetchwork for partitioned table
        if (fetchWork == null) {
            TableDesc table = newWork.getMapWork().getAliasToPartnInfo().get(alias).getTableDesc();
            fetchWork = new FetchWork(partDir, partDesc, table);
        }
        // set alias to fetch work
        newLocalWork.getAliasToFetchWork().put(alias, fetchWork);
    }
    // remove small table aliases from aliasToWork to avoid concurrent modification
    for (String alias : smallTableAliasList) {
        newWork.getMapWork().getAliasToWork().remove(alias);
    }
    // set up local work
    newWork.getMapWork().setMapRedLocalWork(newLocalWork);
    // remove reducer
    newWork.setReduceWork(null);
}
Also used : LateralViewJoinOperator(org.apache.hadoop.hive.ql.exec.LateralViewJoinOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) ScriptOperator(org.apache.hadoop.hive.ql.exec.ScriptOperator) Path(org.apache.hadoop.fs.Path) ArrayList(java.util.ArrayList) MapredLocalWork(org.apache.hadoop.hive.ql.plan.MapredLocalWork) FetchWork(org.apache.hadoop.hive.ql.plan.FetchWork) PartitionDesc(org.apache.hadoop.hive.ql.plan.PartitionDesc) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HashSet(java.util.HashSet)
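The pathToAliases bookkeeping in the middle of this method (collect the alias's input paths, drop the alias, then delete paths left with no readers) can be shown on plain collections. A minimal sketch with hypothetical paths and aliases standing in for Hive's Path and MapWork types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class PathToAliasPruneSketch {
    public static void main(String[] args) {
        // Hypothetical input paths mapped to the aliases that read them.
        Map<String, List<String>> pathToAliases = new LinkedHashMap<>();
        pathToAliases.put("/warehouse/small_t", new ArrayList<>(Arrays.asList("s")));
        pathToAliases.put("/warehouse/shared", new ArrayList<>(Arrays.asList("s", "b")));

        String alias = "s"; // the small-table alias being moved to local fetch work
        Set<String> pathSet = new HashSet<>();   // paths the fetch operator will read
        Set<String> emptyPath = new HashSet<>(); // paths no longer read by any alias

        for (Map.Entry<String, List<String>> entry : pathToAliases.entrySet()) {
            List<String> aliases = entry.getValue();
            if (aliases.contains(alias)) {
                pathSet.add(entry.getKey());   // keep a record of the input path
                aliases.remove(alias);         // this file is now fetched locally
                if (aliases.isEmpty()) {
                    emptyPath.add(entry.getKey());
                }
            }
        }
        // Remove the paths with which no alias is associated anymore.
        pathToAliases.keySet().removeAll(emptyPath);

        System.out.println(pathSet);       // contains both paths
        System.out.println(pathToAliases); // {/warehouse/shared=[b]}
    }
}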

Example 34 with MapJoinOperator

use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.

the class MapJoinProcessor method transform.

/**
 * Transform the query tree. For each join, check if it is a map-side join (user specified). If
 * yes, convert it to a map-side join.
 *
 * @param pactx
 *          current parse context
 */
@Override
public ParseContext transform(ParseContext pactx) throws SemanticException {
    List<MapJoinOperator> listMapJoinOps = new ArrayList<MapJoinOperator>();
    // traverse all the joins and convert them if necessary
    if (pactx.getJoinOps() != null) {
        Set<JoinOperator> joinMap = new HashSet<JoinOperator>();
        Set<MapJoinOperator> mapJoinMap = pactx.getMapJoinOps();
        if (mapJoinMap == null) {
            mapJoinMap = new HashSet<MapJoinOperator>();
            pactx.setMapJoinOps(mapJoinMap);
        }
        Iterator<JoinOperator> joinCtxIter = pactx.getJoinOps().iterator();
        while (joinCtxIter.hasNext()) {
            JoinOperator joinOp = joinCtxIter.next();
            int mapJoinPos = mapSideJoin(joinOp);
            if (mapJoinPos >= 0) {
                MapJoinOperator mapJoinOp = generateMapJoinOperator(pactx, joinOp, mapJoinPos);
                listMapJoinOps.add(mapJoinOp);
                mapJoinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
                mapJoinMap.add(mapJoinOp);
            } else {
                joinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
                joinMap.add(joinOp);
            }
        }
        // store the new joinContext
        pactx.setJoinOps(joinMap);
    }
    // Go over the list and find if a reducer is not needed
    List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoRed = new ArrayList<AbstractMapJoinOperator<? extends MapJoinDesc>>();
    // create a walker which walks the tree in a DFS manner while maintaining
    // the operator stack.
    // The dispatcher generates the plan from the operator tree
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("R0", MapJoinOperator.getOperatorName() + "%"), getCurrentMapJoin());
    opRules.put(new RuleRegExp("R1", MapJoinOperator.getOperatorName() + "%.*" + FileSinkOperator.getOperatorName() + "%"), getMapJoinFS());
    opRules.put(new RuleRegExp("R2", MapJoinOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), getMapJoinDefault());
    opRules.put(new RuleRegExp("R4", MapJoinOperator.getOperatorName() + "%.*" + UnionOperator.getOperatorName() + "%"), getMapJoinDefault());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, new MapJoinWalkerCtx(listMapJoinOpsNoRed, pactx));
    GraphWalker ogw = new GenMapRedWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(listMapJoinOps);
    ogw.startWalking(topNodes, null);
    pactx.setListMapJoinOpsNoReducer(listMapJoinOpsNoRed);
    return pactx;
}
Also used : LateralViewJoinOperator(org.apache.hadoop.hive.ql.exec.LateralViewJoinOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Node(org.apache.hadoop.hive.ql.lib.Node) ArrayList(java.util.ArrayList) Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) LinkedHashMap(java.util.LinkedHashMap) GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker) HashSet(java.util.HashSet) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor) RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp) GenMapRedWalker(org.apache.hadoop.hive.ql.parse.GenMapRedWalker) Rule(org.apache.hadoop.hive.ql.lib.Rule)
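The R0/R1/R2/R4 registrations above pair a regular expression over the walked operator-name path with a processor, and the dispatcher fires the closest match. The following is a deliberately simplified, hypothetical stand-in for that pattern on plain strings; Hive's RuleRegExp and DefaultRuleDispatcher use a rule-cost computation rather than this longest-pattern heuristic, and all names here are illustrative.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

public class RuleDispatchSketch {
    public static void main(String[] args) {
        // Illustrative rules: regex over the operator-name path -> processor name.
        Map<Pattern, String> rules = new LinkedHashMap<>();
        rules.put(Pattern.compile("MAPJOIN"), "currentMapJoin");
        rules.put(Pattern.compile("MAPJOIN.*FS"), "mapJoinFollowedByFileSink");
        rules.put(Pattern.compile("MAPJOIN.*RS"), "mapJoinFollowedByReduceSink");

        // The walker keeps a stack of operators from the root to the current
        // node; here it is reduced to their names joined into one path string.
        List<String> stack = Arrays.asList("TS", "MAPJOIN", "SEL", "FS");
        String path = String.join("", stack);

        // Fire the most specific matching rule, falling back to a default.
        String chosen = "default";
        int best = -1;
        for (Map.Entry<Pattern, String> rule : rules.entrySet()) {
            boolean matches = rule.getKey().matcher(path).find();
            if (matches && rule.getKey().pattern().length() > best) {
                best = rule.getKey().pattern().length();
                chosen = rule.getValue();
            }
        }
        System.out.println(chosen); // mapJoinFollowedByFileSink
    }
}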

Example 35 with MapJoinOperator

use of org.apache.hadoop.hive.ql.exec.MapJoinOperator in project hive by apache.

the class MapJoinProcessor method convertMapJoin.

/**
 * Convert a regular join to a map-side join.
 *
 * @param opParseCtxMap
 * @param op
 *          join operator
 * @param joinTree
 *          qb join tree
 * @param mapJoinPos
 *          position of the source to be read as part of map-reduce framework. All other sources
 *          are cached in memory
 * @param noCheckOuterJoin
 * @param validateMapJoinTree
 */
public MapJoinOperator convertMapJoin(HiveConf conf, JoinOperator op, boolean leftInputJoin, String[] baseSrc, List<String> mapAliases, int mapJoinPos, boolean noCheckOuterJoin, boolean validateMapJoinTree) throws SemanticException {
    // outer join cannot be performed on a table which is being cached
    JoinDesc desc = op.getConf();
    JoinCondDesc[] condns = desc.getConds();
    if (!noCheckOuterJoin) {
        if (checkMapJoin(mapJoinPos, condns) < 0) {
            throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
        }
    }
    // Walk over all the sources (which are guaranteed to be reduce sink
    // operators).
    // The join outputs a concatenation of all the inputs.
    List<Operator<? extends OperatorDesc>> parentOps = op.getParentOperators();
    List<Operator<? extends OperatorDesc>> newParentOps = new ArrayList<Operator<? extends OperatorDesc>>();
    List<Operator<? extends OperatorDesc>> oldReduceSinkParentOps = new ArrayList<Operator<? extends OperatorDesc>>();
    // found a source which is not to be stored in memory
    if (leftInputJoin) {
        // assert mapJoinPos == 0;
        Operator<? extends OperatorDesc> parentOp = parentOps.get(0);
        assert parentOp.getParentOperators().size() == 1;
        Operator<? extends OperatorDesc> grandParentOp = parentOp.getParentOperators().get(0);
        oldReduceSinkParentOps.add(parentOp);
        newParentOps.add(grandParentOp);
    }
    byte pos = 0;
    // Remove parent reduce-sink operators
    for (String src : baseSrc) {
        if (src != null) {
            Operator<? extends OperatorDesc> parentOp = parentOps.get(pos);
            assert parentOp.getParentOperators().size() == 1;
            Operator<? extends OperatorDesc> grandParentOp = parentOp.getParentOperators().get(0);
            oldReduceSinkParentOps.add(parentOp);
            newParentOps.add(grandParentOp);
        }
        pos++;
    }
    // create the map-join operator
    MapJoinOperator mapJoinOp = convertJoinOpMapJoinOp(conf, op, leftInputJoin, baseSrc, mapAliases, mapJoinPos, noCheckOuterJoin);
    // remove old parents
    for (pos = 0; pos < newParentOps.size(); pos++) {
        newParentOps.get(pos).replaceChild(oldReduceSinkParentOps.get(pos), mapJoinOp);
    }
    mapJoinOp.getParentOperators().removeAll(oldReduceSinkParentOps);
    mapJoinOp.setParentOperators(newParentOps);
    // make sure only map-joins can be performed.
    if (validateMapJoinTree) {
        validateMapJoinTypes(mapJoinOp);
    }
    return mapJoinOp;
}
Also used : LateralViewJoinOperator(org.apache.hadoop.hive.ql.exec.LateralViewJoinOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) ScriptOperator(org.apache.hadoop.hive.ql.exec.ScriptOperator) ArrayList(java.util.ArrayList) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc) SMBJoinDesc(org.apache.hadoop.hive.ql.plan.SMBJoinDesc) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) JoinCondDesc(org.apache.hadoop.hive.ql.plan.JoinCondDesc) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
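The grandparent rewiring near the end (replaceChild plus fixing the new operator's parent list) is a splice in the operator DAG. A minimal, hypothetical sketch with a toy node type (Hive's Operator carries far more state than this):

import java.util.ArrayList;
import java.util.List;

public class OperatorSpliceSketch {
    // Toy operator node; illustrative only.
    static class Op {
        final String name;
        final List<Op> parents = new ArrayList<>();
        final List<Op> children = new ArrayList<>();
        Op(String name) { this.name = name; }
        void replaceChild(Op oldChild, Op newChild) {
            children.set(children.indexOf(oldChild), newChild);
        }
    }

    public static void main(String[] args) {
        // TS -> RS -> JOIN becomes TS -> MAPJOIN: the reduce sink is spliced out.
        Op ts = new Op("TS");
        Op rs = new Op("RS");
        Op mapJoin = new Op("MAPJOIN");
        ts.children.add(rs);
        rs.parents.add(ts);
        mapJoin.parents.add(rs); // pre-conversion wiring, as in the join operator

        ts.replaceChild(rs, mapJoin); // grandparent now points at the map join
        mapJoin.parents.remove(rs);   // drop the old reduce-sink parent ...
        mapJoin.parents.add(ts);      // ... and adopt the grandparent directly

        System.out.println(ts.children.get(0).name);     // MAPJOIN
        System.out.println(mapJoin.parents.get(0).name); // TS
    }
}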

Aggregations

MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 41
Operator (org.apache.hadoop.hive.ql.exec.Operator): 22
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 22
ArrayList (java.util.ArrayList): 19
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 18
SMBMapJoinOperator (org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator): 17
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc): 15
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 13
MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc): 12
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 11
FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator): 9
GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator): 9
UnionOperator (org.apache.hadoop.hive.ql.exec.UnionOperator): 9
HashMap (java.util.HashMap): 8
AbstractMapJoinOperator (org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator): 8
RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema): 8
SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator): 8
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 8
TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc): 8
List (java.util.List): 7