Example 11 with MapJoinDesc

Use of org.apache.hadoop.hive.ql.plan.MapJoinDesc in project hive by apache.

The class MapJoinProcessor, method getMapJoinDesc.

public static MapJoinDesc getMapJoinDesc(HiveConf hconf, JoinOperator op, boolean leftInputJoin, String[] baseSrc, List<String> mapAliases, int mapJoinPos, boolean noCheckOuterJoin, boolean adjustParentsChildren) throws SemanticException {
    JoinDesc desc = op.getConf();
    JoinCondDesc[] condns = desc.getConds();
    Byte[] tagOrder = desc.getTagOrder();
    // outer join cannot be performed on a table which is being cached
    if (!noCheckOuterJoin) {
        if (checkMapJoin(mapJoinPos, condns) < 0) {
            throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
        }
    }
    Map<String, ExprNodeDesc> colExprMap = op.getColumnExprMap();
    List<ColumnInfo> schema = new ArrayList<ColumnInfo>(op.getSchema().getSignature());
    Map<Byte, List<ExprNodeDesc>> valueExprs = op.getConf().getExprs();
    Map<Byte, List<ExprNodeDesc>> newValueExprs = new HashMap<Byte, List<ExprNodeDesc>>();
    ObjectPair<List<ReduceSinkOperator>, Map<Byte, List<ExprNodeDesc>>> pair = getKeys(leftInputJoin, baseSrc, op);
    List<ReduceSinkOperator> oldReduceSinkParentOps = pair.getFirst();
    for (Map.Entry<Byte, List<ExprNodeDesc>> entry : valueExprs.entrySet()) {
        byte tag = entry.getKey();
        Operator<?> terminal = oldReduceSinkParentOps.get(tag);
        List<ExprNodeDesc> values = entry.getValue();
        List<ExprNodeDesc> newValues = ExprNodeDescUtils.backtrack(values, op, terminal);
        newValueExprs.put(tag, newValues);
        for (int i = 0; i < schema.size(); i++) {
            ColumnInfo column = schema.get(i);
            if (column == null) {
                continue;
            }
            ExprNodeDesc expr = colExprMap.get(column.getInternalName());
            int index = ExprNodeDescUtils.indexOf(expr, values);
            if (index >= 0) {
                colExprMap.put(column.getInternalName(), newValues.get(index));
                schema.set(i, null);
            }
        }
    }
    // rewrite value index for mapjoin
    Map<Byte, int[]> valueIndices = new HashMap<Byte, int[]>();
    // get the join keys from old parent ReduceSink operators
    Map<Byte, List<ExprNodeDesc>> keyExprMap = pair.getSecond();
    if (!adjustParentsChildren) {
        // Since we did not remove reduce sink parents, keep the original value expressions
        newValueExprs = valueExprs;
        // Join key exprs are represented in terms of the original table columns,
        // we need to convert these to the generated column names we can see in the Join operator
        Map<Byte, List<ExprNodeDesc>> newKeyExprMap = new HashMap<Byte, List<ExprNodeDesc>>();
        for (Map.Entry<Byte, List<ExprNodeDesc>> mapEntry : keyExprMap.entrySet()) {
            Byte pos = mapEntry.getKey();
            ReduceSinkOperator rsParent = oldReduceSinkParentOps.get(pos.byteValue());
            List<ExprNodeDesc> keyExprList = ExprNodeDescUtils.resolveJoinKeysAsRSColumns(mapEntry.getValue(), rsParent);
            if (keyExprList == null) {
                throw new SemanticException("Error resolving join keys");
            }
            newKeyExprMap.put(pos, keyExprList);
        }
        keyExprMap = newKeyExprMap;
    }
    // construct valueTableDescs and valueFilteredTableDescs
    List<TableDesc> valueTableDescs = new ArrayList<TableDesc>();
    List<TableDesc> valueFilteredTableDescs = new ArrayList<TableDesc>();
    int[][] filterMap = desc.getFilterMap();
    for (byte pos = 0; pos < op.getParentOperators().size(); pos++) {
        List<ExprNodeDesc> valueCols = newValueExprs.get(pos);
        if (pos != mapJoinPos) {
            // remove values in key exprs for value table schema
            // value expression for hashsink will be modified in
            // LocalMapJoinProcessor
            int[] valueIndex = new int[valueCols.size()];
            List<ExprNodeDesc> valueColsInValueExpr = new ArrayList<ExprNodeDesc>();
            for (int i = 0; i < valueIndex.length; i++) {
                ExprNodeDesc expr = valueCols.get(i);
                int kindex = ExprNodeDescUtils.indexOf(expr, keyExprMap.get(pos));
                if (kindex >= 0) {
                    valueIndex[i] = kindex;
                } else {
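                    // not a key column: record its value-row position, encoded as -(pos + 1);
                    // see the decoding sketch after this example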
                    valueIndex[i] = -valueColsInValueExpr.size() - 1;
                    valueColsInValueExpr.add(expr);
                }
            }
            if (needValueIndex(valueIndex)) {
                valueIndices.put(pos, valueIndex);
            }
            valueCols = valueColsInValueExpr;
        }
        // deep copy expr node desc
        List<ExprNodeDesc> valueFilteredCols = ExprNodeDescUtils.clone(valueCols);
        if (filterMap != null && filterMap[pos] != null && pos != mapJoinPos) {
            ExprNodeColumnDesc isFilterDesc = new ExprNodeColumnDesc(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME), "filter", "filter", false);
            valueFilteredCols.add(isFilterDesc);
        }
        TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils.getFieldSchemasFromColumnList(valueCols, "mapjoinvalue"));
        TableDesc valueFilteredTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils.getFieldSchemasFromColumnList(valueFilteredCols, "mapjoinvalue"));
        valueTableDescs.add(valueTableDesc);
        valueFilteredTableDescs.add(valueFilteredTableDesc);
    }
    Map<Byte, List<ExprNodeDesc>> filters = desc.getFilters();
    Map<Byte, List<ExprNodeDesc>> newFilters = new HashMap<Byte, List<ExprNodeDesc>>();
    for (Map.Entry<Byte, List<ExprNodeDesc>> entry : filters.entrySet()) {
        byte srcTag = entry.getKey();
        List<ExprNodeDesc> filter = entry.getValue();
        Operator<?> terminal = oldReduceSinkParentOps.get(srcTag);
        newFilters.put(srcTag, ExprNodeDescUtils.backtrack(filter, op, terminal));
    }
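    // Swap in the backtracked filters; the same reference is passed to the
    // MapJoinDesc constructor below.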
    desc.setFilters(filters = newFilters);
    // create dumpfile prefix needed to create descriptor
    String dumpFilePrefix = "";
    if (mapAliases != null) {
        for (String mapAlias : mapAliases) {
            dumpFilePrefix = dumpFilePrefix + mapAlias;
        }
        dumpFilePrefix = dumpFilePrefix + "-" + PlanUtils.getCountForMapJoinDumpFilePrefix();
    } else {
        dumpFilePrefix = "mapfile" + PlanUtils.getCountForMapJoinDumpFilePrefix();
    }
    List<ExprNodeDesc> keyCols = keyExprMap.get((byte) mapJoinPos);
    List<String> outputColumnNames = op.getConf().getOutputColumnNames();
    TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(hconf, PlanUtils.getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX));
    JoinCondDesc[] joinCondns = op.getConf().getConds();
    MapJoinDesc mapJoinDescriptor = new MapJoinDesc(keyExprMap, keyTableDesc, newValueExprs, valueTableDescs, valueFilteredTableDescs, outputColumnNames, mapJoinPos, joinCondns, filters, op.getConf().getNoOuterJoin(), dumpFilePrefix);
    mapJoinDescriptor.setStatistics(op.getConf().getStatistics());
    mapJoinDescriptor.setTagOrder(tagOrder);
    mapJoinDescriptor.setNullSafes(desc.getNullSafes());
    mapJoinDescriptor.setFilterMap(desc.getFilterMap());
    mapJoinDescriptor.setResidualFilterExprs(desc.getResidualFilterExprs());
    if (!valueIndices.isEmpty()) {
        mapJoinDescriptor.setValueIndices(valueIndices);
    }
    return mapJoinDescriptor;
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) List(java.util.List) ArrayList(java.util.ArrayList) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) JoinCondDesc(org.apache.hadoop.hive.ql.plan.JoinCondDesc) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc) SMBJoinDesc(org.apache.hadoop.hive.ql.plan.SMBJoinDesc) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap)
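
The valueIndex encoding built in the loop above is compact but non-obvious: a non-negative entry means the output column is read back from the join key at that index, while a negative entry encodes position (-entry - 1) in the trimmed value columns. A minimal standalone sketch (plain Java, hypothetical names, not Hive code) that decodes such an array:

public class ValueIndexDemo {

    // Decode one valueIndex array into a human-readable column mapping.
    static String decode(int[] valueIndex) {
        StringBuilder sb = new StringBuilder();
        for (int entry : valueIndex) {
            if (entry >= 0) {
                sb.append("key[").append(entry).append("] ");        // read back from the join key
            } else {
                sb.append("value[").append(-entry - 1).append("] "); // read from the trimmed value row
            }
        }
        return sb.toString().trim();
    }

    public static void main(String[] args) {
        // Columns resolved as: key 0, value 0, key 2, value 1.
        System.out.println(decode(new int[] { 0, -1, 2, -2 }));
        // prints: key[0] value[0] key[2] value[1]
    }
}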

Example 12 with MapJoinDesc

Use of org.apache.hadoop.hive.ql.plan.MapJoinDesc in project hive by apache.

The class MapJoinProcessor, method transform.

/**
   * Transform the query tree. For each join, check whether the user specified it as a
   * map-side join; if so, convert it to a map join.
   *
   * @param pactx
   *          current parse context
   */
@Override
public ParseContext transform(ParseContext pactx) throws SemanticException {
    List<MapJoinOperator> listMapJoinOps = new ArrayList<MapJoinOperator>();
    // traverse all the joins and convert them if necessary
    if (pactx.getJoinOps() != null) {
        Set<JoinOperator> joinMap = new HashSet<JoinOperator>();
        Set<MapJoinOperator> mapJoinMap = pactx.getMapJoinOps();
        if (mapJoinMap == null) {
            mapJoinMap = new HashSet<MapJoinOperator>();
            pactx.setMapJoinOps(mapJoinMap);
        }
        Iterator<JoinOperator> joinCtxIter = pactx.getJoinOps().iterator();
        while (joinCtxIter.hasNext()) {
            JoinOperator joinOp = joinCtxIter.next();
            int mapJoinPos = mapSideJoin(joinOp);
            if (mapJoinPos >= 0) {
                MapJoinOperator mapJoinOp = generateMapJoinOperator(pactx, joinOp, mapJoinPos);
                listMapJoinOps.add(mapJoinOp);
                mapJoinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
                mapJoinMap.add(mapJoinOp);
            } else {
                joinOp.getConf().setQBJoinTreeProps(joinOp.getConf());
                joinMap.add(joinOp);
            }
        }
        // store the new joinContext
        pactx.setJoinOps(joinMap);
    }
    // Go over the list and find if a reducer is not needed
    List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoRed = new ArrayList<AbstractMapJoinOperator<? extends MapJoinDesc>>();
    // create a walker which walks the tree in a DFS manner while maintaining
    // the operator stack.
    // The dispatcher generates the plan from the operator tree
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("R0", MapJoinOperator.getOperatorName() + "%"), getCurrentMapJoin());
    opRules.put(new RuleRegExp("R1", MapJoinOperator.getOperatorName() + "%.*" + FileSinkOperator.getOperatorName() + "%"), getMapJoinFS());
    opRules.put(new RuleRegExp("R2", MapJoinOperator.getOperatorName() + "%.*" + ReduceSinkOperator.getOperatorName() + "%"), getMapJoinDefault());
    opRules.put(new RuleRegExp("R4", MapJoinOperator.getOperatorName() + "%.*" + UnionOperator.getOperatorName() + "%"), getMapJoinDefault());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, new MapJoinWalkerCtx(listMapJoinOpsNoRed, pactx));
    GraphWalker ogw = new GenMapRedWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(listMapJoinOps);
    ogw.startWalking(topNodes, null);
    pactx.setListMapJoinOpsNoReducer(listMapJoinOpsNoRed);
    return pactx;
}
Also used : LateralViewJoinOperator(org.apache.hadoop.hive.ql.exec.LateralViewJoinOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) Node(org.apache.hadoop.hive.ql.lib.Node) ArrayList(java.util.ArrayList) Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) LinkedHashMap(java.util.LinkedHashMap) GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker) HashSet(java.util.HashSet) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) AbstractMapJoinOperator(org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator) RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp) GenMapRedWalker(org.apache.hadoop.hive.ql.parse.GenMapRedWalker) Rule(org.apache.hadoop.hive.ql.lib.Rule)
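
The rule keys above are regular expressions over the path of operator names accumulated while walking the tree; the dispatcher fires the processor of the matching rule and falls back to getDefault() when nothing matches. A self-contained sketch of the idea, simplified to first-match dispatch over a precomputed path string (Hive's DefaultRuleDispatcher actually picks the closest match; the class and names here are hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class RuleDispatchDemo {
    public static void main(String[] args) {
        // Rule patterns over the operator-name path, in registration order.
        Map<Pattern, Runnable> opRules = new LinkedHashMap<>();
        opRules.put(Pattern.compile("MAPJOIN%.*FS%"), () -> System.out.println("map join feeds a file sink"));
        opRules.put(Pattern.compile("MAPJOIN%.*RS%"), () -> System.out.println("map join feeds a reduce sink"));
        opRules.put(Pattern.compile("MAPJOIN%"), () -> System.out.println("bare map join"));

        // Path of operator names accumulated while walking the tree in DFS order.
        String path = "TS%MAPJOIN%RS%";

        opRules.entrySet().stream()
                .filter(e -> e.getKey().matcher(path).find())
                .findFirst()
                .ifPresentOrElse(
                        e -> e.getValue().run(),            // fire the matching processor
                        () -> System.out.println("default processor"));
    }
}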

Example 13 with MapJoinDesc

Use of org.apache.hadoop.hive.ql.plan.MapJoinDesc in project hive by apache.

The class OperatorUtils, method findOperatorsUpstreamJoinAccounted.

public static <T> Set<T> findOperatorsUpstreamJoinAccounted(Operator<?> start, Class<T> clazz, Set<T> found) {
    if (clazz.isInstance(start)) {
        found.add((T) start);
    }
    int onlyIncludeIndex = -1;
    if (start instanceof AbstractMapJoinOperator) {
        AbstractMapJoinOperator mapJoinOp = (AbstractMapJoinOperator) start;
        MapJoinDesc desc = (MapJoinDesc) mapJoinOp.getConf();
        onlyIncludeIndex = desc.getPosBigTable();
    }
    if (start.getParentOperators() != null) {
        int i = 0;
        for (Operator<?> parent : start.getParentOperators()) {
            if (onlyIncludeIndex >= 0) {
                if (onlyIncludeIndex == i) {
                    findOperatorsUpstream(parent, clazz, found);
                }
            } else {
                findOperatorsUpstream(parent, clazz, found);
            }
            i++;
        }
    }
    return found;
}
Also used : MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc)
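
The traversal only follows the big-table parent (desc.getPosBigTable()) once a map join is reached, since the small-table sides are broadcast and need no further accounting. A self-contained sketch over a simplified node type (hypothetical names, not Hive's Operator hierarchy; the real method also switches to plain findOperatorsUpstream above the join, which this sketch glosses over):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class UpstreamDemo {

    static class Node {
        final String name;
        final List<Node> parents = new ArrayList<>();
        int bigTablePos = -1;   // >= 0 marks a map-join-like node

        Node(String name) {
            this.name = name;
        }
    }

    // Collect ancestors whose name matches, following only the big-table
    // branch whenever a join-like node is encountered.
    static void findUpstream(Node start, String prefix, Set<String> found) {
        if (start.name.startsWith(prefix)) {
            found.add(start.name);
        }
        for (int i = 0; i < start.parents.size(); i++) {
            if (start.bigTablePos >= 0 && start.bigTablePos != i) {
                continue;       // skip small-table sides: they are broadcast
            }
            findUpstream(start.parents.get(i), prefix, found);
        }
    }

    public static void main(String[] args) {
        Node scanA = new Node("TS_a");
        Node scanB = new Node("TS_b");
        Node join = new Node("MAPJOIN");
        join.parents.addAll(Arrays.asList(scanA, scanB));
        join.bigTablePos = 1;   // only TS_b is on the big-table side

        Set<String> found = new HashSet<>();
        findUpstream(join, "TS_", found);
        System.out.println(found);  // prints: [TS_b]
    }
}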

Example 14 with MapJoinDesc

Use of org.apache.hadoop.hive.ql.plan.MapJoinDesc in project hive by apache.

The class Vectorizer, method vectorizeOperator.

public Operator<? extends OperatorDesc> vectorizeOperator(Operator<? extends OperatorDesc> op, VectorizationContext vContext, boolean isTezOrSpark, VectorTaskColumnInfo vectorTaskColumnInfo) throws HiveException {
    Operator<? extends OperatorDesc> vectorOp = null;
    boolean isNative;
    switch(op.getType()) {
        case TABLESCAN:
            vectorOp = vectorizeTableScanOperator(op, vContext);
            isNative = true;
            break;
        case MAPJOIN:
            {
                if (op instanceof MapJoinOperator) {
                    VectorMapJoinInfo vectorMapJoinInfo = new VectorMapJoinInfo();
                    MapJoinDesc desc = (MapJoinDesc) op.getConf();
                    boolean specialize = canSpecializeMapJoin(op, desc, isTezOrSpark, vContext, vectorMapJoinInfo);
                    if (!specialize) {
                        Class<? extends Operator<?>> opClass = null;
                        // *NON-NATIVE* vector map differences for LEFT OUTER JOIN and Filtered...
                        List<ExprNodeDesc> bigTableFilters = desc.getFilters().get((byte) desc.getPosBigTable());
                        boolean isOuterAndFiltered = (!desc.isNoOuterJoin() && bigTableFilters.size() > 0);
                        if (!isOuterAndFiltered) {
                            opClass = VectorMapJoinOperator.class;
                        } else {
                            opClass = VectorMapJoinOuterFilteredOperator.class;
                        }
                        vectorOp = OperatorFactory.getVectorOperator(opClass, op.getCompilationOpContext(), op.getConf(), vContext);
                        isNative = false;
                    } else {
                        // TEMPORARY Until Native Vector Map Join with Hybrid passes tests...
                        // HiveConf.setBoolVar(physicalContext.getConf(),
                        //    HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN, false);
                        vectorOp = specializeMapJoinOperator(op, vContext, desc, vectorMapJoinInfo);
                        isNative = true;
                        if (vectorTaskColumnInfo != null) {
                            if (usesVectorUDFAdaptor(vectorMapJoinInfo.getBigTableKeyExpressions())) {
                                vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                            }
                            if (usesVectorUDFAdaptor(vectorMapJoinInfo.getBigTableValueExpressions())) {
                                vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                            }
                        }
                    }
                } else {
                    Preconditions.checkState(op instanceof SMBMapJoinOperator);
                    SMBJoinDesc smbJoinSinkDesc = (SMBJoinDesc) op.getConf();
                    VectorSMBJoinDesc vectorSMBJoinDesc = new VectorSMBJoinDesc();
                    smbJoinSinkDesc.setVectorDesc(vectorSMBJoinDesc);
                    vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), smbJoinSinkDesc, vContext);
                    isNative = false;
                }
            }
            break;
        case REDUCESINK:
            {
                VectorReduceSinkInfo vectorReduceSinkInfo = new VectorReduceSinkInfo();
                ReduceSinkDesc desc = (ReduceSinkDesc) op.getConf();
                boolean specialize = canSpecializeReduceSink(desc, isTezOrSpark, vContext, vectorReduceSinkInfo);
                if (!specialize) {
                    vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), op.getConf(), vContext);
                    isNative = false;
                } else {
                    vectorOp = specializeReduceSinkOperator(op, vContext, desc, vectorReduceSinkInfo);
                    isNative = true;
                    if (vectorTaskColumnInfo != null) {
                        if (usesVectorUDFAdaptor(vectorReduceSinkInfo.getReduceSinkKeyExpressions())) {
                            vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                        }
                        if (usesVectorUDFAdaptor(vectorReduceSinkInfo.getReduceSinkValueExpressions())) {
                            vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                        }
                    }
                }
            }
            break;
        case FILTER:
            {
                vectorOp = vectorizeFilterOperator(op, vContext);
                isNative = true;
                if (vectorTaskColumnInfo != null) {
                    VectorFilterDesc vectorFilterDesc = (VectorFilterDesc) ((AbstractOperatorDesc) vectorOp.getConf()).getVectorDesc();
                    VectorExpression vectorPredicateExpr = vectorFilterDesc.getPredicateExpression();
                    if (usesVectorUDFAdaptor(vectorPredicateExpr)) {
                        vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                    }
                }
            }
            break;
        case SELECT:
            {
                vectorOp = vectorizeSelectOperator(op, vContext);
                isNative = true;
                if (vectorTaskColumnInfo != null) {
                    VectorSelectDesc vectorSelectDesc = (VectorSelectDesc) ((AbstractOperatorDesc) vectorOp.getConf()).getVectorDesc();
                    VectorExpression[] vectorSelectExprs = vectorSelectDesc.getSelectExpressions();
                    if (usesVectorUDFAdaptor(vectorSelectExprs)) {
                        vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                    }
                }
            }
            break;
        case GROUPBY:
            {
                vectorOp = vectorizeGroupByOperator(op, vContext);
                isNative = false;
                if (vectorTaskColumnInfo != null) {
                    VectorGroupByDesc vectorGroupByDesc = (VectorGroupByDesc) ((AbstractOperatorDesc) vectorOp.getConf()).getVectorDesc();
                    if (!vectorGroupByDesc.isVectorOutput()) {
                        vectorTaskColumnInfo.setGroupByVectorOutput(false);
                    }
                    VectorExpression[] vecKeyExpressions = vectorGroupByDesc.getKeyExpressions();
                    if (usesVectorUDFAdaptor(vecKeyExpressions)) {
                        vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                    }
                    VectorAggregateExpression[] vecAggregators = vectorGroupByDesc.getAggregators();
                    for (VectorAggregateExpression vecAggr : vecAggregators) {
                        if (usesVectorUDFAdaptor(vecAggr.inputExpression())) {
                            vectorTaskColumnInfo.setUsesVectorUDFAdaptor(true);
                        }
                    }
                }
            }
            break;
        case FILESINK:
            {
                FileSinkDesc fileSinkDesc = (FileSinkDesc) op.getConf();
                VectorFileSinkDesc vectorFileSinkDesc = new VectorFileSinkDesc();
                fileSinkDesc.setVectorDesc(vectorFileSinkDesc);
                vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), fileSinkDesc, vContext);
                isNative = false;
            }
            break;
        case LIMIT:
            {
                LimitDesc limitDesc = (LimitDesc) op.getConf();
                VectorLimitDesc vectorLimitDesc = new VectorLimitDesc();
                limitDesc.setVectorDesc(vectorLimitDesc);
                vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), limitDesc, vContext);
                isNative = true;
            }
            break;
        case EVENT:
            {
                AppMasterEventDesc eventDesc = (AppMasterEventDesc) op.getConf();
                VectorAppMasterEventDesc vectorEventDesc = new VectorAppMasterEventDesc();
                eventDesc.setVectorDesc(vectorEventDesc);
                vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), eventDesc, vContext);
                isNative = true;
            }
            break;
        case HASHTABLESINK:
            {
                SparkHashTableSinkDesc sparkHashTableSinkDesc = (SparkHashTableSinkDesc) op.getConf();
                VectorSparkHashTableSinkDesc vectorSparkHashTableSinkDesc = new VectorSparkHashTableSinkDesc();
                sparkHashTableSinkDesc.setVectorDesc(vectorSparkHashTableSinkDesc);
                vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), sparkHashTableSinkDesc, vContext);
                isNative = true;
            }
            break;
        case SPARKPRUNINGSINK:
            {
                SparkPartitionPruningSinkDesc sparkPartitionPruningSinkDesc = (SparkPartitionPruningSinkDesc) op.getConf();
                VectorSparkPartitionPruningSinkDesc vectorSparkPartitionPruningSinkDesc = new VectorSparkPartitionPruningSinkDesc();
                sparkPartitionPruningSinkDesc.setVectorDesc(vectorSparkPartitionPruningSinkDesc);
                vectorOp = OperatorFactory.getVectorOperator(op.getCompilationOpContext(), sparkPartitionPruningSinkDesc, vContext);
                isNative = true;
            }
            break;
        default:
            // These are children of GROUP BY operators with non-vector outputs.
            isNative = false;
            vectorOp = op;
            break;
    }
    Preconditions.checkState(vectorOp != null);
    if (vectorTaskColumnInfo != null && !isNative) {
        vectorTaskColumnInfo.setAllNative(false);
    }
    LOG.debug("vectorizeOperator " + vectorOp.getClass().getName());
    LOG.debug("vectorizeOperator " + vectorOp.getConf().getClass().getName());
    if (vectorOp != op) {
        fixupParentChildOperators(op, vectorOp);
        ((AbstractOperatorDesc) vectorOp.getConf()).setVectorMode(true);
    }
    return vectorOp;
}
Also used : VectorMapJoinInnerStringOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerStringOperator) VectorReduceSinkLongOperator(org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkLongOperator) VectorMapJoinOuterLongOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterLongOperator) VectorReduceSinkStringOperator(org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkStringOperator) VectorMapJoinInnerBigOnlyMultiKeyOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerBigOnlyMultiKeyOperator) VectorMapJoinLeftSemiMultiKeyOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiMultiKeyOperator) VectorMapJoinLeftSemiStringOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiStringOperator) VectorMapJoinLeftSemiLongOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiLongOperator) VectorReduceSinkMultiKeyOperator(org.apache.hadoop.hive.ql.exec.vector.reducesink.VectorReduceSinkMultiKeyOperator) VectorMapJoinOuterFilteredOperator(org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator) VectorMapJoinInnerBigOnlyLongOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerBigOnlyLongOperator) VectorMapJoinInnerBigOnlyStringOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerBigOnlyStringOperator) VectorMapJoinInnerMultiKeyOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerMultiKeyOperator) VectorMapJoinOuterStringOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterStringOperator) VectorMapJoinOperator(org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator) VectorMapJoinInnerLongOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinInnerLongOperator) VectorMapJoinOuterMultiKeyOperator(org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterMultiKeyOperator) AppMasterEventDesc(org.apache.hadoop.hive.ql.plan.AppMasterEventDesc) VectorAppMasterEventDesc(org.apache.hadoop.hive.ql.plan.VectorAppMasterEventDesc) SMBJoinDesc(org.apache.hadoop.hive.ql.plan.SMBJoinDesc) VectorSMBJoinDesc(org.apache.hadoop.hive.ql.plan.VectorSMBJoinDesc) VectorFileSinkDesc(org.apache.hadoop.hive.ql.plan.VectorFileSinkDesc) FileSinkDesc(org.apache.hadoop.hive.ql.plan.FileSinkDesc) VectorMapJoinOperator(org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator) VectorFileSinkDesc(org.apache.hadoop.hive.ql.plan.VectorFileSinkDesc) VectorReduceSinkInfo(org.apache.hadoop.hive.ql.plan.VectorReduceSinkInfo) VectorSparkPartitionPruningSinkDesc(org.apache.hadoop.hive.ql.plan.VectorSparkPartitionPruningSinkDesc) SparkPartitionPruningSinkDesc(org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc) VectorAppMasterEventDesc(org.apache.hadoop.hive.ql.plan.VectorAppMasterEventDesc) ArrayList(java.util.ArrayList) List(java.util.List) VectorSelectDesc(org.apache.hadoop.hive.ql.plan.VectorSelectDesc) VectorReduceSinkDesc(org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc) ReduceSinkDesc(org.apache.hadoop.hive.ql.plan.ReduceSinkDesc) VectorMapJoinOperator(org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator) VectorFilterDesc(org.apache.hadoop.hive.ql.plan.VectorFilterDesc) SparkHashTableSinkDesc(org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc) VectorSparkHashTableSinkDesc(org.apache.hadoop.hive.ql.plan.VectorSparkHashTableSinkDesc) AbstractOperatorDesc(org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) VectorMapJoinDesc(org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc) VectorSparkHashTableSinkDesc(org.apache.hadoop.hive.ql.plan.VectorSparkHashTableSinkDesc) VectorSparkPartitionPruningSinkDesc(org.apache.hadoop.hive.ql.plan.VectorSparkPartitionPruningSinkDesc) VectorMapJoinInfo(org.apache.hadoop.hive.ql.plan.VectorMapJoinInfo) VectorSMBJoinDesc(org.apache.hadoop.hive.ql.plan.VectorSMBJoinDesc) VectorAggregateExpression(org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression) VectorLimitDesc(org.apache.hadoop.hive.ql.plan.VectorLimitDesc) LimitDesc(org.apache.hadoop.hive.ql.plan.LimitDesc) VectorLimitDesc(org.apache.hadoop.hive.ql.plan.VectorLimitDesc) VectorMapJoinOuterFilteredOperator(org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator) VectorGroupByDesc(org.apache.hadoop.hive.ql.plan.VectorGroupByDesc) VectorExpression(org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression)
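
Stripped of per-operator detail, the method follows one pattern throughout: try to produce a native vectorized operator, fall back to a row-mode bridge when specialization fails, and let any single non-native operator clear the task-level all-native flag. A compact sketch of that control flow (simplified enum and stub, not Hive's classes):

import java.util.Arrays;
import java.util.List;

public class VectorizeDemo {
    enum OpType { TABLESCAN, MAPJOIN, FILTER, GROUPBY, OTHER }

    // Stand-in for canSpecializeMapJoin: assume specialization fails here.
    static boolean trySpecializeMapJoin() {
        return false;
    }

    public static void main(String[] args) {
        List<OpType> ops = Arrays.asList(OpType.TABLESCAN, OpType.FILTER, OpType.GROUPBY);
        boolean allNative = true;
        for (OpType op : ops) {
            boolean isNative;
            switch (op) {
                case TABLESCAN:
                case FILTER:
                    isNative = true;                    // always has a native vector form
                    break;
                case MAPJOIN:
                    isNative = trySpecializeMapJoin();  // native only if specialization succeeds
                    break;
                default:
                    isNative = false;                   // row-mode bridge operator
                    break;
            }
            if (!isNative) {
                allNative = false;                      // one non-native op taints the task
            }
        }
        System.out.println("allNative = " + allNative); // false: GROUPBY is non-native
    }
}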

Example 15 with MapJoinDesc

Use of org.apache.hadoop.hive.ql.plan.MapJoinDesc in project hive by apache.

The class SparkMapJoinOptimizer, method convertJoinBucketMapJoin.

private int convertJoinBucketMapJoin(JoinOperator joinOp, MapJoinOperator mapJoinOp, OptimizeSparkProcContext context, int bigTablePosition) throws SemanticException {
    ParseContext parseContext = context.getParseContext();
    List<String> joinAliases = new ArrayList<String>();
    String baseBigAlias = null;
    Map<Integer, Set<String>> posToAliasMap = joinOp.getPosToAliasMap();
    for (Map.Entry<Integer, Set<String>> entry : posToAliasMap.entrySet()) {
        if (entry.getKey().intValue() == bigTablePosition) {
            baseBigAlias = entry.getValue().iterator().next();
        }
        for (String alias : entry.getValue()) {
            if (!joinAliases.contains(alias)) {
                joinAliases.add(alias);
            }
        }
    }
    mapJoinOp.setPosToAliasMap(posToAliasMap);
    BucketMapjoinProc.checkAndConvertBucketMapJoin(parseContext, mapJoinOp, baseBigAlias, joinAliases);
    MapJoinDesc joinDesc = mapJoinOp.getConf();
    return joinDesc.isBucketMapJoin() ? joinDesc.getBigTableBucketNumMapping().size() : -1;
}
Also used : Set(java.util.Set) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) ArrayList(java.util.ArrayList) Map(java.util.Map)
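
The alias bookkeeping at the top of the method is easiest to follow with concrete data: the big-table alias is the first alias registered at bigTablePosition, and joinAliases collects every alias exactly once in encounter order. A runnable sketch with hypothetical aliases:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class AliasDemo {
    public static void main(String[] args) {
        int bigTablePosition = 1;
        // position -> aliases, e.g. two aliases mapped to the big-table side
        Map<Integer, Set<String>> posToAliasMap = new LinkedHashMap<>();
        posToAliasMap.put(0, new LinkedHashSet<>(Arrays.asList("a")));
        posToAliasMap.put(1, new LinkedHashSet<>(Arrays.asList("b", "b2")));

        String baseBigAlias = null;
        List<String> joinAliases = new ArrayList<>();
        for (Map.Entry<Integer, Set<String>> entry : posToAliasMap.entrySet()) {
            if (entry.getKey().intValue() == bigTablePosition) {
                baseBigAlias = entry.getValue().iterator().next();  // first alias wins
            }
            for (String alias : entry.getValue()) {
                if (!joinAliases.contains(alias)) {                 // deduplicate, keep order
                    joinAliases.add(alias);
                }
            }
        }
        System.out.println(baseBigAlias + " " + joinAliases);       // prints: b [a, b, b2]
    }
}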

Aggregations

MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc): 17
ArrayList (java.util.ArrayList): 13
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 10
Operator (org.apache.hadoop.hive.ql.exec.Operator): 9
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 9
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc): 9
List (java.util.List): 8
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 8
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 7
RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema): 7
TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc): 7
HashMap (java.util.HashMap): 6
Map (java.util.Map): 6
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 6
GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator): 5
LinkedHashMap (java.util.LinkedHashMap): 4
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 4
AbstractMapJoinOperator (org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator): 4
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 4
HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator): 4