Example 11 with JoinDesc

Use of org.apache.hadoop.hive.ql.plan.JoinDesc in project hive by apache.

Class GenSparkSkewJoinProcessor, method processSkewJoin.

@SuppressWarnings("unchecked")
public static void processSkewJoin(JoinOperator joinOp, Task<?> currTask, ReduceWork reduceWork, ParseContext parseCtx) throws SemanticException {
    SparkWork currentWork = ((SparkTask) currTask).getWork();
    if (currentWork.getChildren(reduceWork).size() > 0) {
        LOG.warn("Skip runtime skew join as the ReduceWork has child work and hasn't been split.");
        return;
    }
    List<Task<?>> children = currTask.getChildTasks();
    Path baseTmpDir = parseCtx.getContext().getMRTmpPath();
    JoinDesc joinDescriptor = joinOp.getConf();
    Map<Byte, List<ExprNodeDesc>> joinValues = joinDescriptor.getExprs();
    int numAliases = joinValues.size();
    Map<Byte, Path> bigKeysDirMap = new HashMap<Byte, Path>();
    Map<Byte, Map<Byte, Path>> smallKeysDirMap = new HashMap<Byte, Map<Byte, Path>>();
    Map<Byte, Path> skewJoinJobResultsDir = new HashMap<Byte, Path>();
    Byte[] tags = joinDescriptor.getTagOrder();
    // for each joining table, set dir for big key and small keys properly
    for (int i = 0; i < numAliases; i++) {
        Byte alias = tags[i];
        bigKeysDirMap.put(alias, GenMRSkewJoinProcessor.getBigKeysDir(baseTmpDir, alias));
        Map<Byte, Path> smallKeysMap = new HashMap<Byte, Path>();
        smallKeysDirMap.put(alias, smallKeysMap);
        for (Byte src2 : tags) {
            if (!src2.equals(alias)) {
                smallKeysMap.put(src2, GenMRSkewJoinProcessor.getSmallKeysDir(baseTmpDir, alias, src2));
            }
        }
        skewJoinJobResultsDir.put(alias, GenMRSkewJoinProcessor.getBigKeysSkewJoinResultDir(baseTmpDir, alias));
    }
    joinDescriptor.setHandleSkewJoin(true);
    joinDescriptor.setBigKeysDirMap(bigKeysDirMap);
    joinDescriptor.setSmallKeysDirMap(smallKeysDirMap);
    joinDescriptor.setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVESKEWJOINKEY));
    // create proper table/column desc for spilled tables
    TableDesc keyTblDesc = (TableDesc) reduceWork.getKeyDesc().clone();
    List<String> joinKeys = Utilities.getColumnNames(keyTblDesc.getProperties());
    List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc.getProperties());
    Map<Byte, TableDesc> tableDescList = new HashMap<Byte, TableDesc>();
    Map<Byte, RowSchema> rowSchemaList = new HashMap<Byte, RowSchema>();
    Map<Byte, List<ExprNodeDesc>> newJoinValues = new HashMap<Byte, List<ExprNodeDesc>>();
    Map<Byte, List<ExprNodeDesc>> newJoinKeys = new HashMap<Byte, List<ExprNodeDesc>>();
    // used to create the mapJoinDesc; the entries must be in tag order
    List<TableDesc> newJoinValueTblDesc = new ArrayList<TableDesc>();
    for (int i = 0; i < tags.length; i++) {
        newJoinValueTblDesc.add(null);
    }
    for (int i = 0; i < numAliases; i++) {
        Byte alias = tags[i];
        List<ExprNodeDesc> valueCols = joinValues.get(alias);
        String colNames = "";
        String colTypes = "";
        int columnSize = valueCols.size();
        List<ExprNodeDesc> newValueExpr = new ArrayList<ExprNodeDesc>();
        List<ExprNodeDesc> newKeyExpr = new ArrayList<ExprNodeDesc>();
        ArrayList<ColumnInfo> columnInfos = new ArrayList<ColumnInfo>();
        boolean first = true;
        for (int k = 0; k < columnSize; k++) {
            TypeInfo type = valueCols.get(k).getTypeInfo();
            // any name, it does not matter.
            String newColName = i + "_VALUE_" + k;
            ColumnInfo columnInfo = new ColumnInfo(newColName, type, alias.toString(), false);
            columnInfos.add(columnInfo);
            newValueExpr.add(new ExprNodeColumnDesc(columnInfo.getType(), columnInfo.getInternalName(), columnInfo.getTabAlias(), false));
            if (!first) {
                colNames = colNames + ",";
                colTypes = colTypes + ",";
            }
            first = false;
            colNames = colNames + newColName;
            colTypes = colTypes + valueCols.get(k).getTypeString();
        }
        // we are putting join keys at last part of the spilled table
        for (int k = 0; k < joinKeys.size(); k++) {
            if (!first) {
                colNames = colNames + ",";
                colTypes = colTypes + ",";
            }
            first = false;
            colNames = colNames + joinKeys.get(k);
            colTypes = colTypes + joinKeyTypes.get(k);
            ColumnInfo columnInfo = new ColumnInfo(joinKeys.get(k), TypeInfoFactory.getPrimitiveTypeInfo(joinKeyTypes.get(k)), alias.toString(), false);
            columnInfos.add(columnInfo);
            newKeyExpr.add(new ExprNodeColumnDesc(columnInfo.getType(), columnInfo.getInternalName(), columnInfo.getTabAlias(), false));
        }
        newJoinValues.put(alias, newValueExpr);
        newJoinKeys.put(alias, newKeyExpr);
        tableDescList.put(alias, Utilities.getTableDesc(colNames, colTypes));
        rowSchemaList.put(alias, new RowSchema(columnInfos));
        // construct value table Desc
        String valueColNames = "";
        String valueColTypes = "";
        first = true;
        for (int k = 0; k < columnSize; k++) {
            // any name, it does not matter.
            String newColName = i + "_VALUE_" + k;
            if (!first) {
                valueColNames = valueColNames + ",";
                valueColTypes = valueColTypes + ",";
            }
            valueColNames = valueColNames + newColName;
            valueColTypes = valueColTypes + valueCols.get(k).getTypeString();
            first = false;
        }
        newJoinValueTblDesc.set((byte) i, Utilities.getTableDesc(valueColNames, valueColTypes));
    }
    joinDescriptor.setSkewKeysValuesTables(tableDescList);
    joinDescriptor.setKeyTableDesc(keyTblDesc);
    // create N-1 map join tasks
    HashMap<Path, Task<?>> bigKeysDirToTaskMap = new HashMap<Path, Task<?>>();
    List<Serializable> listWorks = new ArrayList<Serializable>();
    List<Task<?>> listTasks = new ArrayList<Task<?>>();
    for (int i = 0; i < numAliases - 1; i++) {
        Byte src = tags[i];
        HiveConf hiveConf = new HiveConf(parseCtx.getConf(), GenSparkSkewJoinProcessor.class);
        SparkWork sparkWork = new SparkWork(parseCtx.getConf().getVar(HiveConf.ConfVars.HIVEQUERYID));
        Task<?> skewJoinMapJoinTask = TaskFactory.get(sparkWork);
        skewJoinMapJoinTask.setFetchSource(currTask.isFetchSource());
        // create N TableScans
        Operator<? extends OperatorDesc>[] parentOps = new TableScanOperator[tags.length];
        for (int k = 0; k < tags.length; k++) {
            Operator<? extends OperatorDesc> ts = GenMapRedUtils.createTemporaryTableScanOperator(joinOp.getCompilationOpContext(), rowSchemaList.get((byte) k));
            ((TableScanOperator) ts).setTableDescSkewJoin(tableDescList.get((byte) k));
            parentOps[k] = ts;
        }
        // create the MapJoinOperator
        String dumpFilePrefix = "mapfile" + PlanUtils.getCountForMapJoinDumpFilePrefix();
        MapJoinDesc mapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc, newJoinValues, newJoinValueTblDesc, newJoinValueTblDesc, joinDescriptor.getOutputColumnNames(), i, joinDescriptor.getConds(), joinDescriptor.getFilters(), joinDescriptor.getNoOuterJoin(), dumpFilePrefix, joinDescriptor.getMemoryMonitorInfo(), joinDescriptor.getInMemoryDataSize());
        mapJoinDescriptor.setTagOrder(tags);
        mapJoinDescriptor.setHandleSkewJoin(false);
        mapJoinDescriptor.setNullSafes(joinDescriptor.getNullSafes());
        mapJoinDescriptor.setColumnExprMap(joinDescriptor.getColumnExprMap());
        // temporarily, mark it as child of all the TS
        MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory.getAndMakeChild(joinOp.getCompilationOpContext(), mapJoinDescriptor, null, parentOps);
        // clone the original join operator, and replace it with the MJ
        // this makes sure MJ has the same downstream operator plan as the original join
        List<Operator<?>> reducerList = new ArrayList<Operator<?>>();
        reducerList.add(reduceWork.getReducer());
        Operator<? extends OperatorDesc> reducer = SerializationUtilities.cloneOperatorTree(reducerList).get(0);
        Preconditions.checkArgument(reducer instanceof JoinOperator, "Reducer should be join operator, but actually is " + reducer.getName());
        JoinOperator cloneJoinOp = (JoinOperator) reducer;
        List<Operator<? extends OperatorDesc>> childOps = cloneJoinOp.getChildOperators();
        for (Operator<? extends OperatorDesc> childOp : childOps) {
            childOp.replaceParent(cloneJoinOp, mapJoinOp);
        }
        mapJoinOp.setChildOperators(childOps);
        // set memory usage for the MJ operator
        setMemUsage(mapJoinOp, skewJoinMapJoinTask, parseCtx);
        // create N MapWorks and add them to the SparkWork
        MapWork bigMapWork = null;
        Map<Byte, Path> smallTblDirs = smallKeysDirMap.get(src);
        for (int j = 0; j < tags.length; j++) {
            MapWork mapWork = PlanUtils.getMapRedWork().getMapWork();
            sparkWork.add(mapWork);
            // this code has only been added for testing
            boolean mapperCannotSpanPartns = parseCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS);
            mapWork.setMapperCannotSpanPartns(mapperCannotSpanPartns);
            Operator<? extends OperatorDesc> tableScan = parentOps[j];
            String alias = tags[j].toString();
            ArrayList<String> aliases = new ArrayList<String>();
            aliases.add(alias);
            Path path;
            if (j == i) {
                path = bigKeysDirMap.get(tags[j]);
                bigKeysDirToTaskMap.put(path, skewJoinMapJoinTask);
                bigMapWork = mapWork;
            } else {
                path = smallTblDirs.get(tags[j]);
            }
            mapWork.addPathToAlias(path, aliases);
            mapWork.getAliasToWork().put(alias, tableScan);
            PartitionDesc partitionDesc = new PartitionDesc(tableDescList.get(tags[j]), null);
            mapWork.addPathToPartitionInfo(path, partitionDesc);
            mapWork.getAliasToPartnInfo().put(alias, partitionDesc);
            mapWork.setName("Map " + GenSparkUtils.getUtils().getNextSeqNumber());
        }
        // connect all small dir map work to the big dir map work
        Preconditions.checkArgument(bigMapWork != null, "Haven't identified big dir MapWork");
        // these 2 flags are intended only for the big-key map work
        bigMapWork.setNumMapTasks(HiveConf.getIntVar(hiveConf, HiveConf.ConfVars.HIVESKEWJOINMAPJOINNUMMAPTASK));
        bigMapWork.setMinSplitSize(HiveConf.getLongVar(hiveConf, HiveConf.ConfVars.HIVESKEWJOINMAPJOINMINSPLIT));
        // use HiveInputFormat so that we can control the number of map tasks
        bigMapWork.setInputformat(HiveInputFormat.class.getName());
        for (BaseWork work : sparkWork.getRoots()) {
            Preconditions.checkArgument(work instanceof MapWork, "All root work should be MapWork, but got " + work.getClass().getSimpleName());
            if (work != bigMapWork) {
                sparkWork.connect(work, bigMapWork, new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE));
            }
        }
        // insert SparkHashTableSink and Dummy operators
        for (int j = 0; j < tags.length; j++) {
            if (j != i) {
                insertSHTS(tags[j], (TableScanOperator) parentOps[j], bigMapWork);
            }
        }
        listWorks.add(skewJoinMapJoinTask.getWork());
        listTasks.add(skewJoinMapJoinTask);
    }
    if (children != null) {
        for (Task<?> tsk : listTasks) {
            for (Task<?> oldChild : children) {
                tsk.addDependentTask(oldChild);
            }
        }
        currTask.setChildTasks(new ArrayList<Task<?>>());
        for (Task<?> oldChild : children) {
            oldChild.getParentTasks().remove(currTask);
        }
        listTasks.addAll(children);
        for (Task<?> oldChild : children) {
            listWorks.add(oldChild.getWork());
        }
    }
    ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx context = new ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx(bigKeysDirToTaskMap, children);
    ConditionalWork cndWork = new ConditionalWork(listWorks);
    ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(cndWork);
    cndTsk.setListTasks(listTasks);
    cndTsk.setResolver(new ConditionalResolverSkewJoin());
    cndTsk.setResolverCtx(context);
    currTask.setChildTasks(new ArrayList<Task<?>>());
    currTask.addDependentTask(cndTsk);
}
Also used : MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) SparkTask(org.apache.hadoop.hive.ql.exec.spark.SparkTask) ConditionalTask(org.apache.hadoop.hive.ql.exec.ConditionalTask) Task(org.apache.hadoop.hive.ql.exec.Task) Serializable(java.io.Serializable) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConditionalWork(org.apache.hadoop.hive.ql.plan.ConditionalWork) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) List(java.util.List) HiveConf(org.apache.hadoop.hive.conf.HiveConf) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) ConditionalResolverSkewJoin(org.apache.hadoop.hive.ql.plan.ConditionalResolverSkewJoin) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc) Map(java.util.Map) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) SparkHashTableSinkOperator(org.apache.hadoop.hive.ql.exec.SparkHashTableSinkOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) HiveInputFormat(org.apache.hadoop.hive.ql.io.HiveInputFormat) ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) Path(org.apache.hadoop.fs.Path) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) SparkEdgeProperty(org.apache.hadoop.hive.ql.plan.SparkEdgeProperty) PartitionDesc(org.apache.hadoop.hive.ql.plan.PartitionDesc)
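
The tail of this method, where the per-alias map-join tasks become alternatives of a runtime ConditionalTask, is the part most often reused. Below is a minimal sketch of that wiring, extracted into a hypothetical helper; the helper name and parameter list are assumptions, but every call it makes appears verbatim in the method above.

import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.ConditionalTask;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.plan.ConditionalResolverSkewJoin;
import org.apache.hadoop.hive.ql.plan.ConditionalWork;

public final class SkewJoinWiringSketch {
    // Hypothetical helper: wraps the generated map-join alternatives into a ConditionalTask
    // whose resolver decides at run time, per big-key directory, which alternative to execute.
    static ConditionalTask attachSkewJoinConditionalTask(Task<?> currTask,
            List<Serializable> listWorks, List<Task<?>> listTasks,
            HashMap<Path, Task<?>> bigKeysDirToTaskMap, List<Task<?>> oldChildTasks) {
        ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx context =
                new ConditionalResolverSkewJoin.ConditionalResolverSkewJoinCtx(bigKeysDirToTaskMap, oldChildTasks);
        ConditionalTask cndTsk = (ConditionalTask) TaskFactory.get(new ConditionalWork(listWorks));
        cndTsk.setListTasks(listTasks);
        cndTsk.setResolver(new ConditionalResolverSkewJoin());
        cndTsk.setResolverCtx(context);
        // the current task now has exactly one child: the conditional task
        currTask.setChildTasks(new ArrayList<Task<?>>());
        currTask.addDependentTask(cndTsk);
        return cndTsk;
    }
}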

Example 12 with JoinDesc

Use of org.apache.hadoop.hive.ql.plan.JoinDesc in project hive by apache.

Class DotExporter, method style.

private String style(Operator<?> n) {
    String fillColor = "white";
    OperatorDesc c = n.getConf();
    if (n instanceof TableScanOperator) {
        fillColor = "#ccffcc";
    }
    if (c instanceof JoinDesc) {
        fillColor = "#ffcccc";
    }
    return String.format("style=filled,fillcolor=\"%s\"", fillColor);
}
Also used : TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc)
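
For context, the attribute string returned by style() is meant to be appended to a DOT node declaration. The following standalone sketch is not the actual DotExporter code; the operator-kind strings and node naming are made up purely to show the resulting DOT syntax.

public final class DotStyleSketch {
    static String style(String operatorKind) {
        String fillColor = "white";
        if ("TS".equals(operatorKind)) {          // table scans get green
            fillColor = "#ccffcc";
        } else if ("JOIN".equals(operatorKind)) { // join descriptors get red
            fillColor = "#ffcccc";
        }
        return String.format("style=filled,fillcolor=\"%s\"", fillColor);
    }

    public static void main(String[] args) {
        // prints: op_1 [label="JOIN",style=filled,fillcolor="#ffcccc"];
        System.out.printf("op_%d [label=\"%s\",%s];%n", 1, "JOIN", style("JOIN"));
    }
}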

Example 13 with JoinDesc

Use of org.apache.hadoop.hive.ql.plan.JoinDesc in project hive by apache.

Class ConvertJoinMapJoin, method convertJoinSMBJoin.

// replaces the join operator with a new CommonJoinOperator, removes the
// parent reduce sinks
private void convertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcContext context, int mapJoinConversionPos, int numBuckets, boolean adjustParentsChildren) throws SemanticException {
    MapJoinDesc mapJoinDesc = null;
    if (adjustParentsChildren) {
        mapJoinDesc = MapJoinProcessor.getMapJoinDesc(context.conf, joinOp, joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(), joinOp.getConf().getMapAliases(), mapJoinConversionPos, true);
    } else {
        JoinDesc joinDesc = joinOp.getConf();
        // retain the original join desc in the map join.
        mapJoinDesc = new MapJoinDesc(MapJoinProcessor.getKeys(joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(), joinOp).getRight(), null, joinDesc.getExprs(), null, null, joinDesc.getOutputColumnNames(), mapJoinConversionPos, joinDesc.getConds(), joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null, joinDesc.getMemoryMonitorInfo(), joinDesc.getInMemoryDataSize());
        mapJoinDesc.setNullSafes(joinDesc.getNullSafes());
        mapJoinDesc.setFilterMap(joinDesc.getFilterMap());
        mapJoinDesc.setResidualFilterExprs(joinDesc.getResidualFilterExprs());
        // keep column expression map, explain plan uses this to display
        mapJoinDesc.setColumnExprMap(joinDesc.getColumnExprMap());
        mapJoinDesc.setReversedExprs(joinDesc.getReversedExprs());
        mapJoinDesc.resetOrder();
    }
    CommonMergeJoinOperator mergeJoinOp = (CommonMergeJoinOperator) OperatorFactory.get(joinOp.getCompilationOpContext(), new CommonMergeJoinDesc(numBuckets, mapJoinConversionPos, mapJoinDesc), joinOp.getSchema());
    context.parseContext.getContext().getPlanMapper().link(joinOp, mergeJoinOp);
    int numReduceSinks = joinOp.getOpTraits().getNumReduceSinks();
    OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets, joinOp.getOpTraits().getSortCols(), numReduceSinks);
    mergeJoinOp.setOpTraits(opTraits);
    mergeJoinOp.getConf().setBucketingVersion(joinOp.getConf().getBucketingVersion());
    preserveOperatorInfos(mergeJoinOp, joinOp, context);
    for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
        int pos = parentOp.getChildOperators().indexOf(joinOp);
        parentOp.getChildOperators().remove(pos);
        parentOp.getChildOperators().add(pos, mergeJoinOp);
    }
    for (Operator<? extends OperatorDesc> childOp : joinOp.getChildOperators()) {
        int pos = childOp.getParentOperators().indexOf(joinOp);
        childOp.getParentOperators().remove(pos);
        childOp.getParentOperators().add(pos, mergeJoinOp);
    }
    List<Operator<? extends OperatorDesc>> childOperators = mergeJoinOp.getChildOperators();
    List<Operator<? extends OperatorDesc>> parentOperators = mergeJoinOp.getParentOperators();
    childOperators.clear();
    parentOperators.clear();
    childOperators.addAll(joinOp.getChildOperators());
    parentOperators.addAll(joinOp.getParentOperators());
    mergeJoinOp.getConf().setGenJoinKeys(false);
    if (adjustParentsChildren) {
        mergeJoinOp.getConf().setGenJoinKeys(true);
        List<Operator<? extends OperatorDesc>> newParentOpList = new ArrayList<Operator<? extends OperatorDesc>>();
        for (Operator<? extends OperatorDesc> parentOp : mergeJoinOp.getParentOperators()) {
            for (Operator<? extends OperatorDesc> grandParentOp : parentOp.getParentOperators()) {
                grandParentOp.getChildOperators().remove(parentOp);
                grandParentOp.getChildOperators().add(mergeJoinOp);
                newParentOpList.add(grandParentOp);
            }
        }
        mergeJoinOp.getParentOperators().clear();
        mergeJoinOp.getParentOperators().addAll(newParentOpList);
        List<Operator<? extends OperatorDesc>> parentOps = new ArrayList<Operator<? extends OperatorDesc>>(mergeJoinOp.getParentOperators());
        for (Operator<? extends OperatorDesc> parentOp : parentOps) {
            int parentIndex = mergeJoinOp.getParentOperators().indexOf(parentOp);
            if (parentIndex == mapJoinConversionPos) {
                continue;
            }
            // a hash-mode group by parent must flush its output during join processing,
            // not at the time of close, so switch its mode to FINAL
            if (parentOp instanceof GroupByOperator) {
                GroupByOperator gpbyOp = (GroupByOperator) parentOp;
                if (gpbyOp.getConf().getMode() == GroupByDesc.Mode.HASH) {
                    // No need to change for MERGE_PARTIAL etc.
                    gpbyOp.getConf().setMode(GroupByDesc.Mode.FINAL);
                }
            }
            // insert the dummy store operator here
            DummyStoreOperator dummyStoreOp = new TezDummyStoreOperator(mergeJoinOp.getCompilationOpContext());
            dummyStoreOp.setConf(new DummyStoreDesc());
            dummyStoreOp.setParentOperators(new ArrayList<Operator<? extends OperatorDesc>>());
            dummyStoreOp.setChildOperators(new ArrayList<Operator<? extends OperatorDesc>>());
            dummyStoreOp.getChildOperators().add(mergeJoinOp);
            int index = parentOp.getChildOperators().indexOf(mergeJoinOp);
            parentOp.getChildOperators().remove(index);
            parentOp.getChildOperators().add(index, dummyStoreOp);
            dummyStoreOp.getParentOperators().add(parentOp);
            mergeJoinOp.getParentOperators().remove(parentIndex);
            mergeJoinOp.getParentOperators().add(parentIndex, dummyStoreOp);
        }
    }
    mergeJoinOp.cloneOriginalParentsList(mergeJoinOp.getParentOperators());
}
Also used : CommonMergeJoinOperator(org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) MuxOperator(org.apache.hadoop.hive.ql.exec.MuxOperator) CommonJoinOperator(org.apache.hadoop.hive.ql.exec.CommonJoinOperator) TezDummyStoreOperator(org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator) AppMasterEventOperator(org.apache.hadoop.hive.ql.exec.AppMasterEventOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) DummyStoreOperator(org.apache.hadoop.hive.ql.exec.DummyStoreOperator) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) DummyStoreDesc(org.apache.hadoop.hive.ql.plan.DummyStoreDesc) OpTraits(org.apache.hadoop.hive.ql.plan.OpTraits) CommonMergeJoinDesc(org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc) ArrayList(java.util.ArrayList) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc)
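
The core of the conversion is splicing the new CommonMergeJoinOperator into the operator DAG in place of the original join. Here is a self-contained sketch of that in-place replacement pattern using a toy Node class (Hive's Operator API is not required). The original code uses remove-then-add at the same index, which is equivalent to the set() call below.

import java.util.ArrayList;
import java.util.List;

public final class RewireSketch {
    static final class Node {
        final String name;
        final List<Node> parents = new ArrayList<>();
        final List<Node> children = new ArrayList<>();
        Node(String name) { this.name = name; }
    }

    // Replace oldOp with newOp in the DAG, keeping each neighbour's list order intact.
    static void replace(Node oldOp, Node newOp) {
        for (Node parent : oldOp.parents) {
            int pos = parent.children.indexOf(oldOp);
            parent.children.set(pos, newOp);   // same slot, new operator
        }
        for (Node child : oldOp.children) {
            int pos = child.parents.indexOf(oldOp);
            child.parents.set(pos, newOp);
        }
        // the new operator inherits the old operator's neighbours
        newOp.parents.addAll(oldOp.parents);
        newOp.children.addAll(oldOp.children);
    }
}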

Example 14 with JoinDesc

Use of org.apache.hadoop.hive.ql.plan.JoinDesc in project hive by apache.

Class ConvertJoinMapJoin, method checkAndConvertSMBJoin.

@SuppressWarnings("unchecked")
private Object checkAndConvertSMBJoin(OptimizeTezProcContext context, JoinOperator joinOp, TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException {
    // we could not convert to a bucket map join, nor to a map join based on size;
    // check if we can convert to an SMB join instead
    if (!(HiveConf.getBoolVar(context.conf, ConfVars.HIVE_AUTO_SORTMERGE_JOIN)) || ((!HiveConf.getBoolVar(context.conf, ConfVars.HIVE_AUTO_SORTMERGE_JOIN_REDUCE)) && joinOp.getOpTraits().getNumReduceSinks() >= 2)) {
        fallbackToReduceSideJoin(joinOp, context);
        return null;
    }
    Class<? extends BigTableSelectorForAutoSMJ> bigTableMatcherClass = null;
    try {
        String selector = HiveConf.getVar(context.parseContext.getConf(), HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR);
        bigTableMatcherClass = JavaUtils.loadClass(selector);
    } catch (ClassNotFoundException e) {
        throw new SemanticException(e.getMessage());
    }
    BigTableSelectorForAutoSMJ bigTableMatcher = ReflectionUtils.newInstance(bigTableMatcherClass, null);
    JoinDesc joinDesc = joinOp.getConf();
    JoinCondDesc[] joinCondns = joinDesc.getConds();
    Set<Integer> joinCandidates = MapJoinProcessor.getBigTableCandidates(joinCondns);
    if (joinCandidates.isEmpty()) {
        // there are no big table candidates of any type, so return false
        return false;
    }
    int mapJoinConversionPos = bigTableMatcher.getBigTablePosition(context.parseContext, joinOp, joinCandidates);
    if (mapJoinConversionPos < 0) {
        // the big table position could not be determined (e.g. the candidate
        // contains aliases from a sub-query); we are just converting to a common
        // merge join operator, i.e. the shuffle join in the map-reduce case
        fallbackToReduceSideJoin(joinOp, context);
        return null;
    }
    if (checkConvertJoinSMBJoin(joinOp, context, mapJoinConversionPos, tezBucketJoinProcCtx)) {
        convertJoinSMBJoin(joinOp, context, mapJoinConversionPos, tezBucketJoinProcCtx.getNumBuckets(), true);
    } else {
        // we are just converting to a common merge join operator, i.e. the shuffle
        // join in the map-reduce case
        fallbackToReduceSideJoin(joinOp, context);
    }
    return null;
}
Also used : MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc) CommonMergeJoinDesc(org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc) JoinCondDesc(org.apache.hadoop.hive.ql.plan.JoinCondDesc) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
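
The big-table selector is a pluggable strategy whose class name comes from configuration (HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR). Below is a minimal sketch of that pattern using plain JDK reflection rather than Hive's JavaUtils/ReflectionUtils, with a hypothetical BigTableSelector interface standing in for BigTableSelectorForAutoSMJ.

public final class SelectorLoadingSketch {
    // Hypothetical strategy interface; Hive's real contract is BigTableSelectorForAutoSMJ.
    interface BigTableSelector {
        int getBigTablePosition(long[] inputSizes);
    }

    // Load and instantiate the strategy class named in configuration.
    static BigTableSelector load(String className) throws ReflectiveOperationException {
        Class<?> clazz = Class.forName(className);
        return (BigTableSelector) clazz.getDeclaredConstructor().newInstance();
    }
}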

Example 15 with JoinDesc

Use of org.apache.hadoop.hive.ql.plan.JoinDesc in project hive by apache.

Class ConvertJoinMapJoin, method getMapJoinConversion.

/**
 * Obtain big table position for join.
 *
 * @param joinOp join operator
 * @param context optimization context
 * @param buckets bucket count for Bucket Map Join conversion consideration or reduce count
 * for Dynamic Hash Join conversion consideration
 * @param skipJoinTypeChecks whether to skip join type checking
 * @param maxSize size threshold for Map Join conversion
 * @param checkMapJoinThresholds whether to check thresholds to convert to Map Join
 * @return a MapJoinConversion carrying the chosen big table position, or null if the join cannot be converted
 * @throws SemanticException
 */
public MapJoinConversion getMapJoinConversion(JoinOperator joinOp, OptimizeTezProcContext context, int buckets, boolean skipJoinTypeChecks, long maxSize, boolean checkMapJoinThresholds) throws SemanticException {
    JoinDesc joinDesc = joinOp.getConf();
    JoinCondDesc[] conds = joinDesc.getConds();
    if (!skipJoinTypeChecks) {
        /*
       * HIVE-9038: Join tests fail in tez when we have more than 1 join on the same key and there is
       * an outer join down the join tree that requires filterTag. We disable this conversion to map
       * join here now. We need to emulate the behavior of HashTableSinkOperator as in MR or create a
       * new operation to be able to support this. This seems like a corner case enough to special
       * case this for now.
       */
        if (conds.length > 1) {
            if (hasOuterJoin(joinOp)) {
                return null;
            }
        }
    }
    // Assume.
    boolean isFullOuterEnabledForDynamicPartitionHashJoin = false;
    boolean isFullOuterEnabledForMapJoin = false;
    boolean isFullOuterJoin = MapJoinProcessor.precheckFullOuter(context.conf, joinOp);
    if (isFullOuterJoin) {
        boolean isFullOuterEnabled = MapJoinProcessor.isFullOuterMapEnabled(context.conf, joinOp);
        if (isFullOuterEnabled) {
            // FUTURE: Currently, we only support DPHJ.
            isFullOuterEnabledForDynamicPartitionHashJoin = MapJoinProcessor.isFullOuterEnabledForDynamicPartitionHashJoin(context.conf, joinOp);
        }
    }
    Set<Integer> bigTableCandidateSet = MapJoinProcessor.getBigTableCandidates(conds, /* isSupportFullOuter */
    true);
    if (bigTableCandidateSet.isEmpty()) {
        return null;
    }
    int bigTablePosition = -1;
    // big input cumulative row count
    long bigInputCumulativeCardinality = -1L;
    // stats of the big input
    Statistics bigInputStat = null;
    // foundInputNotFittingInMemory means we've encountered an input that's bigger than the
    // max. That input is either the big table or we cannot convert.
    boolean foundInputNotFittingInMemory = false;
    // total size of the inputs
    long totalSize = 0;
    // convert to DPHJ
    boolean convertDPHJ = false;
    for (int pos = 0; pos < joinOp.getParentOperators().size(); pos++) {
        Operator<? extends OperatorDesc> parentOp = joinOp.getParentOperators().get(pos);
        Statistics currInputStat = parentOp.getStatistics();
        if (currInputStat == null) {
            LOG.warn("Couldn't get statistics from: " + parentOp);
            return null;
        }
        long inputSize = computeOnlineDataSize(currInputStat);
        LOG.info("Join input#{}; onlineDataSize: {}; Statistics: {}", pos, inputSize, currInputStat);
        boolean currentInputNotFittingInMemory = false;
        if ((bigInputStat == null) || (inputSize > computeOnlineDataSize(bigInputStat))) {
            if (foundInputNotFittingInMemory) {
                // an earlier input already did not fit in memory and this one is even bigger; cannot convert
                return null;
            }
            if (inputSize / buckets > maxSize) {
                if (!bigTableCandidateSet.contains(pos)) {
                    // this input is not a big table candidate, yet it is too big for the map side
                    return null;
                }
                currentInputNotFittingInMemory = true;
                foundInputNotFittingInMemory = true;
            }
        }
        long currentInputCumulativeCardinality;
        if (foundInputNotFittingInMemory) {
            currentInputCumulativeCardinality = -1L;
        } else {
            Long cardinality = computeCumulativeCardinality(parentOp);
            if (cardinality == null) {
                // We could not get stats, we cannot convert
                return null;
            }
            currentInputCumulativeCardinality = cardinality;
        }
        // This input is the big table if it is contained in the big candidates set, and either:
        // 1) we have not chosen a big table yet, or
        // 2) it has been chosen as the big table above, or
        // 3) the cumulative cardinality for this input is higher, or
        // 4) the cumulative cardinality is equal, but the size is bigger,
        boolean selectedBigTable = bigTableCandidateSet.contains(pos) && (bigInputStat == null || currentInputNotFittingInMemory || (!foundInputNotFittingInMemory && (currentInputCumulativeCardinality > bigInputCumulativeCardinality || (currentInputCumulativeCardinality == bigInputCumulativeCardinality && inputSize > computeOnlineDataSize(bigInputStat)))));
        if (bigInputStat != null && selectedBigTable) {
            // We are replacing the current big table with a new one, thus
            // we need to count the current one as a map table then.
            totalSize += computeOnlineDataSize(bigInputStat);
            // check the number-of-entries threshold for the hash table
            if (checkMapJoinThresholds && !checkNumberOfEntriesForHashTable(joinOp, bigTablePosition, context)) {
                convertDPHJ = true;
            }
        } else if (!selectedBigTable) {
            // This is not the first table and we are not using it as big table,
            // in fact, we're adding this table as a map table
            totalSize += inputSize;
            // check the number-of-entries threshold for the hash table
            if (checkMapJoinThresholds && !checkNumberOfEntriesForHashTable(joinOp, pos, context)) {
                convertDPHJ = true;
            }
        }
        if (totalSize / buckets > maxSize) {
            // the combined small tables exceed the size limit, hence cannot convert
            return null;
        }
        if (selectedBigTable) {
            bigTablePosition = pos;
            bigInputCumulativeCardinality = currentInputCumulativeCardinality;
            bigInputStat = currInputStat;
        }
    }
    if (bigTablePosition == -1) {
        LOG.debug("No big table selected, no MapJoin");
        return null;
    }
    // Check if size of data to shuffle (larger table) is less than given max size
    if (checkMapJoinThresholds && convertDPHJ && checkShuffleSizeForLargeTable(joinOp, bigTablePosition, context)) {
        LOG.debug("Conditions to convert to MapJoin are not met");
        return null;
    }
    // only allow cross product in map joins if build side is 'small'
    boolean cartesianProductEdgeEnabled = HiveConf.getBoolVar(context.conf, HiveConf.ConfVars.TEZ_CARTESIAN_PRODUCT_EDGE_ENABLED);
    if (cartesianProductEdgeEnabled && !hasOuterJoin(joinOp) && isCrossProduct(joinOp)) {
        for (int i = 0; i < joinOp.getParentOperators().size(); i++) {
            if (i != bigTablePosition) {
                Statistics parentStats = joinOp.getParentOperators().get(i).getStatistics();
                if (parentStats.getNumRows() > HiveConf.getIntVar(context.conf, HiveConf.ConfVars.XPRODSMALLTABLEROWSTHRESHOLD)) {
                    // a small side exceeds the row threshold for cross products, so disable the map join
                    return null;
                }
            }
        }
    }
    // We store the total memory that this MapJoin is going to use,
    // which is calculated as totalSize/buckets, with totalSize
    // equal to sum of small tables size.
    joinOp.getConf().setInMemoryDataSize(totalSize / buckets);
    return new MapJoinConversion(bigTablePosition, isFullOuterJoin, isFullOuterEnabledForDynamicPartitionHashJoin, isFullOuterEnabledForMapJoin);
}
Also used : MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) JoinDesc(org.apache.hadoop.hive.ql.plan.JoinDesc) CommonMergeJoinDesc(org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc) Statistics(org.apache.hadoop.hive.ql.plan.Statistics) ColStatistics(org.apache.hadoop.hive.ql.plan.ColStatistics) JoinCondDesc(org.apache.hadoop.hive.ql.plan.JoinCondDesc)
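
The size bookkeeping in the loop above is the subtle part: whenever a larger input displaces the current big-table choice, the displaced input is re-counted as a small (hash) side, and the running small-side total must stay under maxSize per bucket. A simplified, self-contained sketch of just that bookkeeping (candidate sets, cardinality tie-breaking and the DPHJ checks are omitted):

public final class BigTableSelectionSketch {
    // Returns the chosen big table position, or -1 if the small sides do not fit.
    static int selectBigTable(long[] inputSizes, int buckets, long maxSize) {
        int bigPos = -1;
        long bigSize = -1L;
        long totalSmallSize = 0L;
        for (int pos = 0; pos < inputSizes.length; pos++) {
            if (inputSizes[pos] > bigSize) {
                if (bigPos >= 0) {
                    // the previously chosen big table becomes a small (hash) side
                    totalSmallSize += bigSize;
                }
                bigPos = pos;
                bigSize = inputSizes[pos];
            } else {
                totalSmallSize += inputSizes[pos];
            }
            if (totalSmallSize / buckets > maxSize) {
                return -1;  // small sides would not fit in memory
            }
        }
        return bigPos;
    }
}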

Aggregations

JoinDesc (org.apache.hadoop.hive.ql.plan.JoinDesc): 24 uses
ArrayList (java.util.ArrayList): 15 uses
MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc): 15 uses
Operator (org.apache.hadoop.hive.ql.exec.Operator): 14 uses
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 13 uses
List (java.util.List): 12 uses
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 12 uses
JoinCondDesc (org.apache.hadoop.hive.ql.plan.JoinCondDesc): 12 uses
HashMap (java.util.HashMap): 11 uses
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 11 uses
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 10 uses
OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc): 10 uses
LinkedHashMap (java.util.LinkedHashMap): 9 uses
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 9 uses
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 9 uses
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 8 uses
RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema): 8 uses
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 8 uses
SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator): 7 uses
HashSet (java.util.HashSet): 6 uses