Example 6 with OpTraits

Use of org.apache.hadoop.hive.ql.plan.OpTraits in project hive by apache.

From the class ConvertJoinMapJoin, method convertJoinDynamicPartitionedHashJoin.

private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, OptimizeTezProcContext context, final long maxSize) throws SemanticException {
    // Attempt dynamic partitioned hash join
    // Since we don't have big table index yet, must start with estimate of numReducers
    int numReducers = estimateNumBuckets(joinOp, false);
    LOG.info("Try dynamic partitioned hash join with estimated " + numReducers + " reducers");
    int bigTablePos = getMapJoinConversionPos(joinOp, context, numReducers, false, maxSize, false);
    if (bigTablePos >= 0) {
        // Now that we have the big table index, get real numReducers value based on big table RS
        ReduceSinkOperator bigTableParentRS = (ReduceSinkOperator) (joinOp.getParentOperators().get(bigTablePos));
        numReducers = bigTableParentRS.getConf().getNumReducers();
        LOG.debug("Real big table reducers = " + numReducers);
        MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, bigTablePos, false);
        if (mapJoinOp != null) {
            LOG.info("Selected dynamic partitioned hash join");
            mapJoinOp.getConf().setDynamicPartitionHashJoin(true);
            // Set OpTraits for dynamically partitioned hash join:
            // bucketColNames: Re-use previous joinOp's bucketColNames. Parent operators should be
            // reduce sink, which should have bucket columns based on the join keys.
            // numBuckets: set to number of reducers
            // sortCols: This is an unsorted join - no sort cols
            OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numReducers, null, joinOp.getOpTraits().getNumReduceSinks());
            mapJoinOp.setOpTraits(opTraits);
            mapJoinOp.setStatistics(joinOp.getStatistics());
            // propagate this change till the next RS
            for (Operator<? extends OperatorDesc> childOp : mapJoinOp.getChildOperators()) {
                setAllChildrenTraits(childOp, mapJoinOp.getOpTraits());
            }
            return true;
        }
    }
    return false;
}
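The final loop pushes the new traits down the operator tree via setAllChildrenTraits, whose body is not shown in this example. A minimal sketch of such a propagation helper, assuming (as the comment above suggests) that propagation stops at the next ReduceSinkOperator boundary; the actual helper in ConvertJoinMapJoin may differ in detail:

// Sketch only: illustrates the propagation described in the comments above.
private void setAllChildrenTraits(Operator<? extends OperatorDesc> currentOp, OpTraits opTraits) {
    // A ReduceSinkOperator re-partitions its input, so traits derived from the
    // map join are no longer valid past that boundary; stop the traversal there.
    if (currentOp instanceof ReduceSinkOperator) {
        return;
    }
    currentOp.setOpTraits(new OpTraits(opTraits.getBucketColNames(), opTraits.getNumBuckets(),
            opTraits.getSortCols(), opTraits.getNumReduceSinks()));
    for (Operator<? extends OperatorDesc> childOp : currentOp.getChildOperators()) {
        setAllChildrenTraits(childOp, opTraits);
    }
}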
Also used: MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator), OpTraits (org.apache.hadoop.hive.ql.plan.OpTraits), ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator)

Example 7 with OpTraits

Use of org.apache.hadoop.hive.ql.plan.OpTraits in project hive by apache.

From the class ConvertJoinMapJoin, method convertJoinSMBJoin.

// replaces the join operator with a new CommonMergeJoinOperator and removes
// the parent reduce sinks
private void convertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcContext context, int mapJoinConversionPos, int numBuckets, boolean adjustParentsChildren) throws SemanticException {
    MapJoinDesc mapJoinDesc = null;
    if (adjustParentsChildren) {
        mapJoinDesc = MapJoinProcessor.getMapJoinDesc(context.conf, joinOp, joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(), joinOp.getConf().getMapAliases(), mapJoinConversionPos, true);
    } else {
        JoinDesc joinDesc = joinOp.getConf();
        // retain the original join desc in the map join.
        mapJoinDesc = new MapJoinDesc(MapJoinProcessor.getKeys(joinOp.getConf().isLeftInputJoin(), joinOp.getConf().getBaseSrc(), joinOp).getRight(), null, joinDesc.getExprs(), null, null, joinDesc.getOutputColumnNames(), mapJoinConversionPos, joinDesc.getConds(), joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null, joinDesc.getMemoryMonitorInfo(), joinDesc.getInMemoryDataSize());
        mapJoinDesc.setNullSafes(joinDesc.getNullSafes());
        mapJoinDesc.setFilterMap(joinDesc.getFilterMap());
        mapJoinDesc.setResidualFilterExprs(joinDesc.getResidualFilterExprs());
        // keep column expression map, explain plan uses this to display
        mapJoinDesc.setColumnExprMap(joinDesc.getColumnExprMap());
        mapJoinDesc.setReversedExprs(joinDesc.getReversedExprs());
        mapJoinDesc.resetOrder();
    }
    CommonMergeJoinOperator mergeJoinOp = (CommonMergeJoinOperator) OperatorFactory.get(joinOp.getCompilationOpContext(), new CommonMergeJoinDesc(numBuckets, mapJoinConversionPos, mapJoinDesc), joinOp.getSchema());
    context.parseContext.getContext().getPlanMapper().link(joinOp, mergeJoinOp);
    int numReduceSinks = joinOp.getOpTraits().getNumReduceSinks();
    OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets, joinOp.getOpTraits().getSortCols(), numReduceSinks);
    mergeJoinOp.setOpTraits(opTraits);
    mergeJoinOp.getConf().setBucketingVersion(joinOp.getConf().getBucketingVersion());
    preserveOperatorInfos(mergeJoinOp, joinOp, context);
    for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
        int pos = parentOp.getChildOperators().indexOf(joinOp);
        parentOp.getChildOperators().remove(pos);
        parentOp.getChildOperators().add(pos, mergeJoinOp);
    }
    for (Operator<? extends OperatorDesc> childOp : joinOp.getChildOperators()) {
        int pos = childOp.getParentOperators().indexOf(joinOp);
        childOp.getParentOperators().remove(pos);
        childOp.getParentOperators().add(pos, mergeJoinOp);
    }
    List<Operator<? extends OperatorDesc>> childOperators = mergeJoinOp.getChildOperators();
    List<Operator<? extends OperatorDesc>> parentOperators = mergeJoinOp.getParentOperators();
    childOperators.clear();
    parentOperators.clear();
    childOperators.addAll(joinOp.getChildOperators());
    parentOperators.addAll(joinOp.getParentOperators());
    mergeJoinOp.getConf().setGenJoinKeys(false);
    if (adjustParentsChildren) {
        mergeJoinOp.getConf().setGenJoinKeys(true);
        List<Operator<? extends OperatorDesc>> newParentOpList = new ArrayList<Operator<? extends OperatorDesc>>();
        for (Operator<? extends OperatorDesc> parentOp : mergeJoinOp.getParentOperators()) {
            for (Operator<? extends OperatorDesc> grandParentOp : parentOp.getParentOperators()) {
                grandParentOp.getChildOperators().remove(parentOp);
                grandParentOp.getChildOperators().add(mergeJoinOp);
                newParentOpList.add(grandParentOp);
            }
        }
        mergeJoinOp.getParentOperators().clear();
        mergeJoinOp.getParentOperators().addAll(newParentOpList);
        List<Operator<? extends OperatorDesc>> parentOps = new ArrayList<Operator<? extends OperatorDesc>>(mergeJoinOp.getParentOperators());
        for (Operator<? extends OperatorDesc> parentOp : parentOps) {
            int parentIndex = mergeJoinOp.getParentOperators().indexOf(parentOp);
            if (parentIndex == mapJoinConversionPos) {
                continue;
            }
            // during join processing, not at the time of close.
            if (parentOp instanceof GroupByOperator) {
                GroupByOperator gpbyOp = (GroupByOperator) parentOp;
                if (gpbyOp.getConf().getMode() == GroupByDesc.Mode.HASH) {
                    // No need to change for MERGE_PARTIAL etc.
                    gpbyOp.getConf().setMode(GroupByDesc.Mode.FINAL);
                }
            }
            // insert the dummy store operator here
            DummyStoreOperator dummyStoreOp = new TezDummyStoreOperator(mergeJoinOp.getCompilationOpContext());
            dummyStoreOp.setConf(new DummyStoreDesc());
            dummyStoreOp.setParentOperators(new ArrayList<Operator<? extends OperatorDesc>>());
            dummyStoreOp.setChildOperators(new ArrayList<Operator<? extends OperatorDesc>>());
            dummyStoreOp.getChildOperators().add(mergeJoinOp);
            int index = parentOp.getChildOperators().indexOf(mergeJoinOp);
            parentOp.getChildOperators().remove(index);
            parentOp.getChildOperators().add(index, dummyStoreOp);
            dummyStoreOp.getParentOperators().add(parentOp);
            mergeJoinOp.getParentOperators().remove(parentIndex);
            mergeJoinOp.getParentOperators().add(parentIndex, dummyStoreOp);
        }
    }
    mergeJoinOp.cloneOriginalParentsList(mergeJoinOp.getParentOperators());
}
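Much of this method is plan surgery: the JoinOperator is replaced by the CommonMergeJoinOperator in every parent's child list and every child's parent list, and a TezDummyStoreOperator is spliced in front of each small-table branch. A condensed sketch of the replace-in-place pattern used here; the helper name is hypothetical, but the Operator calls mirror the ones above:

// Hypothetical helper illustrating the rewiring pattern from convertJoinSMBJoin.
private static void replaceInPlan(Operator<? extends OperatorDesc> oldOp,
        Operator<? extends OperatorDesc> newOp) {
    // Preserve list positions so the big-table / small-table sides keep their indexes.
    for (Operator<? extends OperatorDesc> parentOp : oldOp.getParentOperators()) {
        int pos = parentOp.getChildOperators().indexOf(oldOp);
        parentOp.getChildOperators().set(pos, newOp);
    }
    for (Operator<? extends OperatorDesc> childOp : oldOp.getChildOperators()) {
        int pos = childOp.getParentOperators().indexOf(oldOp);
        childOp.getParentOperators().set(pos, newOp);
    }
    // The new operator takes over the old operator's edges.
    newOp.getParentOperators().clear();
    newOp.getChildOperators().clear();
    newOp.getParentOperators().addAll(oldOp.getParentOperators());
    newOp.getChildOperators().addAll(oldOp.getChildOperators());
}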
Also used: CommonMergeJoinOperator (org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator), ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator), MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator), GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator), FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator), SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator), MuxOperator (org.apache.hadoop.hive.ql.exec.MuxOperator), CommonJoinOperator (org.apache.hadoop.hive.ql.exec.CommonJoinOperator), TezDummyStoreOperator (org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator), AppMasterEventOperator (org.apache.hadoop.hive.ql.exec.AppMasterEventOperator), JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator), TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator), Operator (org.apache.hadoop.hive.ql.exec.Operator), DummyStoreOperator (org.apache.hadoop.hive.ql.exec.DummyStoreOperator), MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc), DummyStoreDesc (org.apache.hadoop.hive.ql.plan.DummyStoreDesc), OpTraits (org.apache.hadoop.hive.ql.plan.OpTraits), CommonMergeJoinDesc (org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc), ArrayList (java.util.ArrayList), JoinDesc (org.apache.hadoop.hive.ql.plan.JoinDesc), OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc)

Example 8 with OpTraits

Use of org.apache.hadoop.hive.ql.plan.OpTraits in project hive by apache.

From the class ConvertJoinMapJoin, method convertJoinDynamicPartitionedHashJoin.

private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, OptimizeTezProcContext context) throws SemanticException {
    // Attempt dynamic partitioned hash join
    // Since we don't have big table index yet, must start with estimate of numReducers
    int numReducers = estimateNumBuckets(joinOp, false);
    LOG.info("Try dynamic partitioned hash join with estimated " + numReducers + " reducers");
    MapJoinConversion mapJoinConversion = getMapJoinConversion(joinOp, context, numReducers, false, maxJoinMemory, false);
    if (mapJoinConversion != null) {
        if (mapJoinConversion.getIsFullOuterJoin() && !mapJoinConversion.getIsFullOuterEnabledForDynamicPartitionHashJoin()) {
            return false;
        }
        final int bigTablePos = mapJoinConversion.getBigTablePos();
        // Now that we have the big table index, get real numReducers value based on big table RS
        ReduceSinkOperator bigTableParentRS = (ReduceSinkOperator) (joinOp.getParentOperators().get(bigTablePos));
        numReducers = bigTableParentRS.getConf().getNumReducers();
        LOG.debug("Real big table reducers = " + numReducers);
        MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversion, false);
        if (mapJoinOp != null) {
            LOG.info("Selected dynamic partitioned hash join");
            MapJoinDesc mapJoinDesc = mapJoinOp.getConf();
            mapJoinDesc.setDynamicPartitionHashJoin(true);
            if (mapJoinConversion.getIsFullOuterJoin()) {
                FullOuterMapJoinOptimization.removeFilterMap(mapJoinDesc);
            }
            // Set OpTraits for dynamically partitioned hash join:
            // bucketColNames: Re-use previous joinOp's bucketColNames. Parent operators should be
            // reduce sink, which should have bucket columns based on the join keys.
            // numBuckets: set to number of reducers
            // sortCols: This is an unsorted join - no sort cols
            OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), numReducers, null, joinOp.getOpTraits().getNumReduceSinks());
            mapJoinOp.setOpTraits(opTraits);
            preserveOperatorInfos(mapJoinOp, joinOp, context);
            // propagate this change till the next RS
            for (Operator<? extends OperatorDesc> childOp : mapJoinOp.getChildOperators()) {
                setAllChildrenTraits(childOp, mapJoinOp.getOpTraits());
            }
            return true;
        }
    }
    return false;
}
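This overload differs from Example 6 mainly in that the conversion decision is returned as a MapJoinConversion object rather than a bare big-table position. Only three accessors are exercised above; a minimal value-holder sketch with just those accessors follows (the real class inside ConvertJoinMapJoin may carry additional state):

// Sketch of a conversion-result holder; fields beyond the ones used above are omitted.
public static class MapJoinConversion {
    private final int bigTablePos;
    private final boolean isFullOuterJoin;
    private final boolean isFullOuterEnabledForDynamicPartitionHashJoin;

    public MapJoinConversion(int bigTablePos, boolean isFullOuterJoin,
            boolean isFullOuterEnabledForDynamicPartitionHashJoin) {
        this.bigTablePos = bigTablePos;
        this.isFullOuterJoin = isFullOuterJoin;
        this.isFullOuterEnabledForDynamicPartitionHashJoin =
                isFullOuterEnabledForDynamicPartitionHashJoin;
    }

    public int getBigTablePos() {
        return bigTablePos;
    }

    public boolean getIsFullOuterJoin() {
        return isFullOuterJoin;
    }

    public boolean getIsFullOuterEnabledForDynamicPartitionHashJoin() {
        return isFullOuterEnabledForDynamicPartitionHashJoin;
    }
}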
Also used: MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator), MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc), OpTraits (org.apache.hadoop.hive.ql.plan.OpTraits), ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator)

Example 9 with OpTraits

Use of org.apache.hadoop.hive.ql.plan.OpTraits in project hive by apache.

From the class ReduceSinkMapJoinProc, method processReduceSinkToHashJoin.

public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
    // remove the tag for in-memory side of mapjoin
    parentRS.getConf().setSkipTag(true);
    parentRS.setSkipTag(true);
    // Mark this small table as being processed
    if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
    }
    List<BaseWork> mapJoinWork = null;
    /*
     *  if there was a pre-existing work generated for the big-table mapjoin side,
     *  we need to hook the work generated for the RS (associated with the RS-MJ pattern)
     *  with the pre-existing work.
     *
     *  Otherwise, we need to associate that the mapjoin op
     *  to be linked to the RS work (associated with the RS-MJ pattern).
     *
     */
    mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
    BaseWork parentWork = getMapJoinParentWork(context, parentRS);
    // set the link between mapjoin and parent vertex
    int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
    if (pos == -1) {
        throw new SemanticException("Cannot find position of parent in mapjoin");
    }
    MapJoinDesc joinConf = mapJoinOp.getConf();
    long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
    long tableSize = Long.MAX_VALUE;
    Statistics stats = parentRS.getStatistics();
    if (stats != null) {
        keyCount = rowCount = stats.getNumRows();
        if (keyCount <= 0) {
            keyCount = rowCount = Long.MAX_VALUE;
        }
        tableSize = stats.getDataSize();
        List<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
        if (keyCols != null && !keyCols.isEmpty()) {
            // See if we can arrive at a smaller number using distinct stats from key columns.
            long maxKeyCount = 1;
            String prefix = Utilities.ReduceField.KEY.toString();
            for (String keyCol : keyCols) {
                ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
                ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
                if (cs == null || cs.getCountDistint() <= 0) {
                    maxKeyCount = Long.MAX_VALUE;
                    break;
                }
                maxKeyCount *= cs.getCountDistint();
                if (maxKeyCount >= keyCount) {
                    break;
                }
            }
            keyCount = Math.min(maxKeyCount, keyCount);
        }
        if (joinConf.isBucketMapJoin()) {
            OpTraits opTraits = mapJoinOp.getOpTraits();
            bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
            if (bucketCount > 0) {
                // We cannot obtain a better estimate without CustomPartitionVertex providing it
                // to us somehow; in which case using statistics would be completely unnecessary.
                keyCount /= bucketCount;
                tableSize /= bucketCount;
            }
        } else if (joinConf.isDynamicPartitionHashJoin()) {
            // For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
            bucketCount = parentRS.getConf().getNumReducers();
            keyCount /= bucketCount;
            tableSize /= bucketCount;
        }
    }
    if (keyCount == 0) {
        keyCount = 1;
    }
    if (tableSize == 0) {
        tableSize = 1;
    }
    LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
    joinConf.getParentToInput().put(pos, parentWork.getName());
    if (keyCount != Long.MAX_VALUE) {
        joinConf.getParentKeyCounts().put(pos, keyCount);
    }
    joinConf.getParentDataSizes().put(pos, tableSize);
    int numBuckets = -1;
    EdgeType edgeType = EdgeType.BROADCAST_EDGE;
    if (joinConf.isBucketMapJoin()) {
        numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
        /*
       * Here, we can be in one of 4 states.
       *
       * 1. If map join work is null implies that we have not yet traversed the big table side. We
       * just need to see if we can find a reduce sink operator in the big table side. This would
       * imply a reduce side operation.
       *
       * 2. If we don't find a reducesink in 1 it has to be the case that it is a map side operation.
       *
       * 3. If we have already created a work item for the big table side, we need to see if we can
       * find a table scan operator in the big table side. This would imply a map side operation.
       *
       * 4. If we don't find a table scan operator, it has to be a reduce side operation.
       */
        if (mapJoinWork == null) {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
            if (rootOp == null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        } else {
            Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
            if (rootOp != null) {
                // likely we found a table scan operator
                edgeType = EdgeType.CUSTOM_EDGE;
            } else {
                // we have found a reduce sink
                edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
            }
        }
    } else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
        if (parentRS.getConf().isForwarding()) {
            edgeType = EdgeType.ONE_TO_ONE_EDGE;
        } else {
            edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
        }
    }
    if (edgeType == EdgeType.CUSTOM_EDGE) {
        // disable auto parallelism for bucket map joins
        parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
    }
    TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            // link the work with the work associated with the reduce sink that triggered this rule
            TezWork tezWork = context.currentTask.getWork();
            LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
            tezWork.connect(parentWork, myWork, edgeProp);
            if (edgeType == EdgeType.CUSTOM_EDGE) {
                tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
            }
            ReduceSinkOperator r = null;
            if (context.connectedReduceSinks.contains(parentRS)) {
                LOG.debug("Cloning reduce sink " + parentRS + " for multi-child broadcast edge");
                // we've already set this one up. Need to clone for the next work.
                r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
                context.clonedReduceSinks.add(r);
            } else {
                r = parentRS;
            }
            // remember the output name of the reduce sink
            r.getConf().setOutputName(myWork.getName());
            context.connectedReduceSinks.add(r);
        }
    }
    // remember in case we need to connect additional work later
    Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
    if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
        linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
    } else {
        linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
    }
    linkWorkMap.put(parentWork, edgeProp);
    context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
    List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
    if (reduceSinks == null) {
        reduceSinks = new ArrayList<ReduceSinkOperator>();
    }
    reduceSinks.add(parentRS);
    context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
    // create the dummy operators
    List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
    // create an new operator: HashTableDummyOperator, which share the table desc
    HashTableDummyDesc desc = new HashTableDummyDesc();
    HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
    TableDesc tbl;
    // need to create the correct table descriptor for key/value
    RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
    tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
    dummyOp.getConf().setTbl(tbl);
    Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
    List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
    StringBuilder keyOrder = new StringBuilder();
    StringBuilder keyNullOrder = new StringBuilder();
    for (ExprNodeDesc k : keyCols) {
        keyOrder.append("+");
        keyNullOrder.append(NullOrdering.defaultNullOrder(context.conf).getSign());
    }
    TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
    mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
    // let the dummy op be the parent of mapjoin op
    mapJoinOp.replaceParent(parentRS, dummyOp);
    List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
    dummyChildren.add(mapJoinOp);
    dummyOp.setChildOperators(dummyChildren);
    dummyOperators.add(dummyOp);
    // cut the operator tree so as to not retain connections from the parent RS downstream
    List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
    int childIndex = childOperators.indexOf(mapJoinOp);
    childOperators.remove(childIndex);
    // at task startup
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
            myWork.addDummyOp(dummyOp);
        }
    }
    if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
        for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
            dummyOperators.add(op);
        }
    }
    context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
    return true;
}
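The key-count estimate above boils down to: take the row count of the small-table ReduceSink, cap it by the product of the distinct value counts of the join key columns, and divide by the number of buckets or reducers. A standalone illustration of that arithmetic with made-up numbers; in the real method the values come from Statistics, ColStatistics.getCountDistint(), and the ReduceSink's reducer count as shown above:

// Illustrative only: hypothetical numbers, same arithmetic as processReduceSinkToHashJoin.
public class KeyCountEstimateExample {
    public static void main(String[] args) {
        long rowCount = 10_000_000L;             // rows flowing through the small-table ReduceSink
        long[] keyColumnNdvs = { 5_000L, 40L };  // assumed distinct counts per join key column

        long keyCount = rowCount;
        long maxKeyCount = 1;
        for (long ndv : keyColumnNdvs) {
            maxKeyCount *= ndv;                  // product of NDVs bounds the distinct key count
            if (maxKeyCount >= keyCount) {
                break;
            }
        }
        keyCount = Math.min(maxKeyCount, keyCount);  // 200,000 distinct keys in this example

        long bucketCount = 50;                   // e.g. number of reducers on the big-table side
        keyCount /= bucketCount;                 // ~4,000 keys expected per per-task hash table
        System.out.println("estimated keys per task: " + keyCount);
    }
}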
Also used: ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator), MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator), TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator), Operator (org.apache.hadoop.hive.ql.exec.Operator), HashTableDummyOperator (org.apache.hadoop.hive.ql.exec.HashTableDummyOperator), OpTraits (org.apache.hadoop.hive.ql.plan.OpTraits), TezEdgeProperty (org.apache.hadoop.hive.ql.plan.TezEdgeProperty), ArrayList (java.util.ArrayList), ColStatistics (org.apache.hadoop.hive.ql.plan.ColStatistics), List (java.util.List), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException), HashTableDummyDesc (org.apache.hadoop.hive.ql.plan.HashTableDummyDesc), RowSchema (org.apache.hadoop.hive.ql.exec.RowSchema), MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc), Statistics (org.apache.hadoop.hive.ql.plan.Statistics), EdgeType (org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType), TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc), OperatorDesc (org.apache.hadoop.hive.ql.plan.OperatorDesc), TezWork (org.apache.hadoop.hive.ql.plan.TezWork)

Aggregations

MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 9 uses
OpTraits (org.apache.hadoop.hive.ql.plan.OpTraits): 9 uses
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 7 uses
ArrayList (java.util.ArrayList): 5 uses
MapJoinDesc (org.apache.hadoop.hive.ql.plan.MapJoinDesc): 5 uses
List (java.util.List): 4 uses
JoinOperator (org.apache.hadoop.hive.ql.exec.JoinOperator): 4 uses
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 4 uses
CommonJoinOperator (org.apache.hadoop.hive.ql.exec.CommonJoinOperator): 3 uses
CommonMergeJoinOperator (org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator): 3 uses
MemoryMonitorInfo (org.apache.hadoop.hive.ql.exec.MemoryMonitorInfo): 3 uses
Operator (org.apache.hadoop.hive.ql.exec.Operator): 3 uses
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator): 3 uses
HashMap (java.util.HashMap): 2 uses
AppMasterEventOperator (org.apache.hadoop.hive.ql.exec.AppMasterEventOperator): 2 uses
DummyStoreOperator (org.apache.hadoop.hive.ql.exec.DummyStoreOperator): 2 uses
FileSinkOperator (org.apache.hadoop.hive.ql.exec.FileSinkOperator): 2 uses
GroupByOperator (org.apache.hadoop.hive.ql.exec.GroupByOperator): 2 uses
MuxOperator (org.apache.hadoop.hive.ql.exec.MuxOperator): 2 uses
SelectOperator (org.apache.hadoop.hive.ql.exec.SelectOperator): 2 uses