Use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.
From the class GlobalLimitOptimizer, method checkQbpForGlobalLimit.
/**
 * Check the limit operators in all sub queries.
 *
 * @return the LimitOperator if there is one and only one limit among all subqueries
 *         and no disqualifying operator is found; null otherwise (no limit, more than
 *         one limit, or a disqualifying operator such as a sampling filter)
 */
private static LimitOperator checkQbpForGlobalLimit(TableScanOperator ts) {
  Set<Class<? extends Operator<?>>> searchedClasses =
      new ImmutableSet.Builder<Class<? extends Operator<?>>>()
          .add(ReduceSinkOperator.class)
          .add(GroupByOperator.class)
          .add(FilterOperator.class)
          .add(LimitOperator.class)
          .build();
  Multimap<Class<? extends Operator<?>>, Operator<?>> ops =
      OperatorUtils.classifyOperators(ts, searchedClasses);
  // existsOrdering AND existsPartitioning should be false.
  for (Operator<?> op : ops.get(ReduceSinkOperator.class)) {
    ReduceSinkDesc reduceSinkConf = ((ReduceSinkOperator) op).getConf();
    if (reduceSinkConf.isOrdering() || reduceSinkConf.isPartitioning()) {
      return null;
    }
  }
  // - There cannot exist any (distinct) aggregate.
  for (Operator<?> op : ops.get(GroupByOperator.class)) {
    GroupByDesc groupByConf = ((GroupByOperator) op).getConf();
    if (groupByConf.isAggregate() || groupByConf.isDistinct()) {
      return null;
    }
  }
  // - There cannot exist any sampling predicate.
  for (Operator<?> op : ops.get(FilterOperator.class)) {
    FilterDesc filterConf = ((FilterOperator) op).getConf();
    if (filterConf.getIsSamplingPred()) {
      return null;
    }
  }
  // If there is one and only one limit below ts, return that limit;
  // otherwise (no limit, or more than one) return null.
  Collection<Operator<?>> limitOps = ops.get(LimitOperator.class);
  if (limitOps.size() == 1) {
    return (LimitOperator) limitOps.iterator().next();
  } else if (limitOps.size() == 0) {
    return null;
  }
  return null;
}
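The method above relies on Guava's ImmutableSet and Multimap to bucket every operator under the table scan by class, and then vetoes the optimization if any disqualifying operator is present. The following stand-alone sketch (not Hive code; the Node, Filter, and Limit classes are invented for illustration, only Guava is assumed on the classpath) shows the same classify-then-veto pattern in isolation.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

class ClassifySketch {
  // Illustrative sketch, not Hive code.
  static class Node {
    final List<Node> children;
    Node(List<Node> children) { this.children = children; }
  }
  static class Filter extends Node {
    final boolean samplingPred;
    Filter(List<Node> children, boolean samplingPred) { super(children); this.samplingPred = samplingPred; }
  }
  static class Limit extends Node {
    Limit(List<Node> children) { super(children); }
  }

  // Returns the single Limit below root, or null if there are zero or many limits,
  // or if any Filter carries a sampling predicate (mirrors the veto logic above).
  static Limit findSingleLimit(Node root) {
    Multimap<Class<?>, Node> byClass = ArrayListMultimap.create();
    Deque<Node> pending = new ArrayDeque<>();
    pending.push(root);
    while (!pending.isEmpty()) {
      Node n = pending.pop();
      byClass.put(n.getClass(), n);
      n.children.forEach(pending::push);
    }
    for (Node n : byClass.get(Filter.class)) {
      if (((Filter) n).samplingPred) {
        return null;                       // disqualifying operator found
      }
    }
    return byClass.get(Limit.class).size() == 1
        ? (Limit) byClass.get(Limit.class).iterator().next()
        : null;
  }
}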
Use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.
From the class MapJoinProcessor, method getMapJoinDesc.
public static MapJoinDesc getMapJoinDesc(HiveConf hconf, JoinOperator op, boolean leftInputJoin,
    String[] baseSrc, List<String> mapAliases, int mapJoinPos, boolean noCheckOuterJoin,
    boolean adjustParentsChildren) throws SemanticException {
  JoinDesc desc = op.getConf();
  JoinCondDesc[] condns = desc.getConds();
  Byte[] tagOrder = desc.getTagOrder();
  // outer join cannot be performed on a table which is being cached
  if (!noCheckOuterJoin) {
    if (checkMapJoin(mapJoinPos, condns) < 0) {
      throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg());
    }
  }
  Map<String, ExprNodeDesc> colExprMap = op.getColumnExprMap();
  List<ColumnInfo> schema = new ArrayList<ColumnInfo>(op.getSchema().getSignature());
  Map<Byte, List<ExprNodeDesc>> valueExprs = op.getConf().getExprs();
  Map<Byte, List<ExprNodeDesc>> newValueExprs = new HashMap<Byte, List<ExprNodeDesc>>();
  ObjectPair<List<ReduceSinkOperator>, Map<Byte, List<ExprNodeDesc>>> pair =
      getKeys(leftInputJoin, baseSrc, op);
  List<ReduceSinkOperator> oldReduceSinkParentOps = pair.getFirst();
  for (Map.Entry<Byte, List<ExprNodeDesc>> entry : valueExprs.entrySet()) {
    byte tag = entry.getKey();
    Operator<?> terminal = oldReduceSinkParentOps.get(tag);
    List<ExprNodeDesc> values = entry.getValue();
    List<ExprNodeDesc> newValues = ExprNodeDescUtils.backtrack(values, op, terminal);
    newValueExprs.put(tag, newValues);
    for (int i = 0; i < schema.size(); i++) {
      ColumnInfo column = schema.get(i);
      if (column == null) {
        continue;
      }
      ExprNodeDesc expr = colExprMap.get(column.getInternalName());
      int index = ExprNodeDescUtils.indexOf(expr, values);
      if (index >= 0) {
        schema.set(i, null);
        if (adjustParentsChildren) {
          // Since we remove reduce sink parents, replace original expressions
          colExprMap.put(column.getInternalName(), newValues.get(index));
        }
      }
    }
  }
  // rewrite value index for mapjoin
  Map<Byte, int[]> valueIndices = new HashMap<Byte, int[]>();
  // get the join keys from old parent ReduceSink operators
  Map<Byte, List<ExprNodeDesc>> keyExprMap = pair.getSecond();
  if (!adjustParentsChildren) {
    // Since we did not remove reduce sink parents, keep the original value expressions
    newValueExprs = valueExprs;
    // Join key exprs are represented in terms of the original table columns;
    // we need to convert these to the generated column names we can see in the Join operator
    Map<Byte, List<ExprNodeDesc>> newKeyExprMap = new HashMap<Byte, List<ExprNodeDesc>>();
    for (Map.Entry<Byte, List<ExprNodeDesc>> mapEntry : keyExprMap.entrySet()) {
      Byte pos = mapEntry.getKey();
      ReduceSinkOperator rsParent = oldReduceSinkParentOps.get(pos.byteValue());
      List<ExprNodeDesc> keyExprList =
          ExprNodeDescUtils.resolveJoinKeysAsRSColumns(mapEntry.getValue(), rsParent);
      if (keyExprList == null) {
        throw new SemanticException("Error resolving join keys");
      }
      newKeyExprMap.put(pos, keyExprList);
    }
    keyExprMap = newKeyExprMap;
  }
  // construct valueTableDescs and valueFilteredTableDescs
  List<TableDesc> valueTableDescs = new ArrayList<TableDesc>();
  List<TableDesc> valueFilteredTableDescs = new ArrayList<TableDesc>();
  int[][] filterMap = desc.getFilterMap();
  for (byte pos = 0; pos < op.getParentOperators().size(); pos++) {
    List<ExprNodeDesc> valueCols = newValueExprs.get(pos);
    if (pos != mapJoinPos) {
      // remove values in key exprs for value table schema
      // value expression for hashsink will be modified in
      // LocalMapJoinProcessor
      int[] valueIndex = new int[valueCols.size()];
      List<ExprNodeDesc> valueColsInValueExpr = new ArrayList<ExprNodeDesc>();
      for (int i = 0; i < valueIndex.length; i++) {
        ExprNodeDesc expr = valueCols.get(i);
        int kindex = ExprNodeDescUtils.indexOf(expr, keyExprMap.get(pos));
        if (kindex >= 0) {
          valueIndex[i] = kindex;
        } else {
          valueIndex[i] = -valueColsInValueExpr.size() - 1;
          valueColsInValueExpr.add(expr);
        }
      }
      if (needValueIndex(valueIndex)) {
        valueIndices.put(pos, valueIndex);
      }
      valueCols = valueColsInValueExpr;
    }
    // deep copy expr node desc
    List<ExprNodeDesc> valueFilteredCols = ExprNodeDescUtils.clone(valueCols);
    if (filterMap != null && filterMap[pos] != null && pos != mapJoinPos) {
      ExprNodeColumnDesc isFilterDesc = new ExprNodeColumnDesc(
          TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME), "filter", "filter", false);
      valueFilteredCols.add(isFilterDesc);
    }
    TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(
        PlanUtils.getFieldSchemasFromColumnList(valueCols, "mapjoinvalue"));
    TableDesc valueFilteredTableDesc = PlanUtils.getMapJoinValueTableDesc(
        PlanUtils.getFieldSchemasFromColumnList(valueFilteredCols, "mapjoinvalue"));
    valueTableDescs.add(valueTableDesc);
    valueFilteredTableDescs.add(valueFilteredTableDesc);
  }
  Map<Byte, List<ExprNodeDesc>> filters = desc.getFilters();
  Map<Byte, List<ExprNodeDesc>> newFilters = new HashMap<Byte, List<ExprNodeDesc>>();
  for (Map.Entry<Byte, List<ExprNodeDesc>> entry : filters.entrySet()) {
    byte srcTag = entry.getKey();
    List<ExprNodeDesc> filter = entry.getValue();
    Operator<?> terminal = oldReduceSinkParentOps.get(srcTag);
    newFilters.put(srcTag, ExprNodeDescUtils.backtrack(filter, op, terminal));
  }
  desc.setFilters(filters = newFilters);
  // create dumpfile prefix needed to create descriptor
  String dumpFilePrefix = "";
  if (mapAliases != null) {
    for (String mapAlias : mapAliases) {
      dumpFilePrefix = dumpFilePrefix + mapAlias;
    }
    dumpFilePrefix = dumpFilePrefix + "-" + PlanUtils.getCountForMapJoinDumpFilePrefix();
  } else {
    dumpFilePrefix = "mapfile" + PlanUtils.getCountForMapJoinDumpFilePrefix();
  }
  List<ExprNodeDesc> keyCols = keyExprMap.get((byte) mapJoinPos);
  List<String> outputColumnNames = op.getConf().getOutputColumnNames();
  TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(hconf,
      PlanUtils.getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX));
  JoinCondDesc[] joinCondns = op.getConf().getConds();
  MapJoinDesc mapJoinDescriptor = new MapJoinDesc(keyExprMap, keyTableDesc, newValueExprs,
      valueTableDescs, valueFilteredTableDescs, outputColumnNames, mapJoinPos, joinCondns,
      filters, op.getConf().getNoOuterJoin(), dumpFilePrefix,
      op.getConf().getMemoryMonitorInfo(), op.getConf().getInMemoryDataSize());
  mapJoinDescriptor.setStatistics(op.getConf().getStatistics());
  mapJoinDescriptor.setTagOrder(tagOrder);
  mapJoinDescriptor.setNullSafes(desc.getNullSafes());
  mapJoinDescriptor.setFilterMap(desc.getFilterMap());
  mapJoinDescriptor.setResidualFilterExprs(desc.getResidualFilterExprs());
  mapJoinDescriptor.setColumnExprMap(colExprMap);
  if (!valueIndices.isEmpty()) {
    mapJoinDescriptor.setValueIndices(valueIndices);
  }
  return mapJoinDescriptor;
}
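One detail worth calling out from the loop above is the valueIndex encoding: when a value column is already part of the join key, its slot stores the key position kindex; otherwise it stores -(position among the remaining value columns) - 1, so key columns are not stored twice in the hash table payload. A minimal decoder sketch for that convention (not Hive code; the class and method names are illustrative) could look like this.

import java.util.ArrayList;
import java.util.List;

class ValueIndexSketch {
  // Illustrative sketch, not Hive code.
  // Reassemble the logical value row from separately stored key and value columns,
  // following the encoding built above: idx >= 0 points into the key columns,
  // idx < 0 points into the value columns at position (-idx - 1).
  static List<Object> decode(int[] valueIndex, List<Object> keyCols, List<Object> valueCols) {
    List<Object> row = new ArrayList<>(valueIndex.length);
    for (int idx : valueIndex) {
      if (idx >= 0) {
        row.add(keyCols.get(idx));          // column was folded into the join key
      } else {
        row.add(valueCols.get(-idx - 1));   // column is stored with the value
      }
    }
    return row;
  }
}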
Use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.
From the class ReduceSinkMapJoinProc, method processReduceSinkToHashJoin.
public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp,
    GenTezProcContext context) throws SemanticException {
  // remove the tag for in-memory side of mapjoin
  parentRS.getConf().setSkipTag(true);
  parentRS.setSkipTag(true);
  // Mark this small table as being processed
  if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
    context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
  }
  List<BaseWork> mapJoinWork = null;
  /*
   * If there is pre-existing work generated for the big-table mapjoin side,
   * we need to hook the work generated for the RS (associated with the RS-MJ pattern)
   * into that pre-existing work.
   *
   * Otherwise, we need to link the mapjoin op
   * to the RS work (associated with the RS-MJ pattern).
   */
  mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
  BaseWork parentWork = getMapJoinParentWork(context, parentRS);
  // set the link between mapjoin and parent vertex
  int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
  if (pos == -1) {
    throw new SemanticException("Cannot find position of parent in mapjoin");
  }
  MapJoinDesc joinConf = mapJoinOp.getConf();
  long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
  long tableSize = Long.MAX_VALUE;
  Statistics stats = parentRS.getStatistics();
  if (stats != null) {
    keyCount = rowCount = stats.getNumRows();
    if (keyCount <= 0) {
      keyCount = rowCount = Long.MAX_VALUE;
    }
    tableSize = stats.getDataSize();
    ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
    if (keyCols != null && !keyCols.isEmpty()) {
      // See if we can arrive at a smaller number using distinct stats from key columns.
      long maxKeyCount = 1;
      String prefix = Utilities.ReduceField.KEY.toString();
      for (String keyCol : keyCols) {
        ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
        ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
        if (cs == null || cs.getCountDistint() <= 0) {
          maxKeyCount = Long.MAX_VALUE;
          break;
        }
        maxKeyCount *= cs.getCountDistint();
        if (maxKeyCount >= keyCount) {
          break;
        }
      }
      keyCount = Math.min(maxKeyCount, keyCount);
    }
    if (joinConf.isBucketMapJoin()) {
      OpTraits opTraits = mapJoinOp.getOpTraits();
      bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
      if (bucketCount > 0) {
        // We cannot obtain a better estimate without CustomPartitionVertex providing it
        // to us somehow; in which case using statistics would be completely unnecessary.
        keyCount /= bucketCount;
        tableSize /= bucketCount;
      }
    } else if (joinConf.isDynamicPartitionHashJoin()) {
      // For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
      bucketCount = parentRS.getConf().getNumReducers();
      keyCount /= bucketCount;
      tableSize /= bucketCount;
    }
  }
  if (keyCount == 0) {
    keyCount = 1;
  }
  if (tableSize == 0) {
    tableSize = 1;
  }
  LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos
      + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, "
      + bucketCount + " buckets)");
  joinConf.getParentToInput().put(pos, parentWork.getName());
  if (keyCount != Long.MAX_VALUE) {
    joinConf.getParentKeyCounts().put(pos, keyCount);
  }
  joinConf.getParentDataSizes().put(pos, tableSize);
  int numBuckets = -1;
  EdgeType edgeType = EdgeType.BROADCAST_EDGE;
  if (joinConf.isBucketMapJoin()) {
    numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
    /*
     * Here, we can be in one of 4 states.
     *
     * 1. If map join work is null implies that we have not yet traversed the big table side. We
     * just need to see if we can find a reduce sink operator in the big table side. This would
     * imply a reduce side operation.
     *
     * 2. If we don't find a reducesink in 1 it has to be the case that it is a map side operation.
     *
     * 3. If we have already created a work item for the big table side, we need to see if we can
     * find a table scan operator in the big table side. This would imply a map side operation.
     *
     * 4. If we don't find a table scan operator, it has to be a reduce side operation.
     */
    if (mapJoinWork == null) {
      Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(
          mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
      if (rootOp == null) {
        // likely we found a table scan operator
        edgeType = EdgeType.CUSTOM_EDGE;
      } else {
        // we have found a reduce sink
        edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
      }
    } else {
      Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(
          mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
      if (rootOp != null) {
        // likely we found a table scan operator
        edgeType = EdgeType.CUSTOM_EDGE;
      } else {
        // we have found a reduce sink
        edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
      }
    }
  } else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
    if (parentRS.getConf().isForwarding()) {
      edgeType = EdgeType.ONE_TO_ONE_EDGE;
    } else {
      edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
    }
  }
  if (edgeType == EdgeType.CUSTOM_EDGE) {
    // disable auto parallelism for bucket map joins
    parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
  }
  TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
  if (mapJoinWork != null) {
    for (BaseWork myWork : mapJoinWork) {
      // link the work with the work associated with the reduce sink that triggered this rule
      TezWork tezWork = context.currentTask.getWork();
      LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
      tezWork.connect(parentWork, myWork, edgeProp);
      if (edgeType == EdgeType.CUSTOM_EDGE) {
        tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
      }
      ReduceSinkOperator r = null;
      if (context.connectedReduceSinks.contains(parentRS)) {
        LOG.debug("Cloning reduce sink " + parentRS + " for multi-child broadcast edge");
        // we've already set this one up. Need to clone for the next work.
        r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(),
            (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()),
            parentRS.getParentOperators());
        context.clonedReduceSinks.add(r);
      } else {
        r = parentRS;
      }
      // remember the output name of the reduce sink
      r.getConf().setOutputName(myWork.getName());
      context.connectedReduceSinks.add(r);
    }
  }
  // remember in case we need to connect additional work later
  Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
  if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
    linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
  } else {
    linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
  }
  linkWorkMap.put(parentWork, edgeProp);
  context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
  List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
  if (reduceSinks == null) {
    reduceSinks = new ArrayList<ReduceSinkOperator>();
  }
  reduceSinks.add(parentRS);
  context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
  // create the dummy operators
  List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
  // create a new operator: HashTableDummyOperator, which shares the table desc
  HashTableDummyDesc desc = new HashTableDummyDesc();
  @SuppressWarnings("unchecked")
  HashTableDummyOperator dummyOp =
      (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
  TableDesc tbl;
  // need to create the correct table descriptor for key/value
  RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
  tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
  dummyOp.getConf().setTbl(tbl);
  Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
  List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
  StringBuilder keyOrder = new StringBuilder();
  StringBuilder keyNullOrder = new StringBuilder();
  for (ExprNodeDesc k : keyCols) {
    keyOrder.append("+");
    keyNullOrder.append("a");
  }
  TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(
      PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"),
      keyOrder.toString(), keyNullOrder.toString());
  mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
  // let the dummy op be the parent of mapjoin op
  mapJoinOp.replaceParent(parentRS, dummyOp);
  List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
  dummyChildren.add(mapJoinOp);
  dummyOp.setChildOperators(dummyChildren);
  dummyOperators.add(dummyOp);
  // cut the operator tree so as to not retain connections from the parent RS downstream
  List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
  int childIndex = childOperators.indexOf(mapJoinOp);
  childOperators.remove(childIndex);
  // register the dummy op with each map join work so it gets set up at task startup
  if (mapJoinWork != null) {
    for (BaseWork myWork : mapJoinWork) {
      LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
      myWork.addDummyOp(dummyOp);
    }
  }
  if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
    for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
      dummyOperators.add(op);
    }
  }
  context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
  return true;
}
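The statistics block near the top of this method estimates the number of distinct keys on the small-table side: it starts from the row count, tries to tighten it with the product of per-column distinct counts, and then divides by the bucket or reducer count when the join is bucketed or a dynamic partitioned hash join. A condensed, hedged sketch of that arithmetic (not the Hive implementation; class and parameter names are made up) follows.

class KeyCountSketch {
  // Illustrative sketch, not Hive code.
  // Start from the row count, tighten it with the product of per-column NDVs when
  // all column statistics are available, then spread the estimate across buckets.
  static long estimateKeys(long rowCount, long[] columnNdvs, long bucketCount) {
    long keyCount = rowCount;
    long product = 1;
    for (long ndv : columnNdvs) {
      if (ndv <= 0) {            // missing stats for a key column: give up on tightening
        product = Long.MAX_VALUE;
        break;
      }
      product *= ndv;
      if (product >= keyCount) { // no point multiplying further once we exceed the row count
        break;
      }
    }
    keyCount = Math.min(product, keyCount);
    if (bucketCount > 0) {
      keyCount /= bucketCount;   // bucket map join / dynamic partitioned hash join case
    }
    return Math.max(keyCount, 1);
  }
}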
Use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.
From the class SetReducerParallelism, method process.
@SuppressWarnings("unchecked")
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext,
    Object... nodeOutputs) throws SemanticException {
  OptimizeTezProcContext context = (OptimizeTezProcContext) procContext;
  ReduceSinkOperator sink = (ReduceSinkOperator) nd;
  ReduceSinkDesc desc = sink.getConf();
  long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER);
  int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
  int constantReducers = context.conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS);
  if (context.visitedReduceSinks.contains(sink)) {
    // skip walking the children
    LOG.debug("Already processed reduce sink: " + sink.getName());
    return true;
  }
  context.visitedReduceSinks.add(sink);
  if (desc.getNumReducers() <= 0) {
    if (constantReducers > 0) {
      LOG.info("Parallelism for reduce sink " + sink + " set by user to " + constantReducers);
      desc.setNumReducers(constantReducers);
    } else {
      long numberOfBytes = 0;
      // we need to add up all the estimates from the siblings of this reduce sink
      for (Operator<? extends OperatorDesc> sibling : sink.getChildOperators().get(0).getParentOperators()) {
        if (sibling.getStatistics() != null) {
          numberOfBytes = StatsUtils.safeAdd(numberOfBytes, sibling.getStatistics().getDataSize());
        } else {
          LOG.warn("No stats available from: " + sibling);
        }
      }
      int numReducers = Utilities.estimateReducers(numberOfBytes, bytesPerReducer, maxReducers, false);
      LOG.info("Set parallelism for reduce sink " + sink + " to: " + numReducers);
      desc.setNumReducers(numReducers);
      final Collection<ExprNodeDescEqualityWrapper> keyCols =
          ExprNodeDescEqualityWrapper.transform(desc.getKeyCols());
      final Collection<ExprNodeDescEqualityWrapper> partCols =
          ExprNodeDescEqualityWrapper.transform(desc.getPartitionCols());
      if (keyCols != null && keyCols.equals(partCols)) {
        desc.setReducerTraits(EnumSet.of(UNIFORM, AUTOPARALLEL));
      } else {
        desc.setReducerTraits(EnumSet.of(AUTOPARALLEL));
      }
    }
  } else {
    LOG.info("Number of reducers determined to be: " + desc.getNumReducers());
    // usually controlled by bucketing
    desc.setReducerTraits(EnumSet.of(FIXED));
  }
  return false;
}
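When no explicit reducer count is configured, the rule sums the data-size estimates of the sibling operators feeding the same child and passes that total to Utilities.estimateReducers. A simplified stand-in for that sizing rule (not Hive's actual Utilities.estimateReducers, which has additional behavior; this shows only the basic bytes-per-reducer arithmetic) might look like this.

class ReducerEstimateSketch {
  // Illustrative sketch, not Hive code.
  // Divide the total estimated input size by the bytes-per-reducer target, rounding up,
  // and clamp the result to the [1, maxReducers] range.
  static int estimateReducers(long totalBytes, long bytesPerReducer, int maxReducers) {
    if (totalBytes <= 0 || bytesPerReducer <= 0) {
      return 1;                                            // no usable stats: single reducer
    }
    long reducers = (totalBytes + bytesPerReducer - 1) / bytesPerReducer;  // ceiling division
    reducers = Math.min(reducers, maxReducers);
    return (int) Math.max(reducers, 1L);
  }
}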
Use of org.apache.hadoop.hive.ql.exec.ReduceSinkOperator in project hive by apache.
From the class SharedWorkOptimizer, method findDescendantWorkOperators.
private static Set<Operator<?>> findDescendantWorkOperators(ParseContext pctx,
    SharedWorkOptimizerCache optimizerCache, Operator<?> start, Set<Operator<?>> excludeOps) {
  // Find operators in work
  Set<Operator<?>> workOps = findWorkOperators(optimizerCache, start);
  // Gather operators from descendant works
  Set<Operator<?>> result = new HashSet<Operator<?>>();
  Set<Operator<?>> set;
  while (!workOps.isEmpty()) {
    set = new HashSet<Operator<?>>();
    for (Operator<?> op : workOps) {
      if (excludeOps.contains(op)) {
        continue;
      }
      if (op instanceof ReduceSinkOperator) {
        if (op.getChildOperators() != null) {
          // All children of RS are descendants
          for (Operator<?> child : op.getChildOperators()) {
            set.addAll(findWorkOperators(optimizerCache, child));
          }
        }
        // Semijoin DPP work is considered a descendant because work needs
        // to finish for it to execute
        SemiJoinBranchInfo sjbi = pctx.getRsToSemiJoinBranchInfo().get(op);
        if (sjbi != null) {
          set.addAll(findWorkOperators(optimizerCache, sjbi.getTsOp()));
        }
      } else if (op.getConf() instanceof DynamicPruningEventDesc) {
        // DPP work is considered a descendant because work needs
        // to finish for it to execute
        set.addAll(findWorkOperators(optimizerCache, ((DynamicPruningEventDesc) op.getConf()).getTableScan()));
      }
    }
    workOps = set;
    result.addAll(set);
  }
  return result;
}
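Structurally, the method is a frontier expansion: it starts from the operators of one work and repeatedly crosses work boundaries (ReduceSink children, semijoin and DPP target table scans) until no new work is reached. The generic sketch below (not Hive code) shows the same traversal shape with the boundary-crossing step abstracted into a function; unlike the Hive method, it also tracks visited items so diamond-shaped plans are not expanded twice.

import java.util.HashSet;
import java.util.Set;
import java.util.function.Function;

class FrontierSketch {
  // Illustrative sketch, not Hive code.
  // Repeatedly apply expand() to the current frontier, collecting everything reached.
  static <T> Set<T> collectDescendants(Set<T> start, Function<T, Set<T>> expand) {
    Set<T> result = new HashSet<>();
    Set<T> frontier = new HashSet<>(start);
    while (!frontier.isEmpty()) {
      Set<T> next = new HashSet<>();
      for (T item : frontier) {
        for (T reached : expand.apply(item)) {
          if (result.add(reached)) {   // only expand from newly discovered items
            next.add(reached);
          }
        }
      }
      frontier = next;
    }
    return result;
  }
}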