Use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
The class MergeJoinProc, method process.
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
GenTezProcContext context = (GenTezProcContext) procCtx;
CommonMergeJoinOperator mergeJoinOp = (CommonMergeJoinOperator) nd;
if (stack.size() < 2) {
// safety check before reading the parent operator off the stack; it is very unlikely that the
// stack size is less than 2, i.e., that the MergeJoinOperator is the only operator in the stack.
context.currentMergeJoinOperator = mergeJoinOp;
return null;
}
TezWork tezWork = context.currentTask.getWork();
@SuppressWarnings("unchecked") Operator<? extends OperatorDesc> parentOp = (Operator<? extends OperatorDesc>) ((stack.get(stack.size() - 2)));
// we need to set the merge work that has been created as part of the dummy store walk. If a
// merge work already exists for this merge join operator, add the dummy store work to the
// merge work. Else, create a merge work and add the above work to it.
MergeJoinWork mergeWork = null;
if (context.opMergeJoinWorkMap.containsKey(mergeJoinOp)) {
// we already have the merge work corresponding to this merge join operator
mergeWork = context.opMergeJoinWorkMap.get(mergeJoinOp);
} else {
mergeWork = new MergeJoinWork();
tezWork.add(mergeWork);
context.opMergeJoinWorkMap.put(mergeJoinOp, mergeWork);
}
if (!(stack.get(stack.size() - 2) instanceof DummyStoreOperator)) {
/* this may happen in one of the following cases:
   TS[0], FIL[26], SEL[2], DUMMY_STORE[30], MERGEJOIN[29]
                                             /
   TS[3], FIL[27], SEL[5], ----------------
*/
context.currentMergeJoinOperator = mergeJoinOp;
mergeWork.setTag(mergeJoinOp.getTagForOperator(parentOp));
return null;
}
// Guaranteed to be just 1 because each DummyStoreOperator can be part of only one work.
BaseWork parentWork = context.childToWorkMap.get(parentOp).get(0);
mergeWork.addMergedWork(null, parentWork, context.leafOperatorToFollowingWork);
mergeWork.setMergeJoinOperator(mergeJoinOp);
tezWork.setVertexType(mergeWork, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES);
for (BaseWork grandParentWork : tezWork.getParents(parentWork)) {
TezEdgeProperty edgeProp = tezWork.getEdgeProperty(grandParentWork, parentWork);
tezWork.disconnect(grandParentWork, parentWork);
tezWork.connect(grandParentWork, mergeWork, edgeProp);
}
for (BaseWork childWork : tezWork.getChildren(parentWork)) {
TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, childWork);
tezWork.disconnect(parentWork, childWork);
tezWork.connect(mergeWork, childWork, edgeProp);
}
tezWork.remove(parentWork);
DummyStoreOperator dummyOp = (DummyStoreOperator) (stack.get(stack.size() - 2));
parentWork.setTag(mergeJoinOp.getTagForOperator(dummyOp));
mergeJoinOp.getParentOperators().remove(dummyOp);
dummyOp.getChildOperators().clear();
return true;
}
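
The interesting part of this processor is the edge re-wiring at the end: every parent and child edge of the dummy-store work is moved onto the merge work before the old vertex is dropped. Below is a minimal, self-contained sketch of that re-wiring pattern; the Graph class and its methods are hypothetical stand-ins, not Hive's TezWork API.

import java.util.*;

// Minimal sketch (not Hive code) of the edge re-wiring done above with tezWork.getParents/getChildren.
public class EdgeRewireSketch {

  // Hypothetical stand-in for TezWork's vertex/edge bookkeeping.
  static class Graph {
    final Map<String, Set<String>> children = new LinkedHashMap<>();
    final Map<String, Set<String>> parents = new LinkedHashMap<>();

    void add(String v) {
      children.computeIfAbsent(v, k -> new LinkedHashSet<>());
      parents.computeIfAbsent(v, k -> new LinkedHashSet<>());
    }

    void connect(String from, String to) {
      children.get(from).add(to);
      parents.get(to).add(from);
    }

    void disconnect(String from, String to) {
      children.get(from).remove(to);
      parents.get(to).remove(from);
    }

    // Move every incoming and outgoing edge of 'absorbed' onto 'merged', then drop 'absorbed'.
    void absorbInto(String merged, String absorbed) {
      for (String p : new ArrayList<>(parents.get(absorbed))) {
        disconnect(p, absorbed);
        connect(p, merged);
      }
      for (String c : new ArrayList<>(children.get(absorbed))) {
        disconnect(absorbed, c);
        connect(merged, c);
      }
      children.remove(absorbed);
      parents.remove(absorbed);
    }
  }

  public static void main(String[] args) {
    Graph g = new Graph();
    for (String v : Arrays.asList("Map 1", "Reducer 2", "Merge Join Work")) {
      g.add(v);
    }
    g.connect("Map 1", "Reducer 2");
    g.absorbInto("Merge Join Work", "Reducer 2");
    // Map 1 now feeds the merge work directly, mirroring the two loops above.
    System.out.println(g.children);   // {Map 1=[Merge Join Work], Merge Join Work=[]}
  }
}

Note that both loops iterate over a copy of the edge list before disconnecting, which is also why the Hive code reads the parents and children before mutating the work graph.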
Use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
The class ReduceSinkMapJoinProc, method processReduceSinkToHashJoin.
public static Object processReduceSinkToHashJoin(ReduceSinkOperator parentRS, MapJoinOperator mapJoinOp, GenTezProcContext context) throws SemanticException {
// remove the tag for in-memory side of mapjoin
parentRS.getConf().setSkipTag(true);
parentRS.setSkipTag(true);
// Mark this small table as being processed
if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
context.mapJoinToUnprocessedSmallTableReduceSinks.get(mapJoinOp).remove(parentRS);
}
List<BaseWork> mapJoinWork = null;
/*
* if there was a pre-existing work generated for the big-table mapjoin side,
* we need to hook the work generated for the RS (associated with the RS-MJ pattern)
* with the pre-existing work.
*
* Otherwise, we need to record that the mapjoin op
* has to be linked to the RS work (associated with the RS-MJ pattern).
*
*/
mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
BaseWork parentWork = getMapJoinParentWork(context, parentRS);
// set the link between mapjoin and parent vertex
int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
if (pos == -1) {
throw new SemanticException("Cannot find position of parent in mapjoin");
}
MapJoinDesc joinConf = mapJoinOp.getConf();
long keyCount = Long.MAX_VALUE, rowCount = Long.MAX_VALUE, bucketCount = 1;
long tableSize = Long.MAX_VALUE;
Statistics stats = parentRS.getStatistics();
if (stats != null) {
keyCount = rowCount = stats.getNumRows();
if (keyCount <= 0) {
keyCount = rowCount = Long.MAX_VALUE;
}
tableSize = stats.getDataSize();
ArrayList<String> keyCols = parentRS.getConf().getOutputKeyColumnNames();
if (keyCols != null && !keyCols.isEmpty()) {
// See if we can arrive at a smaller number using distinct stats from key columns.
long maxKeyCount = 1;
String prefix = Utilities.ReduceField.KEY.toString();
for (String keyCol : keyCols) {
ExprNodeDesc realCol = parentRS.getColumnExprMap().get(prefix + "." + keyCol);
ColStatistics cs = StatsUtils.getColStatisticsFromExpression(context.conf, stats, realCol);
if (cs == null || cs.getCountDistint() <= 0) {
maxKeyCount = Long.MAX_VALUE;
break;
}
maxKeyCount *= cs.getCountDistint();
if (maxKeyCount >= keyCount) {
break;
}
}
keyCount = Math.min(maxKeyCount, keyCount);
}
if (joinConf.isBucketMapJoin()) {
OpTraits opTraits = mapJoinOp.getOpTraits();
bucketCount = (opTraits == null) ? -1 : opTraits.getNumBuckets();
if (bucketCount > 0) {
// We cannot obtain a better estimate without CustomPartitionVertex providing it
// to us somehow; in which case using statistics would be completely unnecessary.
keyCount /= bucketCount;
tableSize /= bucketCount;
}
} else if (joinConf.isDynamicPartitionHashJoin()) {
// For dynamic partitioned hash join, assuming table is split evenly among the reduce tasks.
bucketCount = parentRS.getConf().getNumReducers();
keyCount /= bucketCount;
tableSize /= bucketCount;
}
}
if (keyCount == 0) {
keyCount = 1;
}
if (tableSize == 0) {
tableSize = 1;
}
LOG.info("Mapjoin " + mapJoinOp + "(bucket map join = " + joinConf.isBucketMapJoin() + "), pos: " + pos + " --> " + parentWork.getName() + " (" + keyCount + " keys estimated from " + rowCount + " rows, " + bucketCount + " buckets)");
joinConf.getParentToInput().put(pos, parentWork.getName());
if (keyCount != Long.MAX_VALUE) {
joinConf.getParentKeyCounts().put(pos, keyCount);
}
joinConf.getParentDataSizes().put(pos, tableSize);
int numBuckets = -1;
EdgeType edgeType = EdgeType.BROADCAST_EDGE;
if (joinConf.isBucketMapJoin()) {
numBuckets = (Integer) joinConf.getBigTableBucketNumMapping().values().toArray()[0];
/*
* Here, we can be in one of 4 states.
*
* 1. If map join work is null implies that we have not yet traversed the big table side. We
* just need to see if we can find a reduce sink operator in the big table side. This would
* imply a reduce side operation.
*
* 2. If we don't find a reducesink in 1 it has to be the case that it is a map side operation.
*
* 3. If we have already created a work item for the big table side, we need to see if we can
* find a table scan operator in the big table side. This would imply a map side operation.
*
* 4. If we don't find a table scan operator, it has to be a reduce side operation.
*/
if (mapJoinWork == null) {
Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), ReduceSinkOperator.class);
if (rootOp == null) {
// likely we found a table scan operator
edgeType = EdgeType.CUSTOM_EDGE;
} else {
// we have found a reduce sink
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
} else {
Operator<?> rootOp = OperatorUtils.findSingleOperatorUpstreamJoinAccounted(mapJoinOp.getParentOperators().get(joinConf.getPosBigTable()), TableScanOperator.class);
if (rootOp != null) {
// likely we found a table scan operator
edgeType = EdgeType.CUSTOM_EDGE;
} else {
// we have found a reduce sink
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
}
} else if (mapJoinOp.getConf().isDynamicPartitionHashJoin()) {
if (parentRS.getConf().isForwarding()) {
edgeType = EdgeType.ONE_TO_ONE_EDGE;
} else {
edgeType = EdgeType.CUSTOM_SIMPLE_EDGE;
}
}
if (edgeType == EdgeType.CUSTOM_EDGE) {
// disable auto parallelism for bucket map joins
parentRS.getConf().setReducerTraits(EnumSet.of(FIXED));
}
TezEdgeProperty edgeProp = new TezEdgeProperty(null, edgeType, numBuckets);
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
// link the work with the work associated with the reduce sink that triggered this rule
TezWork tezWork = context.currentTask.getWork();
LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
tezWork.connect(parentWork, myWork, edgeProp);
if (edgeType == EdgeType.CUSTOM_EDGE) {
tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES);
}
ReduceSinkOperator r = null;
if (context.connectedReduceSinks.contains(parentRS)) {
LOG.debug("Cloning reduce sink " + parentRS + " for multi-child broadcast edge");
// we've already set this one up. Need to clone for the next work.
r = (ReduceSinkOperator) OperatorFactory.getAndMakeChild(parentRS.getCompilationOpContext(), (ReduceSinkDesc) parentRS.getConf().clone(), new RowSchema(parentRS.getSchema()), parentRS.getParentOperators());
context.clonedReduceSinks.add(r);
} else {
r = parentRS;
}
// remember the output name of the reduce sink
r.getConf().setOutputName(myWork.getName());
context.connectedReduceSinks.add(r);
}
}
// remember in case we need to connect additional work later
Map<BaseWork, TezEdgeProperty> linkWorkMap = null;
if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
} else {
linkWorkMap = new HashMap<BaseWork, TezEdgeProperty>();
}
linkWorkMap.put(parentWork, edgeProp);
context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
if (reduceSinks == null) {
reduceSinks = new ArrayList<ReduceSinkOperator>();
}
reduceSinks.add(parentRS);
context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
// create the dummy operators
List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
// create a new operator: HashTableDummyOperator, which shares the table desc
HashTableDummyDesc desc = new HashTableDummyDesc();
@SuppressWarnings("unchecked") HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(parentRS.getCompilationOpContext(), desc);
TableDesc tbl;
// need to create the correct table descriptor for key/value
RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
dummyOp.getConf().setTbl(tbl);
Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
StringBuilder keyOrder = new StringBuilder();
StringBuilder keyNullOrder = new StringBuilder();
for (ExprNodeDesc k : keyCols) {
keyOrder.append("+");
keyNullOrder.append("a");
}
TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
// let the dummy op be the parent of mapjoin op
mapJoinOp.replaceParent(parentRS, dummyOp);
List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
dummyChildren.add(mapJoinOp);
dummyOp.setChildOperators(dummyChildren);
dummyOperators.add(dummyOp);
// cut the operator tree so as to not retain connections from the parent RS downstream
List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
int childIndex = childOperators.indexOf(mapJoinOp);
childOperators.remove(childIndex);
// the work needs to know about the dummy operators; they are initialized separately at task startup
if (mapJoinWork != null) {
for (BaseWork myWork : mapJoinWork) {
LOG.debug("adding dummy op to work " + myWork.getName() + " from MJ work: " + dummyOp);
myWork.addDummyOp(dummyOp);
}
}
if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
dummyOperators.add(op);
}
}
context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
return true;
}
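
The key-count estimation above multiplies the distinct counts of the key columns, caps the product at the row count, and then divides by the bucket or reducer count for bucketed or dynamically partitioned hash joins. The arithmetic can be isolated as in the following sketch; the class and method names are illustrative, not Hive APIs.

// Minimal sketch (not Hive code) of the key-count estimate computed above from column NDVs.
public class KeyCountEstimateSketch {

  static long estimateKeyCount(long rowCount, long[] keyColumnNdvs, long bucketCount) {
    long keyCount = rowCount <= 0 ? Long.MAX_VALUE : rowCount;
    long maxKeyCount = 1;
    for (long ndv : keyColumnNdvs) {
      if (ndv <= 0) {
        // Unknown distinct count for one key column: fall back to the row count.
        maxKeyCount = Long.MAX_VALUE;
        break;
      }
      maxKeyCount *= ndv;
      if (maxKeyCount >= keyCount) {
        // The product can only grow, so the row count is already the better bound.
        break;
      }
    }
    keyCount = Math.min(maxKeyCount, keyCount);
    if (bucketCount > 0) {
      // Bucket map join / dynamically partitioned hash join: assume keys split evenly.
      keyCount /= bucketCount;
    }
    return Math.max(keyCount, 1);
  }

  public static void main(String[] args) {
    // 1,000,000 rows, key columns with 1,000 and 50 distinct values, 4 buckets -> 12,500 keys.
    System.out.println(estimateKeyCount(1_000_000L, new long[] {1000L, 50L}, 4));
  }
}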
Use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
The class SharedWorkOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
final Map<String, TableScanOperator> topOps = pctx.getTopOps();
if (topOps.size() < 2) {
// Nothing to do, bail out
return pctx;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Before SharedWorkOptimizer:\n" + Operator.toString(pctx.getTopOps().values()));
}
// Cache to use during optimization
SharedWorkOptimizerCache optimizerCache = new SharedWorkOptimizerCache();
// Gather information about the DPP table scans and store it in the cache
gatherDPPTableScanOps(pctx, optimizerCache);
// Map of dbName.TblName -> TSOperator
Multimap<String, TableScanOperator> tableNameToOps = splitTableScanOpsByTable(pctx);
// We enforce a certain order when we do the reutilization.
// In particular, we use size of table x number of reads to
// rank the tables.
List<Entry<String, Long>> sortedTables = rankTablesByAccumulatedSize(pctx);
LOG.debug("Sorted tables by size: {}", sortedTables);
// Execute optimization
Multimap<String, TableScanOperator> existingOps = ArrayListMultimap.create();
Set<Operator<?>> removedOps = new HashSet<>();
for (Entry<String, Long> tablePair : sortedTables) {
String tableName = tablePair.getKey();
for (TableScanOperator discardableTsOp : tableNameToOps.get(tableName)) {
if (removedOps.contains(discardableTsOp)) {
LOG.debug("Skip {} as it has already been removed", discardableTsOp);
continue;
}
Collection<TableScanOperator> prevTsOps = existingOps.get(tableName);
for (TableScanOperator retainableTsOp : prevTsOps) {
if (removedOps.contains(retainableTsOp)) {
LOG.debug("Skip {} as it has already been removed", retainableTsOp);
continue;
}
// First we quickly check if the two table scan operators can actually be merged
boolean mergeable = areMergeable(pctx, optimizerCache, retainableTsOp, discardableTsOp);
if (!mergeable) {
// Skip
LOG.debug("{} and {} cannot be merged", retainableTsOp, discardableTsOp);
continue;
}
// Secondly, we extract information about the part of the tree that can be merged
// as well as some structural information (memory consumption) that needs to be
// used to determine whether the merge can happen
SharedResult sr = extractSharedOptimizationInfoForRoot(pctx, optimizerCache, retainableTsOp, discardableTsOp);
// Check that the plan meets the preconditions before merging.
if (!validPreConditions(pctx, optimizerCache, sr)) {
// Skip
LOG.debug("{} and {} do not meet preconditions", retainableTsOp, discardableTsOp);
continue;
}
// We can merge
if (sr.retainableOps.size() > 1) {
// More than TS operator
Operator<?> lastRetainableOp = sr.retainableOps.get(sr.retainableOps.size() - 1);
Operator<?> lastDiscardableOp = sr.discardableOps.get(sr.discardableOps.size() - 1);
if (lastDiscardableOp.getNumChild() != 0) {
List<Operator<? extends OperatorDesc>> allChildren = Lists.newArrayList(lastDiscardableOp.getChildOperators());
for (Operator<? extends OperatorDesc> op : allChildren) {
lastDiscardableOp.getChildOperators().remove(op);
op.replaceParent(lastDiscardableOp, lastRetainableOp);
lastRetainableOp.getChildOperators().add(op);
}
}
LOG.debug("Merging subtree starting at {} into subtree starting at {}", discardableTsOp, retainableTsOp);
} else {
// Only TS operator
ExprNodeGenericFuncDesc exprNode = null;
if (retainableTsOp.getConf().getFilterExpr() != null) {
// Push filter on top of children
pushFilterToTopOfTableScan(optimizerCache, retainableTsOp);
// Clone to push to table scan
exprNode = (ExprNodeGenericFuncDesc) retainableTsOp.getConf().getFilterExpr();
}
if (discardableTsOp.getConf().getFilterExpr() != null) {
// Push filter on top
pushFilterToTopOfTableScan(optimizerCache, discardableTsOp);
ExprNodeGenericFuncDesc tsExprNode = discardableTsOp.getConf().getFilterExpr();
if (exprNode != null && !exprNode.isSame(tsExprNode)) {
// We merge filters from previous scan by ORing with filters from current scan
if (exprNode.getGenericUDF() instanceof GenericUDFOPOr) {
List<ExprNodeDesc> newChildren = new ArrayList<>(exprNode.getChildren().size() + 1);
for (ExprNodeDesc childExprNode : exprNode.getChildren()) {
if (childExprNode.isSame(tsExprNode)) {
// We do not need to do anything, it is in the OR expression
break;
}
newChildren.add(childExprNode);
}
if (exprNode.getChildren().size() == newChildren.size()) {
newChildren.add(tsExprNode);
exprNode = ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPOr(), newChildren);
}
} else {
exprNode = ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPOr(), Arrays.<ExprNodeDesc>asList(exprNode, tsExprNode));
}
}
}
// Replace filter
retainableTsOp.getConf().setFilterExpr(exprNode);
// Replace table scan operator
List<Operator<? extends OperatorDesc>> allChildren = Lists.newArrayList(discardableTsOp.getChildOperators());
for (Operator<? extends OperatorDesc> op : allChildren) {
discardableTsOp.getChildOperators().remove(op);
op.replaceParent(discardableTsOp, retainableTsOp);
retainableTsOp.getChildOperators().add(op);
}
LOG.debug("Merging {} into {}", discardableTsOp, retainableTsOp);
}
// First we remove the input operators of the expression that we are going to eliminate
for (Operator<?> op : sr.discardableInputOps) {
OperatorUtils.removeOperator(op);
optimizerCache.removeOp(op);
removedOps.add(op);
// Remove DPP predicates
if (op instanceof ReduceSinkOperator) {
SemiJoinBranchInfo sjbi = pctx.getRsToSemiJoinBranchInfo().get(op);
if (sjbi != null && !sr.discardableOps.contains(sjbi.getTsOp()) && !sr.discardableInputOps.contains(sjbi.getTsOp())) {
GenTezUtils.removeSemiJoinOperator(pctx, (ReduceSinkOperator) op, sjbi.getTsOp());
optimizerCache.tableScanToDPPSource.remove(sjbi.getTsOp(), op);
}
} else if (op instanceof AppMasterEventOperator) {
DynamicPruningEventDesc dped = (DynamicPruningEventDesc) op.getConf();
if (!sr.discardableOps.contains(dped.getTableScan()) && !sr.discardableInputOps.contains(dped.getTableScan())) {
GenTezUtils.removeSemiJoinOperator(pctx, (AppMasterEventOperator) op, dped.getTableScan());
optimizerCache.tableScanToDPPSource.remove(dped.getTableScan(), op);
}
}
LOG.debug("Input operator removed: {}", op);
}
// Then we merge the operators of the works we are going to merge
optimizerCache.removeOpAndCombineWork(discardableTsOp, retainableTsOp);
removedOps.add(discardableTsOp);
// Finally we remove the expression from the tree
for (Operator<?> op : sr.discardableOps) {
OperatorUtils.removeOperator(op);
optimizerCache.removeOp(op);
removedOps.add(op);
if (sr.discardableOps.size() == 1) {
// If there is a single discardable operator, it is a TableScanOperator
// and it means that we have merged filter expressions for it. Thus, we
// might need to remove DPP predicates from the retainable TableScanOperator
Collection<Operator<?>> c = optimizerCache.tableScanToDPPSource.get((TableScanOperator) op);
for (Operator<?> dppSource : c) {
if (dppSource instanceof ReduceSinkOperator) {
GenTezUtils.removeSemiJoinOperator(pctx, (ReduceSinkOperator) dppSource, (TableScanOperator) sr.retainableOps.get(0));
optimizerCache.tableScanToDPPSource.remove(sr.retainableOps.get(0), op);
} else if (dppSource instanceof AppMasterEventOperator) {
GenTezUtils.removeSemiJoinOperator(pctx, (AppMasterEventOperator) dppSource, (TableScanOperator) sr.retainableOps.get(0));
optimizerCache.tableScanToDPPSource.remove(sr.retainableOps.get(0), op);
}
}
}
LOG.debug("Operator removed: {}", op);
}
break;
}
if (removedOps.contains(discardableTsOp)) {
// This operator has been removed, remove it from the list of existing operators
existingOps.remove(tableName, discardableTsOp);
} else {
// This operator has not been removed, include it in the list of existing operators
existingOps.put(tableName, discardableTsOp);
}
}
}
// Remove unused table scan operators
Iterator<Entry<String, TableScanOperator>> it = topOps.entrySet().iterator();
while (it.hasNext()) {
Entry<String, TableScanOperator> e = it.next();
if (e.getValue().getNumChild() == 0) {
it.remove();
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("After SharedWorkOptimizer:\n" + Operator.toString(pctx.getTopOps().values()));
}
if (pctx.getConf().getBoolVar(ConfVars.HIVE_SHARED_WORK_EXTENDED_OPTIMIZATION)) {
// Gather RS operators that 1) belong to root works, i.e., works containing TS operators,
// and 2) share the same input operator.
// These will be the first target for extended shared work optimization
Multimap<Operator<?>, ReduceSinkOperator> parentToRsOps = ArrayListMultimap.create();
Set<Operator<?>> visited = new HashSet<>();
for (Entry<String, TableScanOperator> e : topOps.entrySet()) {
gatherReduceSinkOpsByInput(parentToRsOps, visited, findWorkOperators(optimizerCache, e.getValue()));
}
while (!parentToRsOps.isEmpty()) {
// As above, we enforce a certain order when we do the reutilization.
// In particular, we use size of data in RS x number of uses.
List<Entry<Operator<?>, Long>> sortedRSGroups = rankOpsByAccumulatedSize(parentToRsOps.keySet());
LOG.debug("Sorted operators by size: {}", sortedRSGroups);
// Execute extended optimization
// For each RS, check whether other RS in the same work could be merged into this one.
// If they are merged, RS operators in the resulting work will be considered
// mergeable in next loop iteration.
Multimap<Operator<?>, ReduceSinkOperator> existingRsOps = ArrayListMultimap.create();
for (Entry<Operator<?>, Long> rsGroupInfo : sortedRSGroups) {
Operator<?> rsParent = rsGroupInfo.getKey();
for (ReduceSinkOperator discardableRsOp : parentToRsOps.get(rsParent)) {
if (removedOps.contains(discardableRsOp)) {
LOG.debug("Skip {} as it has already been removed", discardableRsOp);
continue;
}
Collection<ReduceSinkOperator> otherRsOps = existingRsOps.get(rsParent);
for (ReduceSinkOperator retainableRsOp : otherRsOps) {
if (removedOps.contains(retainableRsOp)) {
LOG.debug("Skip {} as it has already been removed", retainableRsOp);
continue;
}
// First we quickly check if the two RS operators can actually be merged.
// We already know that these two RS operators have the same parent, but
// we need to check whether both RS are actually equal. Further, we check
// whether their child is also equal. If any of these conditions are not
// met, we are not going to try to merge.
boolean mergeable = compareOperator(pctx, retainableRsOp, discardableRsOp) && compareOperator(pctx, retainableRsOp.getChildOperators().get(0), discardableRsOp.getChildOperators().get(0));
if (!mergeable) {
// Skip
LOG.debug("{} and {} cannot be merged", retainableRsOp, discardableRsOp);
continue;
}
LOG.debug("Checking additional conditions for merging subtree starting at {}" + " into subtree starting at {}", discardableRsOp, retainableRsOp);
// Secondly, we extract information about the part of the tree that can be merged
// as well as some structural information (memory consumption) that needs to be
// used to determine whether the merge can happen
Operator<?> retainableRsOpChild = retainableRsOp.getChildOperators().get(0);
Operator<?> discardableRsOpChild = discardableRsOp.getChildOperators().get(0);
SharedResult sr = extractSharedOptimizationInfo(pctx, optimizerCache, retainableRsOp, discardableRsOp, retainableRsOpChild, discardableRsOpChild);
// Check that the plan meets the preconditions before merging.
if (sr.retainableOps.isEmpty() || !validPreConditions(pctx, optimizerCache, sr)) {
// Skip
LOG.debug("{} and {} do not meet preconditions", retainableRsOp, discardableRsOp);
continue;
}
// We can merge
Operator<?> lastRetainableOp = sr.retainableOps.get(sr.retainableOps.size() - 1);
Operator<?> lastDiscardableOp = sr.discardableOps.get(sr.discardableOps.size() - 1);
if (lastDiscardableOp.getNumChild() != 0) {
List<Operator<? extends OperatorDesc>> allChildren = Lists.newArrayList(lastDiscardableOp.getChildOperators());
for (Operator<? extends OperatorDesc> op : allChildren) {
lastDiscardableOp.getChildOperators().remove(op);
op.replaceParent(lastDiscardableOp, lastRetainableOp);
lastRetainableOp.getChildOperators().add(op);
}
}
LOG.debug("Merging subtree starting at {} into subtree starting at {}", discardableRsOp, retainableRsOp);
// First we remove the input operators of the expression that we are going to eliminate
for (Operator<?> op : sr.discardableInputOps) {
OperatorUtils.removeOperator(op);
optimizerCache.removeOp(op);
removedOps.add(op);
// Remove DPP predicates
if (op instanceof ReduceSinkOperator) {
SemiJoinBranchInfo sjbi = pctx.getRsToSemiJoinBranchInfo().get(op);
if (sjbi != null && !sr.discardableOps.contains(sjbi.getTsOp()) && !sr.discardableInputOps.contains(sjbi.getTsOp())) {
GenTezUtils.removeSemiJoinOperator(pctx, (ReduceSinkOperator) op, sjbi.getTsOp());
optimizerCache.tableScanToDPPSource.remove(sjbi.getTsOp(), op);
}
} else if (op instanceof AppMasterEventOperator) {
DynamicPruningEventDesc dped = (DynamicPruningEventDesc) op.getConf();
if (!sr.discardableOps.contains(dped.getTableScan()) && !sr.discardableInputOps.contains(dped.getTableScan())) {
GenTezUtils.removeSemiJoinOperator(pctx, (AppMasterEventOperator) op, dped.getTableScan());
optimizerCache.tableScanToDPPSource.remove(dped.getTableScan(), op);
}
}
LOG.debug("Input operator removed: {}", op);
}
// We remove the discardable RS operator
OperatorUtils.removeOperator(discardableRsOp);
optimizerCache.removeOp(discardableRsOp);
removedOps.add(discardableRsOp);
LOG.debug("Operator removed: {}", discardableRsOp);
// Then we merge the operators of the works we are going to merge
optimizerCache.removeOpAndCombineWork(discardableRsOpChild, retainableRsOpChild);
// Finally we remove the rest of the expression from the tree
for (Operator<?> op : sr.discardableOps) {
OperatorUtils.removeOperator(op);
optimizerCache.removeOp(op);
removedOps.add(op);
LOG.debug("Operator removed: {}", op);
}
break;
}
if (removedOps.contains(discardableRsOp)) {
// This operator has been removed, remove it from the list of existing operators
existingRsOps.remove(rsParent, discardableRsOp);
} else {
// This operator has not been removed, include it in the list of existing operators
existingRsOps.put(rsParent, discardableRsOp);
}
}
}
// We gather the operators that will be used for next iteration of extended optimization
// (if any)
parentToRsOps = ArrayListMultimap.create();
visited = new HashSet<>();
for (Entry<Operator<?>, ReduceSinkOperator> e : existingRsOps.entries()) {
if (removedOps.contains(e.getValue()) || e.getValue().getNumChild() < 1) {
// If the RS has already been removed or has no child (for instance, a semijoin RS), we can quickly skip this one
continue;
}
gatherReduceSinkOpsByInput(parentToRsOps, visited, findWorkOperators(optimizerCache, e.getValue().getChildOperators().get(0)));
}
}
// Remove unused table scan operators
it = topOps.entrySet().iterator();
while (it.hasNext()) {
Entry<String, TableScanOperator> e = it.next();
if (e.getValue().getNumChild() == 0) {
it.remove();
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("After SharedWorkExtendedOptimizer:\n" + Operator.toString(pctx.getTopOps().values()));
}
}
// When running in test mode, verify that the operator cache contents match the actual plan, whether we used the basic or the extended version of the optimizer.
if (pctx.getConf().getBoolVar(ConfVars.HIVE_IN_TEST)) {
Set<Operator<?>> visited = new HashSet<>();
it = topOps.entrySet().iterator();
while (it.hasNext()) {
Entry<String, TableScanOperator> e = it.next();
for (Operator<?> op : OperatorUtils.findOperators(e.getValue(), Operator.class)) {
if (!visited.contains(op)) {
if (!findWorkOperators(optimizerCache, op).equals(findWorkOperators(op, new HashSet<Operator<?>>()))) {
throw new SemanticException("Error in shared work optimizer: operator cache contents" + "and actual plan differ");
}
visited.add(op);
}
}
}
}
return pctx;
}
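
When only the table scans are merged, the optimizer combines the two filter expressions with an OR, taking care not to add a predicate that is already a child of an existing top-level OR. The following is a simplified sketch of that merge rule, with strings standing in for ExprNodeDesc instances; the class and method names are illustrative only.

import java.util.*;

// Minimal sketch (not Hive code) of how the two table-scan filters are merged by OR above.
public class FilterOrMergeSketch {

  // Merge an incoming predicate into an existing filter. 'existingOrChildren' is non-null
  // only when the retained filter is already a top-level OR.
  static List<String> mergeByOr(List<String> existingOrChildren, String existingLeaf, String incoming) {
    if (existingOrChildren == null) {
      // Not an OR yet: build OR(existingLeaf, incoming) unless both predicates are the same.
      return existingLeaf.equals(incoming)
          ? new ArrayList<>(Collections.singletonList(existingLeaf))
          : new ArrayList<>(Arrays.asList(existingLeaf, incoming));
    }
    // Already an OR: append the incoming predicate only if no existing child is the same expression.
    List<String> merged = new ArrayList<>(existingOrChildren);
    if (!merged.contains(incoming)) {
      merged.add(incoming);
    }
    return merged;
  }

  public static void main(String[] args) {
    System.out.println(mergeByOr(null, "ds='2024-01-01'", "ds='2024-01-02'"));
    System.out.println(mergeByOr(Arrays.asList("ds='2024-01-01'", "ds='2024-01-02'"), null, "ds='2024-01-02'"));
  }
}

Both calls print the same two-element OR, which mirrors the guard in the original code that skips a tsExprNode already present among the OR children.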
Use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
The class HashTableLoader, method loadDirectly.
private void loadDirectly(MapJoinTableContainer[] mapJoinTables, String inputFileName) throws Exception {
MapredLocalWork localWork = context.getLocalWork();
List<Operator<?>> directWorks = localWork.getDirectFetchOp().get(joinOp);
if (directWorks == null || directWorks.isEmpty()) {
return;
}
JobConf job = new JobConf(hconf);
MapredLocalTask localTask = new MapredLocalTask(localWork, job, false);
HashTableSinkOperator sink = new TemporaryHashSinkOperator(new CompilationOpContext(), desc);
sink.setParentOperators(new ArrayList<Operator<? extends OperatorDesc>>(directWorks));
for (Operator<?> operator : directWorks) {
if (operator != null) {
operator.setChildOperators(Arrays.<Operator<? extends OperatorDesc>>asList(sink));
}
}
localTask.setExecContext(context);
localTask.startForward(inputFileName);
MapJoinTableContainer[] tables = sink.getMapJoinTables();
for (int i = 0; i < sink.getNumParent(); i++) {
if (sink.getParentOperators().get(i) != null) {
mapJoinTables[i] = tables[i];
}
}
Arrays.fill(tables, null);
}
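
The wiring in loadDirectly is a simple fan-in: the temporary hash table sink becomes the only child of every non-null direct-fetch operator, and its parent list mirrors the directWorks list so that the resulting hash tables can be read back by position. A minimal sketch of that pattern, with hypothetical stand-in classes rather than Hive operators:

import java.util.*;

// Minimal sketch (not Hive code) of wiring one sink under several direct-fetch operators.
public class DirectLoadWiringSketch {

  static class Op {
    final String name;
    final List<Op> children = new ArrayList<>();
    final List<Op> parents = new ArrayList<>();
    Op(String name) { this.name = name; }
  }

  public static void main(String[] args) {
    // Nulls keep the positions aligned with the join tags, as in the original parent list.
    List<Op> directWorks = Arrays.asList(new Op("fetch_small_1"), null, new Op("fetch_small_2"));
    Op sink = new Op("hash_table_sink");

    for (Op parent : directWorks) {
      sink.parents.add(parent);
      if (parent != null) {
        parent.children.add(sink);      // the sink is the sole child of each direct-fetch op
      }
    }

    // After execution, only positions with a real parent would receive a hash table.
    Object[] tables = new Object[sink.parents.size()];
    for (int i = 0; i < sink.parents.size(); i++) {
      if (sink.parents.get(i) != null) {
        tables[i] = "table for " + sink.parents.get(i).name;
      }
    }
    System.out.println(Arrays.toString(tables));
  }
}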
Use of org.apache.hadoop.hive.ql.plan.OperatorDesc in project hive by apache.
The class SparkMapRecordHandler, method close.
@Override
public void close() {
// No row was processed
if (!anyRow) {
LOG.trace("Close called. no row processed by map.");
}
// check if there are IOExceptions
if (!abort) {
abort = execContext.getIoCxt().getIOExceptions();
}
// ideally hadoop should let us know whether map execution failed or not
try {
mo.close(abort);
// close the local work
if (localWork != null) {
List<Operator<? extends OperatorDesc>> dummyOps = localWork.getDummyParentOp();
for (Operator<? extends OperatorDesc> dummyOp : dummyOps) {
dummyOp.close(abort);
}
}
if (LOG.isInfoEnabled()) {
logCloseInfo();
}
ReportStats rps = new ReportStats(rp, jc);
mo.preorderMap(rps);
return;
} catch (Exception e) {
if (!abort) {
// signal new failure to map-reduce
String msg = "Hit error while closing operators - failing tree: " + e;
LOG.error(msg, e);
throw new IllegalStateException(msg, e);
}
} finally {
MapredContext.close();
Utilities.clearWorkMap(jc);
}
}
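
The close method follows a common pattern: close the main operator tree and any dummy parent operators, surface an exception only when the task was not already aborting, and always run the cleanup. A compact sketch of that pattern, using a hypothetical AbortableClose interface instead of Hive's Operator:

// Minimal sketch (not Hive code) of the close/abort pattern used above.
public class CloseWithAbortSketch {

  interface AbortableClose { void close(boolean abort) throws Exception; }

  static void closeAll(boolean abort, AbortableClose main, AbortableClose... dummies) {
    try {
      main.close(abort);
      for (AbortableClose d : dummies) {
        d.close(abort);
      }
    } catch (Exception e) {
      if (!abort) {
        // A failure during a normal close is a new error and must be surfaced.
        throw new IllegalStateException("Hit error while closing operators - failing tree: " + e, e);
      }
      // Already aborting: the original failure is the one that matters, so stay quiet here.
    } finally {
      // Release per-task resources regardless of how the close went.
      System.out.println("cleanup");
    }
  }

  public static void main(String[] args) {
    closeAll(false, a -> System.out.println("main closed"), a -> System.out.println("dummy closed"));
  }
}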