Use of org.apache.hadoop.hive.ql.lib.SemanticDispatcher in project hive by apache.
From the class RedundantDynamicPruningConditionsRemoval, method transform.
/**
 * Transform the query tree.
 *
 * @param pctx the current parse context
 */
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  // Make sure semijoin is not enabled. If it is, then do not remove the dynamic partition pruning predicates.
  if (!pctx.getConf().getBoolVar(HiveConf.ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION)) {
    Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
    opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%" + FilterOperator.getOperatorName() + "%"), new FilterTransformer());
    SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    List<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(pctx.getTopOps().values());
    ogw.startWalking(topNodes, null);
  }
  return pctx;
}
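The FilterTransformer registered under R1 is a SemanticNodeProcessor. As a hedged sketch of what such a processor looks like, here is a hypothetical no-op implementation; the class name LoggingFilterProcessor and its body are illustrative only, and the process signature follows Hive's usual node-processor contract, which is worth verifying against the version in use.

import java.util.Stack;

import org.apache.hadoop.hive.ql.exec.FilterOperator;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical processor: the dispatcher calls process() each time the
// walker's operator stack matches the registered rule (here TS% ... FIL%).
public class LoggingFilterProcessor implements SemanticNodeProcessor {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws SemanticException {
    // nd is the operator the rule pattern ends on (the FilterOperator);
    // stack holds the path from a top operator down to nd.
    FilterOperator filter = (FilterOperator) nd;
    // A real processor would inspect or rewrite filter.getConf() here.
    return null;
  }
}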
Use of org.apache.hadoop.hive.ql.lib.SemanticDispatcher in project hive by apache.
From the class GroupByOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  HiveConf conf = pctx.getConf();
  if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEGROUPBYSKEW)) {
    // process group-by pattern
    opRules.put(new RuleRegExp("R1", GroupByOperator.getOperatorName() + "%" + ReduceSinkOperator.getOperatorName() + "%" + GroupByOperator.getOperatorName() + "%"), getMapSortedGroupbyProc(pctx));
  } else {
    // If hive.groupby.skewindata is set to true, the operator tree is as below
    opRules.put(new RuleRegExp("R2", GroupByOperator.getOperatorName() + "%" + ReduceSinkOperator.getOperatorName() + "%" + GroupByOperator.getOperatorName() + "%" + ReduceSinkOperator.getOperatorName() + "%" + GroupByOperator.getOperatorName() + "%"), getMapSortedGroupbySkewProc(pctx));
  }
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, new GroupByOptimizerContext(conf));
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of topop nodes
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
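For readability, the two rule strings expand as follows, assuming Hive's conventional operator short names ("GBY" for GroupByOperator, "RS" for ReduceSinkOperator); the expansion below is a sketch for illustration, not text from the source.

// Non-skew case (R1): map-side aggregation, one shuffle, reduce-side aggregation.
String r1 = "GBY%RS%GBY%";
// Skew case (R2): hive.groupby.skewindata adds a second shuffle and group-by stage.
String r2 = "GBY%RS%GBY%RS%GBY%";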
Use of org.apache.hadoop.hive.ql.lib.SemanticDispatcher in project hive by apache.
From the class LimitPushdownOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("R1", ReduceSinkOperator.getOperatorName() + "%" + ".*" + LimitOperator.getOperatorName() + "%"), new TopNReducer());
  opRules.put(new RuleRegExp("R2", ReduceSinkOperator.getOperatorName() + "%" + ".*" + ReduceSinkOperator.getOperatorName() + "%"), new TopNPropagator());
  LimitPushdownContext context = new LimitPushdownContext(pctx.getConf());
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, context);
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  List<Node> topNodes = new ArrayList<Node>(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
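Note the ".*" embedded in both patterns: unlike the exact operator sequences used by GroupByOptimizer, these rules tolerate any run of operators between the matched endpoints. Assuming the short names "RS" for ReduceSinkOperator and "LIM" for LimitOperator, R1 concatenates to the string below (again a sketch for illustration).

// "RS%.*LIM%" matches a ReduceSink with a Limit anywhere beneath it on the
// walker's operator stack, e.g. RS -> SEL -> LIM as well as a direct RS -> LIM.
String r1 = "RS%.*LIM%";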
Use of org.apache.hadoop.hive.ql.lib.SemanticDispatcher in project hive by apache.
From the class SortedDynPartitionOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pCtx) throws SemanticException {
  // create a walker which walks the tree in a DFS manner while maintaining the
  // operator stack. The dispatcher generates the plan from the operator tree
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  String FS = FileSinkOperator.getOperatorName() + "%";
  opRules.put(new RuleRegExp("Sorted Dynamic Partition", FS), getSortDynPartProc(pCtx));
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pCtx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pCtx;
}
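Each of these classes exposes the same transform(ParseContext) entry point, so they are all driven the same way. Below is a simplified, hypothetical driver loop in the spirit of Hive's Optimizer, which keeps a configured list of transformations and applies them in order; the method name and the choice of transforms here are illustrative, not the Optimizer's actual code.

// Sketch: applying a list of transforms sequentially, Optimizer-style.
static ParseContext optimize(ParseContext pctx) throws SemanticException {
  List<Transform> transformations = new ArrayList<Transform>();
  transformations.add(new SortedDynPartitionOptimizer());
  transformations.add(new LimitPushdownOptimizer());
  for (Transform t : transformations) {
    // Each transform receives the current ParseContext and returns the
    // (possibly rewritten) context for the next one.
    pctx = t.transform(pctx);
  }
  return pctx;
}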
Use of org.apache.hadoop.hive.ql.lib.SemanticDispatcher in project hive by apache.
From the class SortedMergeBucketMapJoinOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  HiveConf conf = pctx.getConf();
  SortBucketJoinProcCtx smbJoinContext = new SortBucketJoinProcCtx(conf);
  // Get a list of joins which cannot be converted to a sort-merge join.
  // Only select and filter operators are currently allowed between the table scan
  // and the join; more operators can be added - the method supportAutomaticSortMergeJoin
  // dictates which operators are allowed.
  getListOfRejectedJoins(pctx, smbJoinContext);
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  // Go through all map joins and find out all which have enabled bucket map join.
  opRules.put(new RuleRegExp("R1", MapJoinOperator.getOperatorName() + "%"), getSortedMergeBucketMapjoinProc(pctx));
  // If automatic sort-merge join is enabled, check whether plain joins can be
  // converted to a sort-merge join.
  if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN)) {
    opRules.put(new RuleRegExp("R2", "JOIN%"), getSortedMergeJoinProc(pctx));
  }
  SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, smbJoinContext);
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of topop nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
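Taken together, every transform above is the same four-step boilerplate: build an ordered rule map, wrap it in a dispatcher (optionally with a default processor and a shared context), walk the operator DAG depth-first from the top operators, and return the ParseContext. A condensed skeleton of that pattern follows, reusing the hypothetical LoggingFilterProcessor sketched earlier and passing null for the default processor and context.

@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  // 1. Ordered rule map: LinkedHashMap preserves registration order for rule priority.
  Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("MyRule", FilterOperator.getOperatorName() + "%"), new LoggingFilterProcessor());
  // 2. Dispatcher: the first argument is the default processor fired when no rule
  //    matches (null here); the third is a context shared across callbacks (null here).
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
  // 3. Walker: depth-first traversal that maintains the operator stack the
  //    RuleRegExp patterns are matched against.
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
  // 4. Start from the top operators (the table scans) and walk the whole DAG.
  List<Node> topNodes = new ArrayList<Node>(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}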