Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache: class AnnotateWithOpTraits, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  AnnotateOpTraitsProcCtx annotateCtx = new AnnotateOpTraitsProcCtx(pctx);

  // create a walker which walks the tree in a BFS manner while maintaining the
  // operator stack. The dispatcher generates the plan from the operator tree
  Map<SemanticRule, SemanticNodeProcessor> opRules =
      new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("TS", TableScanOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getTableScanRule());
  opRules.put(new RuleRegExp("RS", ReduceSinkOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getReduceSinkRule());
  opRules.put(new RuleRegExp("JOIN", JoinOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getJoinRule());
  opRules.put(new RuleRegExp("MAPJOIN", MapJoinOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getMultiParentRule());
  opRules.put(new RuleRegExp("SMB", SMBMapJoinOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getMultiParentRule());
  opRules.put(new RuleRegExp("MUX", MuxOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getMultiParentRule());
  opRules.put(new RuleRegExp("DEMUX", DemuxOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getMultiParentRule());
  opRules.put(new RuleRegExp("UNION", UnionOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getMultiParentRule());
  opRules.put(new RuleRegExp("GBY", GroupByOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getGroupByRule());
  opRules.put(new RuleRegExp("PTF", PTFOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getPTFRule());
  opRules.put(new RuleRegExp("SEL", SelectOperator.getOperatorName() + "%"),
      OpTraitsRulesProcFactory.getSelectRule());

  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  SemanticDispatcher disp =
      new DefaultRuleDispatcher(OpTraitsRulesProcFactory.getDefaultRule(), opRules, annotateCtx);
  SemanticGraphWalker ogw = new LevelOrderWalker(disp, 0);

  // Create a list of topop nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
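The walker fires a SemanticNodeProcessor for every operator whose name path matches a rule. A minimal sketch of such a processor, assuming the standard process(Node, Stack, NodeProcessorCtx, Object...) contract used throughout Hive's walker framework; LoggingProc is a hypothetical name, not part of Hive:

  import java.util.Stack;
  import org.apache.hadoop.hive.ql.lib.Node;
  import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
  import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
  import org.apache.hadoop.hive.ql.parse.SemanticException;

  // Hypothetical processor: logs each visited operator and its depth.
  public class LoggingProc implements SemanticNodeProcessor {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      // stack holds the path from a root operator down to nd
      System.out.println("visiting " + nd.getName() + " at depth " + stack.size());
      return null;
    }
  }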
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache: class PartitionConditionRemover, method transform.
/*
 * (non-Javadoc)
 *
 * @see
 * org.apache.hadoop.hive.ql.optimizer.Transform#transform(org.apache.hadoop
 * .hive.ql.parse.ParseContext)
 */
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  // create the context for walking operators
  List<PcrOpWalkerCtx.OpToDeleteInfo> opToRemove =
      new ArrayList<PcrOpWalkerCtx.OpToDeleteInfo>();
  PcrOpWalkerCtx opWalkerCtx = new PcrOpWalkerCtx(pctx, opToRemove);

  Map<SemanticRule, SemanticNodeProcessor> opRules =
      new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("R1",
      "(" + TableScanOperator.getOperatorName() + "%" + FilterOperator.getOperatorName() + "%)|("
          + TableScanOperator.getOperatorName() + "%" + FilterOperator.getOperatorName() + "%"
          + FilterOperator.getOperatorName() + "%)"),
      PcrOpProcFactory.getFilterProc());

  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  SemanticDispatcher disp =
      new DefaultRuleDispatcher(PcrOpProcFactory.getDefaultProc(), opRules, opWalkerCtx);
  SemanticGraphWalker ogw = new DefaultGraphWalker(disp);

  // Create a list of topop nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);

  for (PcrOpWalkerCtx.OpToDeleteInfo entry : opToRemove) {
    entry.getParent().removeChildAndAdoptItsChildren(entry.getOperator());
  }
  return pctx;
}
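PartitionConditionRemover shows a common two-phase idiom: processors only record what to change while the walk is in flight, and the operator tree is mutated after startWalking returns. A sketch of that idiom, where CollectCtx and CollectingProc are hypothetical names and only the process(...) contract is taken from the snippets above:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.Stack;
  import org.apache.hadoop.hive.ql.lib.Node;
  import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
  import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
  import org.apache.hadoop.hive.ql.parse.SemanticException;

  // Hypothetical context that accumulates matched nodes for later rewriting.
  class CollectCtx implements NodeProcessorCtx {
    final List<Node> matched = new ArrayList<Node>();
  }

  class CollectingProc implements SemanticNodeProcessor {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      // Defer mutation: removing operators mid-walk would invalidate the traversal.
      ((CollectCtx) procCtx).matched.add(nd);
      return null;
    }
  }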
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache: class TezCompiler, method markOperatorsWithUnstableRuntimeStats.
private void markOperatorsWithUnstableRuntimeStats(OptimizeTezProcContext procCtx)
    throws SemanticException {
  Map<SemanticRule, SemanticNodeProcessor> opRules =
      new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("R1", ReduceSinkOperator.getOperatorName() + "%"),
      new MarkRuntimeStatsAsIncorrect());
  opRules.put(new RuleRegExp("R2", AppMasterEventOperator.getOperatorName() + "%"),
      new MarkRuntimeStatsAsIncorrect());
  opRules.put(new RuleRegExp("R3", TableScanOperator.getOperatorName() + "%"),
      new MarkRuntimeStatsAsIncorrect());
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(procCtx.parseContext.getTopOps().values());
  SemanticGraphWalker ogw = new PreOrderOnceWalker(disp);
  ogw.startWalking(topNodes, null);
}
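Note the null default processor here: operators matching no rule are simply passed over rather than handled by a fallback. A wiring sketch under that assumption, reusing the hypothetical CollectingProc and CollectCtx from the earlier example and assuming a ParseContext pctx is in scope:

  Map<SemanticRule, SemanticNodeProcessor> opRules =
      new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("RS", ReduceSinkOperator.getOperatorName() + "%"),
      new CollectingProc());
  // null default processor: nodes matching no rule are skipped
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, new CollectCtx());
  SemanticGraphWalker ogw = new PreOrderOnceWalker(disp);
  List<Node> topNodes = new ArrayList<Node>(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);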
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache: class SparkCompiler, method runSetReducerParallelism.
private void runSetReducerParallelism(OptimizeSparkProcContext procCtx) throws SemanticException {
  ParseContext pCtx = procCtx.getParseContext();
  Map<SemanticRule, SemanticNodeProcessor> opRules =
      new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("Set parallelism - ReduceSink",
      ReduceSinkOperator.getOperatorName() + "%"),
      new SetSparkReducerParallelism(pCtx.getConf()));

  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
  SemanticGraphWalker ogw = new PreOrderWalker(disp);

  // Create a list of topop nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pCtx.getTopOps().values());
  ogw.startWalking(topNodes, null);
}
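The snippets differ mainly in walker choice: PreOrderWalker may revisit an operator reachable through several parents, PreOrderOnceWalker visits each node once, LevelOrderWalker proceeds breadth-first, and DefaultGraphWalker dispatches a node only after its parents. Conceptually, a pre-order walk maintains the ancestor stack that RuleRegExp patterns are matched against; a simplified sketch, not Hive's actual implementation, assuming the dispatch(Node, Stack, Object...) shape of SemanticDispatcher:

  static void preOrder(Node nd, Stack<Node> stack, SemanticDispatcher disp)
      throws SemanticException {
    stack.push(nd);
    disp.dispatch(nd, stack);  // fires the closest-matching rule's processor
    if (nd.getChildren() != null) {
      for (Node child : nd.getChildren()) {
        preOrder(child, stack, disp);  // shared children may be visited repeatedly
      }
    }
    stack.pop();
  }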
Use of org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor in project hive by apache: class SparkCompiler, method runDynamicPartitionPruning.
private void runDynamicPartitionPruning(OptimizeSparkProcContext procCtx) throws SemanticException {
  if (!conf.isSparkDPPAny()) {
    return;
  }
  ParseContext parseContext = procCtx.getParseContext();
  Map<SemanticRule, SemanticNodeProcessor> opRules =
      new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
  opRules.put(new RuleRegExp("Dynamic Partition Pruning",
      FilterOperator.getOperatorName() + "%"),
      new DynamicPartitionPruningOptimization());

  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
  SemanticGraphWalker ogw = new ForwardWalker(disp);
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(parseContext.getTopOps().values());
  ogw.startWalking(topNodes, null);
}
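All of these passes share the same Transform shape, so compilers can chain them over a single ParseContext. A usage sketch, assuming the no-arg constructors and a ParseContext pctx in scope; the ordering is illustrative, not Hive's actual pipeline:

  // Each pass consumes and returns the (possibly mutated) ParseContext.
  pctx = new PartitionConditionRemover().transform(pctx);
  pctx = new AnnotateWithOpTraits().transform(pctx);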