Use of org.apache.hadoop.hive.ql.lib.TypeRule in project hive by apache.
Class PrunerUtils, method walkExprTree:
/**
 * Walk expression tree for pruner generation.
 *
 * @param pred predicate expression to walk
 * @param ctx processor context passed to every fired processor
 * @param colProc processor fired on ExprNodeColumnDesc nodes
 * @param fieldProc processor fired on ExprNodeFieldDesc nodes
 * @param genFuncProc processor fired on ExprNodeGenericFuncDesc nodes
 * @param defProc default processor fired when no type rule matches
 * @return a map from each visited node to the output of the processor that handled it
 * @throws SemanticException if any processor fails during the walk
 */
public static Map<Node, Object> walkExprTree(ExprNodeDesc pred, NodeProcessorCtx ctx, SemanticNodeProcessor colProc, SemanticNodeProcessor fieldProc, SemanticNodeProcessor genFuncProc, SemanticNodeProcessor defProc) throws SemanticException {
// Create a walker which walks the expression tree in a DFS manner while
// maintaining the node stack.
Map<SemanticRule, SemanticNodeProcessor> exprRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
exprRules.put(new TypeRule(ExprNodeColumnDesc.class), colProc);
exprRules.put(new TypeRule(ExprNodeFieldDesc.class), fieldProc);
exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), genFuncProc);
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
SemanticDispatcher disp = new DefaultRuleDispatcher(defProc, exprRules, ctx);
SemanticGraphWalker egw = new DefaultGraphWalker(disp);
List<Node> startNodes = new ArrayList<Node>();
startNodes.add(pred);
HashMap<Node, Object> outputMap = new HashMap<Node, Object>();
egw.startWalking(startNodes, outputMap);
return outputMap;
}
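A minimal caller-side sketch (not from the Hive sources) of wiring walkExprTree: a trivial processor fills all four processor slots so the signature and the returned node-output map are visible. The context class and processor are hypothetical; only the walkExprTree signature and the ql.lib types come from the snippet above, and package names outside org.apache.hadoop.hive.ql.lib are best-effort assumptions.
import java.util.Map;
import java.util.Stack;

import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.optimizer.PrunerUtils;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

public class WalkExprTreeSketch {

  // Hypothetical (empty) context object carried along the walk.
  static class MyPrunerCtx implements NodeProcessorCtx {
  }

  // Trivial processor that just records which node it was fired on.
  static final SemanticNodeProcessor LOGGING_PROC = new SemanticNodeProcessor() {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx,
        Object... nodeOutputs) throws SemanticException {
      return "visited " + nd.getName();
    }
  };

  static Map<Node, Object> walk(ExprNodeDesc predicate) throws SemanticException {
    // Column, field and generic-function nodes all get the same processor here;
    // a real pruner passes four distinct processors, one per node type.
    return PrunerUtils.walkExprTree(predicate, new MyPrunerCtx(),
        LOGGING_PROC, LOGGING_PROC, LOGGING_PROC, LOGGING_PROC);
  }
}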
Use of org.apache.hadoop.hive.ql.lib.TypeRule in project hive by apache.
Class SparkCompiler, method runJoinOptimizations:
private void runJoinOptimizations(OptimizeSparkProcContext procCtx) throws SemanticException {
ParseContext pCtx = procCtx.getParseContext();
Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new TypeRule(JoinOperator.class), new SparkJoinOptimizer(pCtx));
opRules.put(new TypeRule(MapJoinOperator.class), new SparkJoinHintOptimizer(pCtx));
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
// Create a list of top operator (root) nodes
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pCtx.getTopOps().values());
ogw.startWalking(topNodes, null);
}
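For contrast with the regex-based rules used elsewhere in SparkCompiler, here is a minimal sketch (illustrative, not Hive code) of the same dispatcher/walker wiring with an inline processor behind a TypeRule: the rule is keyed on the Java type of the visited node rather than on an operator-name pattern. Only types already shown above are used; the rule table and processor body are assumptions.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Stack;

import org.apache.hadoop.hive.ql.exec.JoinOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.TypeRule;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class TypeRuleWalkSketch {

  static void walkJoins(List<Node> topNodes, NodeProcessorCtx ctx) throws SemanticException {
    Map<SemanticRule, SemanticNodeProcessor> opRules =
        new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
    // TypeRule keys the processor on the class of the visited node, so this
    // processor fires on every JoinOperator the walker reaches.
    opRules.put(new TypeRule(JoinOperator.class), new SemanticNodeProcessor() {
      @Override
      public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
          Object... nodeOutputs) throws SemanticException {
        // A real optimizer would rewrite the join here (cf. SparkJoinOptimizer).
        return null;
      }
    });
    // No default processor: nodes that match no rule are simply passed over.
    SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, ctx);
    SemanticGraphWalker walker = new DefaultGraphWalker(disp);
    walker.startWalking(new ArrayList<Node>(topNodes), null);
  }
}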
Use of org.apache.hadoop.hive.ql.lib.TypeRule in project hive by apache.
Class SparkCompiler, method generateTaskTreeHelper:
private void generateTaskTreeHelper(GenSparkProcContext procCtx, List<Node> topNodes) throws SemanticException {
// create a walker which walks the tree in a DFS manner while maintaining
// the operator stack. The dispatcher generates the plan from the operator tree
Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
GenSparkWork genSparkWork = new GenSparkWork(GenSparkUtils.getUtils());
opRules.put(new RuleRegExp("Split Work - ReduceSink", ReduceSinkOperator.getOperatorName() + "%"), genSparkWork);
opRules.put(new RuleRegExp("Split Work - SparkPartitionPruningSink", SparkPartitionPruningSinkOperator.getOperatorName() + "%"), genSparkWork);
opRules.put(new TypeRule(MapJoinOperator.class), new SparkReduceSinkMapJoinProc());
opRules.put(new RuleRegExp("Split Work + Move/Merge - FileSink", FileSinkOperator.getOperatorName() + "%"), new CompositeProcessor(new SparkFileSinkProcessor(), genSparkWork));
opRules.put(new RuleRegExp("Handle Analyze Command", TableScanOperator.getOperatorName() + "%"), new SparkProcessAnalyzeTable(GenSparkUtils.getUtils()));
opRules.put(new RuleRegExp("Remember union", UnionOperator.getOperatorName() + "%"), new SemanticNodeProcessor() {
@Override
public Object process(Node n, Stack<Node> s, NodeProcessorCtx procCtx, Object... os) throws SemanticException {
GenSparkProcContext context = (GenSparkProcContext) procCtx;
UnionOperator union = (UnionOperator) n;
// simply need to remember that we've seen a union.
context.currentUnionOperators.add(union);
return null;
}
});
/**
 * SMB join case:   (Big)   (Small)  (Small)
 *                    TS       TS       TS
 *                      \       |       /
 *                       \      DS     DS
 *                         \    |    /
 *                          SMBJoinOP
 *
 * Some of the other processors are expecting only one traversal beyond SMBJoinOp.
 * We need to traverse from the big-table path only, and stop traversing on the
 * small-table path once we reach SMBJoinOp.
 * Also add some SMB join information to the context, so we can properly annotate
 * the MapWork later on.
 */
opRules.put(new TypeRule(SMBMapJoinOperator.class), new SemanticNodeProcessor() {
@Override
public Object process(Node currNode, Stack<Node> stack, NodeProcessorCtx procCtx, Object... os) throws SemanticException {
GenSparkProcContext context = (GenSparkProcContext) procCtx;
SMBMapJoinOperator currSmbNode = (SMBMapJoinOperator) currNode;
SparkSMBMapJoinInfo smbMapJoinCtx = context.smbMapJoinCtxMap.get(currSmbNode);
if (smbMapJoinCtx == null) {
smbMapJoinCtx = new SparkSMBMapJoinInfo();
context.smbMapJoinCtxMap.put(currSmbNode, smbMapJoinCtx);
}
for (Node stackNode : stack) {
if (stackNode instanceof DummyStoreOperator) {
// If coming from small-table side, do some book-keeping, and skip traversal.
smbMapJoinCtx.smallTableRootOps.add(context.currentRootOperator);
return true;
}
}
// If coming from big-table side, do some book-keeping, and continue traversal
smbMapJoinCtx.bigTableRootOp = context.currentRootOperator;
return false;
}
});
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
SemanticGraphWalker ogw = new GenSparkWorkWalker(disp, procCtx);
ogw.startWalking(topNodes, null);
}
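The FileSink rule above chains two processors behind a single rule via CompositeProcessor. A small sketch of that shape follows, using placeholder steps (the helper method and its behavior are hypothetical; CompositeProcessor and the other ql.lib types come from the snippet above).
import java.util.Stack;

import org.apache.hadoop.hive.ql.lib.CompositeProcessor;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class CompositeProcessorSketch {

  // Placeholder step; a real step would transform the matched operator.
  static SemanticNodeProcessor step(final String label) {
    return new SemanticNodeProcessor() {
      @Override
      public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
          Object... nodeOutputs) throws SemanticException {
        return label + " handled " + nd.getName();
      }
    };
  }

  // Both steps are attached to one rule entry and run on the matched node,
  // mirroring CompositeProcessor(new SparkFileSinkProcessor(), genSparkWork) above.
  static final SemanticNodeProcessor FILE_SINK_CHAIN =
      new CompositeProcessor(step("rewrite file sink"), step("generate spark work"));
}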
Use of org.apache.hadoop.hive.ql.lib.TypeRule in project hive by apache.
Class ExprWalkerProcFactory, method extractPushdownPreds:
/**
 * Extracts pushdown predicates from the given list of predicate expressions.
 *
 * @param opContext
 *          operator context used for resolving column references
 * @param op
 *          operator of the predicates being processed
 * @param preds
 *          predicate expressions to analyze for pushdown
 * @return the expression walker information collected during the walk
 * @throws SemanticException if any processor fails during the walk
 */
public static ExprWalkerInfo extractPushdownPreds(OpWalkerInfo opContext, Operator<? extends OperatorDesc> op, List<ExprNodeDesc> preds) throws SemanticException {
// Create the walker, the rules dispatcher and the context.
ExprWalkerInfo exprContext = new ExprWalkerInfo(op);
// Create a walker which walks the expression tree in a DFS manner while
// maintaining the node stack.
Map<SemanticRule, SemanticNodeProcessor> exprRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
exprRules.put(new TypeRule(ExprNodeColumnDesc.class), getColumnProcessor());
exprRules.put(new TypeRule(ExprNodeFieldDesc.class), getFieldProcessor());
exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), getGenericFuncProcessor());
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultExprProcessor(), exprRules, exprContext);
SemanticGraphWalker egw = new ExpressionWalker(disp);
List<Node> startNodes = new ArrayList<Node>();
List<ExprNodeDesc> clonedPreds = new ArrayList<ExprNodeDesc>();
for (ExprNodeDesc node : preds) {
ExprNodeDesc clone = node.clone();
clonedPreds.add(clone);
exprContext.getNewToOldExprMap().put(clone, node);
}
startNodes.addAll(clonedPreds);
egw.startWalking(startNodes, null);
HiveConf conf = opContext.getParseContext().getConf();
// check the root expression for final candidates
for (ExprNodeDesc pred : clonedPreds) {
extractFinalCandidates(pred, exprContext, conf);
}
return exprContext;
}
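A hypothetical caller-side sketch (not Hive's OpProcFactory): feeding a FilterOperator's predicate into extractPushdownPreds. Only the extractPushdownPreds signature is taken from the snippet above; the surrounding method, the assumption that the caller already holds an OpWalkerInfo, and the package paths are illustrative best guesses.
import java.util.Collections;

import org.apache.hadoop.hive.ql.exec.FilterOperator;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.ppd.ExprWalkerInfo;
import org.apache.hadoop.hive.ql.ppd.ExprWalkerProcFactory;
import org.apache.hadoop.hive.ql.ppd.OpWalkerInfo;

public class PushdownSketch {

  static ExprWalkerInfo analyze(OpWalkerInfo owi, FilterOperator filterOp)
      throws SemanticException {
    // The filter's predicate expression is the single start node of the walk.
    ExprNodeDesc predicate = filterOp.getConf().getPredicate();
    // The returned ExprWalkerInfo records which (cloned) sub-expressions ended up
    // as final pushdown candidates.
    return ExprWalkerProcFactory.extractPushdownPreds(
        owi, filterOp, Collections.singletonList(predicate));
  }
}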