Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.
The class RewriteCanApplyCtx, method populateRewriteVars.
/**
 * This method walks all the nodes starting from the topOp {@link TableScanOperator}
 * node and invokes methods from {@link RewriteCanApplyProcFactory} for each of the
 * rules added to the opRules map. We use the {@link PreOrderOnceWalker} for a
 * pre-order traversal of the operator tree.
 *
 * The methods from {@link RewriteCanApplyProcFactory} set appropriate values in
 * the {@link RewriteVars} enum.
 *
 * @param topOp the TableScanOperator at which the walk starts
 * @throws SemanticException if the operator-tree walk fails
 */
void populateRewriteVars(TableScanOperator topOp) throws SemanticException {
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  // ^TS%[(SEL%)|(FIL%)]*GBY%[(FIL%)]*RS%[(FIL%)]*GBY%
  opRules.put(new RuleRegExp("R1",
      TableScanOperator.getOperatorName() + "%[("
      + SelectOperator.getOperatorName() + "%)|("
      + FilterOperator.getOperatorName() + "%)]*"
      + GroupByOperator.getOperatorName() + "%["
      + FilterOperator.getOperatorName() + "%]*"
      + ReduceSinkOperator.getOperatorName() + "%["
      + FilterOperator.getOperatorName() + "%]*"
      + GroupByOperator.getOperatorName() + "%"),
      RewriteCanApplyProcFactory.canApplyOnTableScanOperator(topOp));
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along.
  Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, this);
  GraphWalker ogw = new PreOrderOnceWalker(disp);
  // Create a list of top-op nodes to start the walk from.
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.add(topOp);
  try {
    ogw.startWalking(topNodes, null);
  } catch (SemanticException e) {
    LOG.error("Exception in walking operator tree. Rewrite variables not populated");
    LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    throw new SemanticException(e.getMessage(), e);
  }
}
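
The getDefaultProc() helper referenced above is not shown in this snippet. A minimal sketch of what such a default processor could look like, assuming a no-op is acceptable for operators that match no rule (this is an illustration, not the actual Hive implementation):

// Hypothetical sketch: a no-op default NodeProcessor, assuming nodes that
// match no rule need no handling. Not the actual Hive implementation.
private NodeProcessor getDefaultProc() {
  return new NodeProcessor() {
    @Override
    public Object process(Node nd, Stack<Node> stack,
        NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
      // Do nothing for operators that no rule matched.
      return null;
    }
  };
}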
Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.
The class LineageInfo, method getLineageInfo.
/**
 * Parses the given query and collects its lineage information.
 *
 * @param query the SQL query to analyze
 * @throws ParseException if the query cannot be parsed
 * @throws SemanticException if the tree walk fails
 */
public void getLineageInfo(String query) throws ParseException, SemanticException {
  /*
   * Get the AST tree.
   */
  ASTNode tree = ParseUtils.parse(query, null);
  while ((tree.getToken() == null) && (tree.getChildCount() > 0)) {
    tree = (ASTNode) tree.getChild(0);
  }
  /*
   * Initialize the event processor and dispatcher.
   */
  inputTableList.clear();
  OutputTableList.clear();
  // Create a walker which walks the tree in a DFS manner. Since no rules are
  // registered here, the dispatcher falls back to the default processor
  // (this object) for every node it visits.
  Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
  Dispatcher disp = new DefaultRuleDispatcher(this, rules, null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of top-level AST nodes to start the walk from.
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(tree);
  ogw.startWalking(topNodes, null);
}
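
Because the rules map is empty, every node falls through to the default processor, which is the LineageInfo instance itself. A brief usage sketch, assuming the class exposes getters for the two table sets (getInputTableList and getOutputTableList are assumptions here, as is the example query):

// Hypothetical usage sketch; getInputTableList()/getOutputTableList() are
// assumed accessors for the two table sets populated during the walk.
LineageInfo lep = new LineageInfo();
lep.getLineageInfo("INSERT OVERWRITE TABLE dest SELECT s.key FROM src s");
for (String tab : lep.getInputTableList()) {
  System.out.println("input table: " + tab);   // expected: src
}
for (String tab : lep.getOutputTableList()) {
  System.out.println("output table: " + tab);  // expected: dest
}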
Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.
The class TableAccessAnalyzer, method analyzeTableAccess.
public TableAccessInfo analyzeTableAccess() throws SemanticException {
  // Set up the rules for the graph walker for group-by and join operators.
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R1", GroupByOperator.getOperatorName() + "%"),
      new GroupByProcessor(pGraphContext));
  opRules.put(new RuleRegExp("R2", JoinOperator.getOperatorName() + "%"),
      new JoinProcessor(pGraphContext));
  opRules.put(new RuleRegExp("R3", MapJoinOperator.getOperatorName() + "%"),
      new JoinProcessor(pGraphContext));
  TableAccessCtx tableAccessCtx = new TableAccessCtx();
  Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, tableAccessCtx);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of top-op nodes and walk!
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pGraphContext.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return tableAccessCtx.getTableAccessInfo();
}
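
Each rule here maps an operator-name pattern to a NodeProcessor that fires when the walker reaches a matching operator. A minimal skeleton of what such a processor might look like (this is an illustrative sketch, not Hive's actual GroupByProcessor, which additionally records the columns referenced by the group-by keys):

// Hypothetical skeleton of a rule processor for the "R1" rule above.
class GroupByProcessor implements NodeProcessor {
  private final ParseContext pGraphContext;

  GroupByProcessor(ParseContext pGraphContext) {
    this.pGraphContext = pGraphContext;
  }

  @Override
  public Object process(Node nd, Stack<Node> stack,
      NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
    GroupByOperator op = (GroupByOperator) nd;
    TableAccessCtx ctx = (TableAccessCtx) procCtx;
    // Inspect the group-by keys of 'op' and record the accessed
    // table/column pairs in 'ctx' (details omitted in this sketch).
    return null;
  }
}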
Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project phoenix by apache.
The class IndexPredicateAnalyzer, method analyzePredicate.
/**
 * Analyzes a predicate.
 *
 * @param predicate predicate to be analyzed
 * @param searchConditions receives the conditions produced by the analysis
 * @return the residual predicate which could not be translated into
 *         search conditions
 */
public ExprNodeDesc analyzePredicate(ExprNodeDesc predicate, final List<IndexSearchCondition> searchConditions) {
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  NodeProcessor nodeProcessor = new NodeProcessor() {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      // Only expressions that appear as part of a pure conjunction can be
      // pushed down: reject OR, CASE, etc. by checking every ancestor of
      // this node on the walker's stack.
      for (Node ancestor : stack) {
        if (nd == ancestor) {
          break;
        }
        if (!FunctionRegistry.isOpAnd((ExprNodeDesc) ancestor)) {
          return nd;
        }
      }
      return analyzeExpr((ExprNodeGenericFuncDesc) nd, searchConditions, nodeOutputs);
    }
  };
  Dispatcher disp = new DefaultRuleDispatcher(nodeProcessor, opRules, null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(predicate);
  HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
  try {
    ogw.startWalking(topNodes, nodeOutput);
  } catch (SemanticException ex) {
    throw new RuntimeException(ex);
  }
  ExprNodeDesc residualPredicate = (ExprNodeDesc) nodeOutput.get(predicate);
  return residualPredicate;
}
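
Since the rules map is empty, the dispatcher applies the anonymous processor to every expression node, splitting the predicate into index-translatable conditions and a residual. A hedged usage sketch; the variable names 'analyzer' and 'pushedPredicate' below are illustrative, not part of the source above:

// Hypothetical usage sketch; 'pushedPredicate' stands in for whatever
// ExprNodeDesc the storage handler received for pushdown.
IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
ExprNodeDesc residual = analyzer.analyzePredicate(pushedPredicate, searchConditions);
// searchConditions now holds the comparisons that can be translated into
// index lookups; 'residual' is whatever must still be evaluated as a
// row-level filter after the index scan.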