Use of org.apache.hadoop.hive.ql.lib.NodeProcessor in project phoenix by apache.
From the class IndexPredicateAnalyzer, method analyzePredicate.
/**
* Analyzes a predicate.
*
* @param predicate predicate to be analyzed
* @param searchConditions receives conditions produced by analysis
* @return residual predicate which could not be translated to
* searchConditions
*/
public ExprNodeDesc analyzePredicate(ExprNodeDesc predicate, final List<IndexSearchCondition> searchConditions) {
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    NodeProcessor nodeProcessor = new NodeProcessor() {
        @Override
        public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
            // We can only push down predicates that appear as part of
            // a pure conjunction: reject OR, CASE, etc.
            for (Node ancestor : stack) {
                if (nd == ancestor) {
                    break;
                }
                if (!FunctionRegistry.isOpAnd((ExprNodeDesc) ancestor)) {
                    return nd;
                }
            }
            return analyzeExpr((ExprNodeGenericFuncDesc) nd, searchConditions, nodeOutputs);
        }
    };
    Dispatcher disp = new DefaultRuleDispatcher(nodeProcessor, opRules, null);
    GraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(predicate);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
    try {
        ogw.startWalking(topNodes, nodeOutput);
    } catch (SemanticException ex) {
        throw new RuntimeException(ex);
    }
    ExprNodeDesc residualPredicate = (ExprNodeDesc) nodeOutput.get(predicate);
    return residualPredicate;
}
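
For context, a minimal sketch of how a caller might use this method. The analyzer and predicate variables are assumed to be in scope, and the null-residual reading follows the javadoc above rather than anything Phoenix-specific:

// Hypothetical caller: split a pushed-down filter into index-translatable
// conditions and a residual that must still be evaluated after the scan.
List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
ExprNodeDesc residual = analyzer.analyzePredicate(predicate, searchConditions);
if (residual == null) {
    // The entire predicate was translated into searchConditions.
} else {
    // The residual could not be translated and must be applied as a post-filter.
}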
Use of org.apache.hadoop.hive.ql.lib.NodeProcessor in project flink by apache.
From the class HiveParserTypeCheckProcFactory, method genExprNode.
public static Map<HiveParserASTNode, ExprNodeDesc> genExprNode(HiveParserASTNode expr, HiveParserTypeCheckCtx tcCtx, HiveParserTypeCheckProcFactory tf) throws SemanticException {
    // Create the walker, the rule dispatcher and the context. The walker
    // traverses the tree in a DFS manner while maintaining the operator
    // stack; the dispatcher generates an ExprNodeDesc for each AST node.
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<>();
opRules.put(new RuleRegExp("R1", HiveASTParser.TOK_NULL + "%"), tf.getNullExprProcessor());
opRules.put(new RuleRegExp("R2", HiveASTParser.Number + "%|" + HiveASTParser.IntegralLiteral + "%|" + HiveASTParser.NumberLiteral + "%"), tf.getNumExprProcessor());
opRules.put(new RuleRegExp("R3", HiveASTParser.Identifier + "%|" + HiveASTParser.StringLiteral + "%|" + HiveASTParser.TOK_CHARSETLITERAL + "%|" + HiveASTParser.TOK_STRINGLITERALSEQUENCE + "%|" + "%|" + HiveASTParser.KW_IF + "%|" + HiveASTParser.KW_CASE + "%|" + HiveASTParser.KW_WHEN + "%|" + HiveASTParser.KW_IN + "%|" + HiveASTParser.KW_ARRAY + "%|" + HiveASTParser.KW_MAP + "%|" + HiveASTParser.KW_STRUCT + "%|" + HiveASTParser.KW_EXISTS + "%|" + HiveASTParser.TOK_SUBQUERY_OP_NOTIN + "%"), tf.getStrExprProcessor());
opRules.put(new RuleRegExp("R4", HiveASTParser.KW_TRUE + "%|" + HiveASTParser.KW_FALSE + "%"), tf.getBoolExprProcessor());
opRules.put(new RuleRegExp("R5", HiveASTParser.TOK_DATELITERAL + "%|" + HiveASTParser.TOK_TIMESTAMPLITERAL + "%"), tf.getDateTimeExprProcessor());
opRules.put(new RuleRegExp("R6", HiveASTParser.TOK_INTERVAL_YEAR_MONTH_LITERAL + "%|" + HiveASTParser.TOK_INTERVAL_DAY_TIME_LITERAL + "%|" + HiveASTParser.TOK_INTERVAL_YEAR_LITERAL + "%|" + HiveASTParser.TOK_INTERVAL_MONTH_LITERAL + "%|" + HiveASTParser.TOK_INTERVAL_DAY_LITERAL + "%|" + HiveASTParser.TOK_INTERVAL_HOUR_LITERAL + "%|" + HiveASTParser.TOK_INTERVAL_MINUTE_LITERAL + "%|" + HiveASTParser.TOK_INTERVAL_SECOND_LITERAL + "%"), tf.getIntervalExprProcessor());
opRules.put(new RuleRegExp("R7", HiveASTParser.TOK_TABLE_OR_COL + "%"), tf.getColumnExprProcessor());
opRules.put(new RuleRegExp("R8", HiveASTParser.TOK_SUBQUERY_EXPR + "%"), tf.getSubQueryExprProcessor());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along.
    Dispatcher disp = new DefaultRuleDispatcher(tf.getDefaultExprProcessor(), opRules, tcCtx);
    GraphWalker ogw = new HiveParserExpressionWalker(disp);
    // Create a list of top nodes.
    ArrayList<Node> topNodes = new ArrayList<>(Collections.singleton(expr));
    HashMap<Node, Object> nodeOutputs = new LinkedHashMap<>();
    ogw.startWalking(topNodes, nodeOutputs);
    return convert(nodeOutputs);
}
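
Both excerpts follow the same pattern: register rule-to-processor mappings, wrap them in a Dispatcher, and drive a GraphWalker over the tree. A minimal, self-contained sketch of that pattern follows; the DemoNode class and the node names are invented for illustration, while NodeProcessor, DefaultRuleDispatcher, and DefaultGraphWalker are the same Hive APIs used above (note that newer Hive releases renamed some of these classes):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;

import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class WalkerSketch {

    // Hypothetical Node implementation, just enough for the walker to traverse.
    static class DemoNode implements Node {
        private final String name;
        private final List<DemoNode> children;
        DemoNode(String name, DemoNode... children) {
            this.name = name;
            this.children = Arrays.asList(children);
        }
        @Override public List<? extends Node> getChildren() {
            return children.isEmpty() ? null : children;
        }
        @Override public String getName() { return name; }
    }

    public static void main(String[] args) throws SemanticException {
        DemoNode root = new DemoNode("AND", new DemoNode("a"), new DemoNode("b"));
        // With an empty rule map, the default processor fires for every node.
        NodeProcessor defaultProc = (nd, stack, ctx, outputs) -> {
            System.out.println("visited " + nd.getName() + " (stack depth " + stack.size() + ")");
            return nd.getName();
        };
        Dispatcher disp = new DefaultRuleDispatcher(defaultProc, new LinkedHashMap<Rule, NodeProcessor>(), null);
        GraphWalker walker = new DefaultGraphWalker(disp);
        HashMap<Node, Object> nodeOutputs = new HashMap<>();
        walker.startWalking(new ArrayList<Node>(Collections.singletonList(root)), nodeOutputs);
        // nodeOutputs now maps each visited node to its processor's return value,
        // which is exactly how both excerpts above collect their results.
        System.out.println("root output: " + nodeOutputs.get(root));
    }
}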