Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache: class BucketingSortingReduceSinkOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  // process reduce sink added by hive.enforce.bucketing or hive.enforce.sorting
  opRules.put(new RuleRegExp("R1", ReduceSinkOperator.getOperatorName() + "%" + SelectOperator.getOperatorName() + "%"
      + FileSinkOperator.getOperatorName() + "%"), getBucketSortReduceSinkProc(pctx));
  // The dispatcher fires the processor corresponding to the closest matching rule
  Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of top nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
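Neither getDefaultProc() nor getBucketSortReduceSinkProc(pctx) is shown on this page; both return NodeProcessor instances. As a rough, illustrative sketch of what DefaultRuleDispatcher expects from the default processor (this is not the actual Hive implementation), a no-op fallback could be written as:

import java.util.Stack;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Illustrative no-op default processor: DefaultRuleDispatcher falls back to it
// for operators whose stack matches none of the registered rules.
private static NodeProcessor getDefaultProc() {
  return new NodeProcessor() {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      return null; // leave operators outside the RS-SEL-FS pattern untouched
    }
  };
}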
Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache: class ConstantPropagate, method transform.
/**
 * Transform the query tree.
 *
 * @param pactx
 *          the current parse context
 */
@Override
public ParseContext transform(ParseContext pactx) throws SemanticException {
  pGraphContext = pactx;
  // generate pruned column list for all relevant operators
  ConstantPropagateProcCtx cppCtx = new ConstantPropagateProcCtx(constantPropagateOption);
  // create a walker which walks the tree in a DFS manner while maintaining
  // the operator stack. The dispatcher generates the plan from the operator tree
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getFilterProc());
  opRules.put(new RuleRegExp("R2", GroupByOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getGroupByProc());
  opRules.put(new RuleRegExp("R3", SelectOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getSelectProc());
  opRules.put(new RuleRegExp("R4", FileSinkOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getFileSinkProc());
  opRules.put(new RuleRegExp("R5", ReduceSinkOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getReduceSinkProc());
  opRules.put(new RuleRegExp("R6", JoinOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getJoinProc());
  opRules.put(new RuleRegExp("R7", TableScanOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getTableScanProc());
  opRules.put(new RuleRegExp("R8", ScriptOperator.getOperatorName() + "%"), ConstantPropagateProcFactory.getStopProc());
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  Dispatcher disp = new DefaultRuleDispatcher(ConstantPropagateProcFactory.getDefaultProc(), opRules, cppCtx);
  GraphWalker ogw = new ConstantPropagateWalker(disp);
  // Create a list of operator nodes to start the walking.
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pGraphContext.getTopOps().values());
  ogw.startWalking(topNodes, null);
  for (Operator<? extends Serializable> opToDelete : cppCtx.getOpToDelete()) {
    if (opToDelete.getParentOperators() == null || opToDelete.getParentOperators().size() != 1) {
      throw new RuntimeException("Error pruning operator " + opToDelete + ". It should have only 1 parent.");
    }
    opToDelete.getParentOperators().get(0).removeChildAndAdoptItsChildren(opToDelete);
  }
  cppCtx.getOpToDelete().clear();
  return pGraphContext;
}
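The same dispatch boilerplate recurs in every snippet on this page: build a rule map, wrap it in a DefaultRuleDispatcher, hand the dispatcher to a graph walker, and start walking from the top operators. A small helper capturing that pattern (the walkOperatorTree name and signature are hypothetical, not part of Hive; ConstantPropagate itself uses its own ConstantPropagateWalker rather than DefaultGraphWalker) might look like:

// Hypothetical convenience wrapper around the recurring pattern: each visited node
// is dispatched to the closest-matching rule, or to defaultProc when nothing matches.
private static void walkOperatorTree(Map<Rule, NodeProcessor> opRules, NodeProcessor defaultProc,
    NodeProcessorCtx procCtx, Collection<? extends Node> roots) throws SemanticException {
  Dispatcher disp = new DefaultRuleDispatcher(defaultProc, opRules, procCtx);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>(roots);
  ogw.startWalking(topNodes, null);
}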
Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache: class SparkReduceSinkMapJoinProc, method hasGroupBy.
private boolean hasGroupBy(Operator<? extends OperatorDesc> mapjoinOp, GenSparkProcContext context) throws SemanticException {
  List<Operator<? extends OperatorDesc>> childOps = mapjoinOp.getChildOperators();
  Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
  SparkMapJoinFollowedByGroupByProcessor processor = new SparkMapJoinFollowedByGroupByProcessor();
  rules.put(new RuleRegExp("GBY", GroupByOperator.getOperatorName() + "%"), processor);
  Dispatcher disp = new DefaultRuleDispatcher(null, rules, context);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(childOps);
  ogw.startWalking(topNodes, null);
  return processor.getHasGroupBy();
}
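SparkMapJoinFollowedByGroupByProcessor is defined elsewhere in Hive; as used here it only needs to remember whether the "GBY" rule ever fired. A stripped-down illustration of such a flag-setting processor (the class name below is made up and this is not the real Hive class) could be:

// Illustrative flag-setting processor: the dispatcher invokes it whenever the
// "GBY" rule matches a GroupByOperator reached during the walk.
public static class GroupByFlagProcessor implements NodeProcessor {
  private boolean hasGroupBy = false;

  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws SemanticException {
    hasGroupBy = true;
    return null;
  }

  public boolean getHasGroupBy() {
    return hasGroupBy;
  }
}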
Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache: class NullScanTaskDispatcher, method dispatch.
@Override
public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
  Task<? extends Serializable> task = (Task<? extends Serializable>) nd;
  // create the context for walking operators
  ParseContext parseContext = physicalContext.getParseContext();
  WalkerCtx walkerCtx = new WalkerCtx();
  List<MapWork> mapWorks = new ArrayList<MapWork>(task.getMapWork());
  Collections.sort(mapWorks, new Comparator<MapWork>() {
    @Override
    public int compare(MapWork o1, MapWork o2) {
      return o1.getName().compareTo(o2.getName());
    }
  });
  for (MapWork mapWork : mapWorks) {
    LOG.debug("Looking at: " + mapWork.getName());
    Collection<Operator<? extends OperatorDesc>> topOperators = mapWork.getAliasToWork().values();
    if (topOperators.size() == 0) {
      LOG.debug("No top operators");
      return null;
    }
    LOG.debug("Looking for table scans where optimization is applicable");
    // The dispatcher fires the processor corresponding to the closest
    // matching rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(null, rules, walkerCtx);
    GraphWalker ogw = new PreOrderOnceWalker(disp);
    // Create a list of topOp nodes
    ArrayList<Node> topNodes = new ArrayList<Node>();
    // Get the top Nodes for this task
    for (Operator<? extends OperatorDesc> workOperator : topOperators) {
      if (parseContext.getTopOps().values().contains(workOperator)) {
        topNodes.add(workOperator);
      }
    }
    Operator<? extends OperatorDesc> reducer = task.getReducer(mapWork);
    if (reducer != null) {
      topNodes.add(reducer);
    }
    ogw.startWalking(topNodes, null);
    LOG.debug(String.format("Found %d null table scans", walkerCtx.getMetadataOnlyTableScans().size()));
    if (walkerCtx.getMetadataOnlyTableScans().size() > 0)
      processAlias(mapWork, walkerCtx.getMetadataOnlyTableScans());
  }
  return null;
}
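The rules map is a field populated by whichever optimizer constructs the NullScanTaskDispatcher; this method only hands it to the dispatcher. A hedged sketch of how such a rule map could bind a table-scan rule to a processor that records candidates in the shared WalkerCtx (the anonymous processor body and the recordCandidate helper are hypothetical, not Hive's actual implementation):

// Hypothetical wiring: fire on every TableScanOperator and record it in WalkerCtx
// so that dispatch() can later call processAlias() on the collected scans.
Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
rules.put(new RuleRegExp("TS", TableScanOperator.getOperatorName() + "%"),
    new NodeProcessor() {
      @Override
      public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
          Object... nodeOutputs) throws SemanticException {
        WalkerCtx ctx = (WalkerCtx) procCtx;
        recordCandidate(ctx, (TableScanOperator) nd); // hypothetical helper, not a WalkerCtx method
        return null;
      }
    });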
Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project SQLWindowing by hbutani: class WindowingTypeCheckProcFactory, method genExprNode.
public static HashMap<Node, Object> genExprNode(ASTNode expr, TypeCheckCtx tcCtx) throws SemanticException {
  // Create the walker, the rules dispatcher and the context.
  // create a walker which walks the tree in a DFS manner while maintaining
  // the operator stack. The dispatcher generates the plan from the operator tree
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R1", Windowing2Parser.NULL + "%"), getNullExprProcessor());
  opRules.put(new RuleRegExp("R2", Windowing2Parser.Number + "%|" + Windowing2Parser.TinyintLiteral + "%|"
      + Windowing2Parser.SmallintLiteral + "%|" + Windowing2Parser.BigintLiteral + "%"), getNumExprProcessor());
  opRules.put(new RuleRegExp("R3", Windowing2Parser.Identifier + "%|" + Windowing2Parser.StringLiteral + "%|"
      + Windowing2Parser.CHARSETLITERAL + "%|" + Windowing2Parser.STRINGLITERALSEQUENCE + "%|" + "%|"
      + Windowing2Parser.IF + "%|" + Windowing2Parser.CASE + "%|" + Windowing2Parser.WHEN + "%|"
      + Windowing2Parser.IN + "%|" + Windowing2Parser.ARRAY + "%|" + Windowing2Parser.MAP + "%|"
      + Windowing2Parser.BETWEEN + "%|" + Windowing2Parser.STRUCT + "%"), getStrExprProcessor());
  opRules.put(new RuleRegExp("R4", Windowing2Parser.TRUE + "%|" + Windowing2Parser.FALSE + "%"), getBoolExprProcessor());
  opRules.put(new RuleRegExp("R5", Windowing2Parser.TABLEORCOL + "%"), getColumnExprProcessor());
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  Dispatcher disp = new DefaultRuleDispatcher(getDefaultExprProcessor(), opRules, tcCtx);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of topop nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(expr);
  HashMap<Node, Object> nodeOutputs = new HashMap<Node, Object>();
  ogw.startWalking(topNodes, nodeOutputs);
  return nodeOutputs;
}
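A typical caller then reads the translated expression back out of the returned map. Assuming the registered processors produce ExprNodeDesc values, as the analogous Hive type-check factory does, usage would look roughly like this (illustrative, not quoted from SQLWindowing):

// Hypothetical caller: translate the AST and fetch the descriptor for the root node.
HashMap<Node, Object> nodeOutputs = WindowingTypeCheckProcFactory.genExprNode(expr, tcCtx);
ExprNodeDesc translated = (ExprNodeDesc) nodeOutputs.get(expr);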