Use of org.apache.hadoop.hive.ql.lib.NodeProcessor in project hive by apache.
The class TablePropertyEnrichmentOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  LOG.info("TablePropertyEnrichmentOptimizer::transform().");
  Map<Rule, NodeProcessor> opRules = Maps.newLinkedHashMap();
  opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), new Processor());
  WalkerCtx context = new WalkerCtx(pctx.getConf());
  Dispatcher disp = new DefaultRuleDispatcher(null, opRules, context);
  List<Node> topNodes = Lists.newArrayList();
  topNodes.addAll(pctx.getTopOps().values());
  GraphWalker walker = new PreOrderWalker(disp);
  walker.startWalking(topNodes, null);
  LOG.info("TablePropertyEnrichmentOptimizer::transform() complete!");
  return pctx;
}
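The Processor registered under rule "R1" above is Hive's own inner class and is not shown in this snippet. For illustration only, a minimal NodeProcessor that fires on each matched TableScanOperator could look like the following sketch; the body is a hypothetical placeholder, not the actual enrichment logic:

private static class Processor implements NodeProcessor {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws SemanticException {
    // The walker hands over every node whose operator-name path matched "TS%".
    TableScanOperator tableScan = (TableScanOperator) nd;
    WalkerCtx context = (WalkerCtx) procCtx;
    // Hypothetical placeholder: inspect the scanned table via tableScan.getConf()
    // and enrich its table properties using the conf carried in the context.
    return null;
  }
}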
Use of org.apache.hadoop.hive.ql.lib.NodeProcessor in project hive by apache.
The class IndexPredicateAnalyzer, method analyzePredicate.
/**
 * Analyzes a predicate.
 *
 * @param predicate predicate to be analyzed
 * @param searchConditions receives conditions produced by analysis
 * @return residual predicate which could not be translated to searchConditions
 */
public ExprNodeDesc analyzePredicate(ExprNodeDesc predicate, final List<IndexSearchCondition> searchConditions) {
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  NodeProcessor nodeProcessor = new NodeProcessor() {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
      // We can only push down predicates that appear as part of a pure
      // conjunction: reject OR, CASE, etc.
      for (Node ancestor : stack) {
        if (nd == ancestor) {
          break;
        }
        if (!FunctionRegistry.isOpAnd((ExprNodeDesc) ancestor)) {
          return nd;
        }
      }
      return analyzeExpr((ExprNodeGenericFuncDesc) nd, searchConditions, nodeOutputs);
    }
  };
  Dispatcher disp = new DefaultRuleDispatcher(nodeProcessor, opRules, null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(predicate);
  HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
  try {
    ogw.startWalking(topNodes, nodeOutput);
  } catch (SemanticException ex) {
    throw new RuntimeException(ex);
  }
  ExprNodeDesc residualPredicate = (ExprNodeDesc) nodeOutput.get(predicate);
  return residualPredicate;
}
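A typical call site decomposes a pushed-down filter into translatable search conditions plus a residual. The sketch below is illustrative; filterExpr stands in for whatever ExprNodeDesc the storage handler receives:

// Sketch: split a filter into indexable conditions and a residual.
IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
ExprNodeDesc residual = analyzer.analyzePredicate(filterExpr, searchConditions);
// searchConditions now holds the comparisons the analyzer could translate;
// residual (possibly null) must still be evaluated by Hive at run time.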
Use of org.apache.hadoop.hive.ql.lib.NodeProcessor in project hive by apache.
The class LineageInfo, method getLineageInfo.
/**
 * Parses the given query and extracts the lineage info.
 *
 * @param query the query to analyze
 * @throws ParseException
 * @throws SemanticException
 */
public void getLineageInfo(String query) throws ParseException, SemanticException {
  /*
   * Get the AST tree.
   */
  ASTNode tree = ParseUtils.parse(query, null);
  while ((tree.getToken() == null) && (tree.getChildCount() > 0)) {
    tree = (ASTNode) tree.getChild(0);
  }
  /*
   * Initialize the event processor and dispatcher.
   */
  inputTableList.clear();
  OutputTableList.clear();
  // Create a walker which walks the tree in a DFS manner while maintaining
  // the operator stack.
  Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along; with no rules registered, every node
  // is dispatched to the default processor (this class).
  Dispatcher disp = new DefaultRuleDispatcher(this, rules, null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of top-level nodes.
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(tree);
  ogw.startWalking(topNodes, null);
}
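Since the rule map stays empty, every visited AST node is dispatched to the default processor, which is the LineageInfo instance itself (it implements NodeProcessor). Driving the class is then a one-liner; the sketch below assumes getters for the two table lists cleared above:

// Sketch: extract source and destination tables from a query string.
LineageInfo lineage = new LineageInfo();
lineage.getLineageInfo("INSERT OVERWRITE TABLE dest SELECT * FROM src");
System.out.println("Input tables: " + lineage.getInputTableList());
System.out.println("Output tables: " + lineage.getOutputTableList());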
Use of org.apache.hadoop.hive.ql.lib.NodeProcessor in project hive by apache.
The class UnionProcessor, method transform.
/**
 * Transform the query tree. For each union, store whether both of its
 * sub-queries are map-only.
 *
 * @param pCtx the current parse context
 */
@Override
public ParseContext transform(ParseContext pCtx) throws SemanticException {
  // Create a walker which walks the tree in a BFS manner while maintaining
  // the operator stack.
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R1", ReduceSinkOperator.getOperatorName() + "%.*" + UnionOperator.getOperatorName() + "%"), UnionProcFactory.getMapRedUnion());
  opRules.put(new RuleRegExp("R2", UnionOperator.getOperatorName() + "%.*" + UnionOperator.getOperatorName() + "%"), UnionProcFactory.getUnknownUnion());
  opRules.put(new RuleRegExp("R3", TableScanOperator.getOperatorName() + "%.*" + UnionOperator.getOperatorName() + "%"), UnionProcFactory.getMapUnion());
  // The dispatcher fires the processor for the matching rule and passes the
  // context along.
  UnionProcContext uCtx = new UnionProcContext();
  uCtx.setParseContext(pCtx);
  Dispatcher disp = new DefaultRuleDispatcher(UnionProcFactory.getNoUnion(), opRules, uCtx);
  LevelOrderWalker ogw = new LevelOrderWalker(disp);
  ogw.setNodeTypes(UnionOperator.class);
  // Create a list of topOp nodes.
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pCtx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  pCtx.setUCtx(uCtx);
  // Walk the tree again to see if the union can be removed completely.
  HiveConf conf = pCtx.getConf();
  opRules.clear();
  if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE) && !conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
    opRules.put(new RuleRegExp("R5", UnionOperator.getOperatorName() + "%" + ".*" + FileSinkOperator.getOperatorName() + "%"), UnionProcFactory.getUnionNoProcessFile());
    disp = new DefaultRuleDispatcher(UnionProcFactory.getNoUnion(), opRules, uCtx);
    ogw = new LevelOrderWalker(disp);
    ogw.setNodeTypes(FileSinkOperator.class);
    // Create a list of topOp nodes.
    topNodes.clear();
    topNodes.addAll(pCtx.getTopOps().values());
    ogw.startWalking(topNodes, null);
  }
  return pCtx;
}
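The RuleRegExp patterns here match against the concatenation of operator names on the walker's stack, so "R1" fires when a ReduceSinkOperator appears somewhere upstream of a UnionOperator on the current path. As a sketch, an equivalent standalone rule would be built like this (the variable name is illustrative):

// "RS%.*UNION%": a ReduceSinkOperator with a UnionOperator somewhere below it
// on the walked path; ".*" spans any operators in between.
Rule mapRedUnionRule = new RuleRegExp("R1",
    ReduceSinkOperator.getOperatorName() + "%.*" + UnionOperator.getOperatorName() + "%");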
Use of org.apache.hadoop.hive.ql.lib.NodeProcessor in project hive by apache.
The class CorrelationOptimizer, method transform.
/**
 * Detect correlations and transform the query tree.
 *
 * @param pctx the current parse context
 * @throws SemanticException
 */
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  pCtx = pctx;
  if (HiveConf.getBoolVar(pCtx.getConf(), HiveConf.ConfVars.HIVECONVERTJOIN)) {
    findPossibleAutoConvertedJoinOperators();
  }
  // Detect correlations.
  CorrelationNodeProcCtx corrCtx = new CorrelationNodeProcCtx(pCtx);
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("R1", ReduceSinkOperator.getOperatorName() + "%"), new CorrelationNodeProc());
  Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, corrCtx);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of topOp nodes.
  List<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pCtx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  // We have finished tree walking (correlation detection).
  // First see whether we need to abort (if so, the operator tree has not been
  // changed); if not, start transforming the operator tree.
  abort = corrCtx.isAbort();
  if (abort) {
    LOG.info("Abort. Reasons are ...");
    for (String reason : corrCtx.getAbortReasons()) {
      LOG.info("-- " + reason);
    }
  } else {
    // Transform the operator tree.
    LOG.info("Begin query plan transformation based on intra-query correlations. " + corrCtx.getCorrelations().size() + " correlation(s) to be applied");
    for (IntraQueryCorrelation correlation : corrCtx.getCorrelations()) {
      QueryPlanTreeTransformation.applyCorrelation(pCtx, corrCtx, correlation);
    }
  }
  return pCtx;
}
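getDefaultProc() is not shown in this snippet; in this pattern the default processor simply accepts every node that no rule claims. A minimal sketch, assuming no bookkeeping is needed for unmatched operators:

private NodeProcessor getDefaultProc() {
  return new NodeProcessor() {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {
      // No-op: operators other than the matched ReduceSinkOperators need no handling.
      return null;
    }
  };
}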