Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class RedundantDynamicPruningConditionsRemoval, method transform.
/**
* Transform the query tree.
*
* @param pctx the current parse context
*/
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  // Make sure semijoin is not enabled. If it is, then do not remove the dynamic partition pruning predicates.
  if (!pctx.getConf().getBoolVar(HiveConf.ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION)) {
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%" + FilterOperator.getOperatorName() + "%"), new FilterTransformer());
    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
    GraphWalker ogw = new DefaultGraphWalker(disp);
    List<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(pctx.getTopOps().values());
    ogw.startWalking(topNodes, null);
  }
  return pctx;
}
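The FilterTransformer referenced in the rule is not shown on this page. A minimal sketch of the shape such a NodeProcessor takes, assuming only the NodeProcessor contract; the body beyond the signature is hypothetical, not Hive's actual implementation:

private class FilterTransformer implements NodeProcessor {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws SemanticException {
    // The rule "TS%FIL%" fires with the matched FilterOperator as nd.
    FilterOperator filter = (FilterOperator) nd;
    ExprNodeDesc predicate = filter.getConf().getPredicate();
    // Hypothetical: inspect the predicate here and strip the dynamic pruning
    // conditions that the semijoin check above made removable.
    return null;
  }
}

Because DefaultRuleDispatcher is constructed with null as the default processor, nodes that match no rule are simply skipped.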
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class TablePropertyEnrichmentOptimizer, method transform.
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
LOG.info("TablePropertyEnrichmentOptimizer::transform().");
Map<Rule, NodeProcessor> opRules = Maps.newLinkedHashMap();
opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), new Processor());
WalkerCtx context = new WalkerCtx(pctx.getConf());
Dispatcher disp = new DefaultRuleDispatcher(null, opRules, context);
List<Node> topNodes = Lists.newArrayList();
topNodes.addAll(pctx.getTopOps().values());
GraphWalker walker = new PreOrderWalker(disp);
walker.startWalking(topNodes, null);
LOG.info("TablePropertyEnrichmentOptimizer::transform() complete!");
return pctx;
}
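This pass differs from the previous one in two ways: it walks the operator tree pre-order (PreOrderWalker rather than DefaultGraphWalker), and it threads a WalkerCtx through the dispatcher, which each fired processor receives as its procCtx argument. A minimal sketch of that hand-off, with a hypothetical Processor body (Hive's actual Processor does more):

private static class Processor implements NodeProcessor {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws SemanticException {
    WalkerCtx context = (WalkerCtx) procCtx;  // the same object passed to DefaultRuleDispatcher
    TableScanOperator tableScan = (TableScanOperator) nd;  // rule "TS%" matches TableScanOperators
    // Hypothetical: read the scanned table's serde properties and record
    // enriched table properties via the context.
    return null;
  }
}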
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class IndexPredicateAnalyzer, method analyzePredicate.
/**
* Analyzes a predicate.
*
* @param predicate predicate to be analyzed
*
* @param searchConditions receives conditions produced by analysis
*
* @return residual predicate which could not be translated to
* searchConditions
*/
public ExprNodeDesc analyzePredicate(ExprNodeDesc predicate, final List<IndexSearchCondition> searchConditions) {
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  NodeProcessor nodeProcessor = new NodeProcessor() {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
      // We can only push down stuff which appears as part of
      // a pure conjunction: reject OR, CASE, etc.
      for (Node ancestor : stack) {
        if (nd == ancestor) {
          break;
        }
        if (!FunctionRegistry.isOpAnd((ExprNodeDesc) ancestor)) {
          return nd;
        }
      }
      return analyzeExpr((ExprNodeGenericFuncDesc) nd, searchConditions, nodeOutputs);
    }
  };
  Dispatcher disp = new DefaultRuleDispatcher(nodeProcessor, opRules, null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(predicate);
  HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
  try {
    ogw.startWalking(topNodes, nodeOutput);
  } catch (SemanticException ex) {
    throw new RuntimeException(ex);
  }
  ExprNodeDesc residualPredicate = (ExprNodeDesc) nodeOutput.get(predicate);
  return residualPredicate;
}
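A hedged usage sketch of this method: addComparisonOp and allowColumnName are real IndexPredicateAnalyzer configuration methods, but the operator class name, column name, and the predicate variable below are illustrative:

IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
// Only push down equality comparisons in this sketch.
analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
analyzer.allowColumnName("key");  // illustrative column name

List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
ExprNodeDesc residual = analyzer.analyzePredicate(predicate, searchConditions);
// searchConditions now holds the pushable conjuncts; residual (possibly null)
// is whatever Hive must still evaluate after the scan.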
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class AccumuloPredicateHandler, method generateRanges.
/**
* Encapsulates the traversal over some {@link ExprNodeDesc} tree for the generation of Accumulo
* Ranges using expressions involving the Accumulo rowid-mapped Hive column.
*
* @param conf
* Hadoop configuration
* @param columnMapper
* Mapping of Hive to Accumulo columns for the query
* @param hiveRowIdColumnName
* Name of the hive column mapped to the Accumulo rowid
* @param root
* Root of some ExprNodeDesc tree to traverse, the WHERE clause
* @return An object representing the result from the ExprNodeDesc tree traversal using the
* AccumuloRangeGenerator
*/
protected Object generateRanges(Configuration conf, ColumnMapper columnMapper, String hiveRowIdColumnName, ExprNodeDesc root) {
  AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, columnMapper.getRowIdMapping(), hiveRowIdColumnName);
  Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, Collections.<Rule, NodeProcessor>emptyMap(), null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  List<Node> roots = new ArrayList<Node>();
  roots.add(root);
  HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
  try {
    ogw.startWalking(roots, nodeOutput);
  } catch (SemanticException ex) {
    throw new RuntimeException(ex);
  }
  return nodeOutput.get(root);
}
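The walk returns an untyped Object. A hedged sketch of how a caller might unpack it, assuming the generator yields a single Accumulo Range, a List of Ranges, or null; these shapes are the caller's contract with AccumuloRangeGenerator, not something this method guarantees:

Object result = generateRanges(conf, columnMapper, hiveRowIdColumnName, root);
if (result == null) {
  // Nothing derivable from the predicate: scan everything.
  return Collections.singletonList(new Range());
} else if (result instanceof Range) {
  return Collections.singletonList((Range) result);
} else if (result instanceof List) {
  @SuppressWarnings("unchecked")
  List<Range> ranges = (List<Range>) result;
  return ranges;
}
throw new IllegalArgumentException("Unhandled result type: " + result.getClass());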
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class LineageInfo, method getLineageInfo.
/**
 * Parses the given query and extracts the lineage info.
 *
 * @param query the query string to analyze
 * @throws ParseException
 */
public void getLineageInfo(String query) throws ParseException, SemanticException {
  /*
   * Get the AST tree
   */
  ASTNode tree = ParseUtils.parse(query, null);
  while ((tree.getToken() == null) && (tree.getChildCount() > 0)) {
    tree = (ASTNode) tree.getChild(0);
  }
  /*
   * initialize Event Processor and dispatcher.
   */
  inputTableList.clear();
  OutputTableList.clear();
  // create a walker which walks the tree in a DFS manner while maintaining
  // the operator stack. The dispatcher
  // generates the plan from the operator tree
  Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
  // The dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  Dispatcher disp = new DefaultRuleDispatcher(this, rules, null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  // Create a list of topop nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(tree);
  ogw.startWalking(topNodes, null);
}
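Note that no rules are registered here: the LineageInfo instance itself is passed to DefaultRuleDispatcher as the default processor, so its process method fires for every AST node and fills the two table sets. A short usage sketch, assuming the class's accessors for the sets cleared above; the query text is illustrative:

LineageInfo lep = new LineageInfo();
lep.getLineageInfo("INSERT OVERWRITE TABLE dest SELECT * FROM src");  // illustrative query

for (String tab : lep.getInputTableList()) {
  System.out.println("InputTable=" + tab);
}
for (String tab : lep.getOutputTableList()) {
  System.out.println("OutputTable=" + tab);
}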