Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class IndexWhereTaskDispatcher, method dispatch().
@Override
public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
  Task<? extends Serializable> task = (Task<? extends Serializable>) nd;
  ParseContext pctx = physicalContext.getParseContext();
  // create the regexes so the walker can recognize our WHERE queries
  Map<Rule, NodeProcessor> operatorRules = createOperatorRules(pctx);
  // bail out if no table has any indexes
  if (operatorRules == null) {
    return null;
  }
  // create a context so the walker can carry the current task with it
  IndexWhereProcCtx indexWhereOptimizeCtx = new IndexWhereProcCtx(task, pctx);
  // create the dispatcher, which fires the processor according to the rule that
  // best matches
  Dispatcher dispatcher = new DefaultRuleDispatcher(getDefaultProcessor(), operatorRules, indexWhereOptimizeCtx);
  // walk the mapper operator (not task) tree for each specific task
  GraphWalker ogw = new DefaultGraphWalker(dispatcher);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  if (task.getWork() instanceof MapredWork) {
    topNodes.addAll(((MapredWork) task.getWork()).getMapWork().getAliasToWork().values());
  } else {
    return null;
  }
  ogw.startWalking(topNodes, null);
  return null;
}
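The createOperatorRules and getDefaultProcessor helpers are elided above. For orientation, here is a minimal hypothetical sketch of a processor that such a rule could bind to, assuming the standard NodeProcessor contract from org.apache.hadoop.hive.ql.lib (the class name NoopProcessor is illustrative):

// hypothetical: a NodeProcessor that a rule in operatorRules could map to
private static class NoopProcessor implements NodeProcessor {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws SemanticException {
    // a real processor would inspect the matched operator (nd), the walk
    // path (stack), and record its findings in procCtx
    return null;
  }
}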
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class AnnotateWithStatistics, method transform().
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  AnnotateStatsProcCtx aspCtx = new AnnotateStatsProcCtx(pctx);
  // create a walker which walks the tree in a BFS manner while maintaining the
  // operator stack; the dispatcher generates the plan from the operator tree
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  opRules.put(new RuleRegExp("TS", TableScanOperator.getOperatorName() + "%"), StatsRulesProcFactory.getTableScanRule());
  opRules.put(new RuleRegExp("SEL", SelectOperator.getOperatorName() + "%"), StatsRulesProcFactory.getSelectRule());
  opRules.put(new RuleRegExp("FIL", FilterOperator.getOperatorName() + "%"), StatsRulesProcFactory.getFilterRule());
  opRules.put(new RuleRegExp("GBY", GroupByOperator.getOperatorName() + "%"), StatsRulesProcFactory.getGroupByRule());
  opRules.put(new RuleRegExp("JOIN", CommonJoinOperator.getOperatorName() + "%|" + MapJoinOperator.getOperatorName() + "%"), StatsRulesProcFactory.getJoinRule());
  opRules.put(new RuleRegExp("LIM", LimitOperator.getOperatorName() + "%"), StatsRulesProcFactory.getLimitRule());
  opRules.put(new RuleRegExp("RS", ReduceSinkOperator.getOperatorName() + "%"), StatsRulesProcFactory.getReduceSinkRule());
  // the dispatcher fires the processor corresponding to the closest matching
  // rule and passes the context along
  Dispatcher disp = new DefaultRuleDispatcher(StatsRulesProcFactory.getDefaultRule(), opRules, aspCtx);
  GraphWalker ogw = new LevelOrderWalker(disp, 0);
  // create a list of top operator nodes
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getTopOps().values());
  ogw.startWalking(topNodes, null);
  return pctx;
}
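Each RuleRegExp key above matches against the concatenated operator names along the walker's current path. A hedged sketch of wiring in one more rule under the same pattern, assuming a hypothetical NodeProcessor implementation MyUnionStatsRule:

// hypothetical: register a custom stats rule for UnionOperator alongside the built-in ones
opRules.put(new RuleRegExp("UNION", UnionOperator.getOperatorName() + "%"), new MyUnionStatsRule());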
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class AccumuloPredicateHandler, method generateRanges().
/**
 * Encapsulates the traversal over some {@link ExprNodeDesc} tree for the generation of Accumulo
 * Ranges using expressions involving the Accumulo rowid-mapped Hive column
 *
 * @param columnMapper
 *          Mapping of Hive to Accumulo columns for the query
 * @param hiveRowIdColumnName
 *          Name of the Hive column mapped to the Accumulo rowid
 * @param root
 *          Root of some ExprNodeDesc tree to traverse, i.e. the WHERE clause
 * @return An object representing the result from the ExprNodeDesc tree traversal using the
 *         AccumuloRangeGenerator
 */
protected Object generateRanges(ColumnMapper columnMapper, String hiveRowIdColumnName, ExprNodeDesc root) {
  AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, columnMapper.getRowIdMapping(), hiveRowIdColumnName);
  Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, Collections.<Rule, NodeProcessor>emptyMap(), null);
  GraphWalker ogw = new DefaultGraphWalker(disp);
  ArrayList<Node> roots = new ArrayList<Node>();
  roots.add(root);
  HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
  try {
    ogw.startWalking(roots, nodeOutput);
  } catch (SemanticException ex) {
    throw new RuntimeException(ex);
  }
  return nodeOutput.get(root);
}
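A hedged usage sketch of generateRanges; the concrete result type depends on what the AccumuloRangeGenerator produced for the expression, so the caller has to inspect it (variable names below are illustrative):

// illustrative: fetch and inspect the traversal result for the WHERE clause root
Object result = generateRanges(columnMapper, hiveRowIdColumnName, whereExpr);
if (result instanceof List) {
  // the generator derived concrete Accumulo Ranges from the predicate
  List<?> ranges = (List<?>) result;
}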
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class IndexWhereResolver, method resolve().
@Override
public PhysicalContext resolve(PhysicalContext physicalContext) throws SemanticException {
  Dispatcher dispatcher = new IndexWhereTaskDispatcher(physicalContext);
  GraphWalker opGraphWalker = new DefaultGraphWalker(dispatcher);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(physicalContext.getRootTasks());
  opGraphWalker.startWalking(topNodes, null);
  return physicalContext;
}
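For context, physical resolvers like this one are applied in sequence to the same PhysicalContext; a minimal illustrative sketch of that chaining (the resolvers list is hypothetical):

// illustrative: each PhysicalPlanResolver consumes and returns the shared context
for (PhysicalPlanResolver resolver : resolvers) {
  physicalContext = resolver.resolve(physicalContext);
}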
Use of org.apache.hadoop.hive.ql.lib.Dispatcher in project hive by apache.
The class LlapDecider, method resolve().
@Override
public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException {
  this.conf = pctx.getConf();
  this.mode = LlapMode.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.LLAP_EXECUTION_MODE));
  Preconditions.checkState(this.mode != null, "Unrecognized LLAP mode configuration: " + HiveConf.getVar(conf, HiveConf.ConfVars.LLAP_EXECUTION_MODE));
  LOG.info("llap mode: " + this.mode);
  if (mode == none) {
    LOG.info("LLAP disabled.");
    return pctx;
  }
  // create the dispatcher and graph walker
  Dispatcher disp = new LlapDecisionDispatcher(pctx, mode);
  TaskGraphWalker ogw = new TaskGraphWalker(disp);
  // get all the task nodes from the root tasks
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getRootTasks());
  // walk the task tree
  ogw.startWalking(topNodes, null);
  return pctx;
}
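LlapDecisionDispatcher itself is elided here, but its shape follows the Dispatcher contract already visible in IndexWhereTaskDispatcher.dispatch above. A minimal hypothetical sketch of such a task-level dispatcher:

// hypothetical sketch of a task-level Dispatcher, mirroring the contract above
class MyLlapDispatcher implements Dispatcher {
  @Override
  public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
    Task<? extends Serializable> task = (Task<? extends Serializable>) nd;
    // a real implementation would inspect task.getWork() and decide whether
    // that work may run inside LLAP, flagging the work accordingly
    return null;
  }
}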