Example 56 with DefaultRuleDispatcher

Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.

From the class ExprProcFactory, method getExprDependency().

/**
   * Gets the dependency for the given expression.
   *
   * @param lctx
   *          The lineage context containing the input operators' dependencies.
   * @param inpOp
   *          The input operator to the current operator.
   * @param expr
   *          The expression that is being processed.
   * @return the dependency computed for the expression, or null if none was recorded
   * @throws SemanticException
   */
public static Dependency getExprDependency(LineageCtx lctx, Operator<? extends OperatorDesc> inpOp, ExprNodeDesc expr) throws SemanticException {
    // Create the walker, the rules dispatcher and the context.
    ExprProcCtx exprCtx = new ExprProcCtx(lctx, inpOp);
    // Create a walker that traverses the expression tree in a DFS manner
    // while maintaining the node stack, and a rule map keyed by the
    // expression node types.
    Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
    exprRules.put(new RuleRegExp("R1", ExprNodeColumnDesc.class.getName() + "%"), getColumnProcessor());
    exprRules.put(new RuleRegExp("R2", ExprNodeFieldDesc.class.getName() + "%"), getFieldProcessor());
    exprRules.put(new RuleRegExp("R3", ExprNodeGenericFuncDesc.class.getName() + "%"), getGenericFuncProcessor());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(getDefaultExprProcessor(), exprRules, exprCtx);
    GraphWalker egw = new DefaultGraphWalker(disp);
    List<Node> startNodes = new ArrayList<Node>();
    startNodes.add(expr);
    HashMap<Node, Object> outputMap = new HashMap<Node, Object>();
    egw.startWalking(startNodes, outputMap);
    return (Dependency) outputMap.get(expr);
}
Also used: NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor), DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher), HashMap(java.util.HashMap), LinkedHashMap(java.util.LinkedHashMap), DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker), Node(org.apache.hadoop.hive.ql.lib.Node), RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp), ArrayList(java.util.ArrayList), ExprNodeGenericFuncDesc(org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), Dependency(org.apache.hadoop.hive.ql.hooks.LineageInfo.Dependency), Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher), ExprNodeFieldDesc(org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc), ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), Rule(org.apache.hadoop.hive.ql.lib.Rule), GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker)
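For context, here is a minimal usage sketch of getExprDependency. It is a fragment, not verbatim Hive code: the variables lctx, inpOp, and expr are assumed to be in hand during lineage analysis, and the accessor chain on Dependency follows the nested types in org.apache.hadoop.hive.ql.hooks.LineageInfo.

import org.apache.hadoop.hive.ql.hooks.LineageInfo.BaseColumnInfo;
import org.apache.hadoop.hive.ql.hooks.LineageInfo.Dependency;

// Hypothetical call site: resolve which base-table columns `expr` reads.
Dependency dep = ExprProcFactory.getExprDependency(lctx, inpOp, expr);
if (dep != null) {
    for (BaseColumnInfo base : dep.getBaseCols()) {
        // each entry names a base table column feeding this expression
        System.out.println(base.getTabAlias().getTable().getTableName() + "." + base.getColumn().getName());
    }
}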

Example 57 with DefaultRuleDispatcher

Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.

From the class Generator, method transform().

/* (non-Javadoc)
   * @see org.apache.hadoop.hive.ql.optimizer.Transform#transform(org.apache.hadoop.hive.ql.parse.ParseContext)
   */
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
    Index index = SessionState.get() != null ? SessionState.get().getLineageState().getIndex() : new Index();
    // Create the lineage context
    LineageCtx lCtx = new LineageCtx(pctx, index);
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), OpProcFactory.getTSProc());
    opRules.put(new RuleRegExp("R2", ScriptOperator.getOperatorName() + "%"), OpProcFactory.getTransformProc());
    opRules.put(new RuleRegExp("R3", UDTFOperator.getOperatorName() + "%"), OpProcFactory.getTransformProc());
    opRules.put(new RuleRegExp("R4", SelectOperator.getOperatorName() + "%"), OpProcFactory.getSelProc());
    opRules.put(new RuleRegExp("R5", GroupByOperator.getOperatorName() + "%"), OpProcFactory.getGroupByProc());
    opRules.put(new RuleRegExp("R6", UnionOperator.getOperatorName() + "%"), OpProcFactory.getUnionProc());
    opRules.put(new RuleRegExp("R7", CommonJoinOperator.getOperatorName() + "%|" + MapJoinOperator.getOperatorName() + "%"), OpProcFactory.getJoinProc());
    opRules.put(new RuleRegExp("R8", ReduceSinkOperator.getOperatorName() + "%"), OpProcFactory.getReduceSinkProc());
    opRules.put(new RuleRegExp("R9", LateralViewJoinOperator.getOperatorName() + "%"), OpProcFactory.getLateralViewJoinProc());
    opRules.put(new RuleRegExp("R10", PTFOperator.getOperatorName() + "%"), OpProcFactory.getTransformProc());
    opRules.put(new RuleRegExp("R11", FilterOperator.getOperatorName() + "%"), OpProcFactory.getFilterProc());
    // The dispatcher fires the processor corresponding to the closest matching rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(OpProcFactory.getDefaultProc(), opRules, lCtx);
    GraphWalker ogw = new LevelOrderWalker(disp, 2);
    // Create a list of topOp nodes
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(pctx.getTopOps().values());
    ogw.startWalking(topNodes, null);
    return pctx;
}
Also used: NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor), DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher), Node(org.apache.hadoop.hive.ql.lib.Node), RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp), ArrayList(java.util.ArrayList), Index(org.apache.hadoop.hive.ql.optimizer.lineage.LineageCtx.Index), Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher), LinkedHashMap(java.util.LinkedHashMap), Rule(org.apache.hadoop.hive.ql.lib.Rule), LevelOrderWalker(org.apache.hadoop.hive.ql.lib.LevelOrderWalker), GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker)
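The rule map above only wires up processors by name; a hedged sketch of what one such NodeProcessor can look like follows. The class and its logging body are hypothetical, not OpProcFactory's actual implementation; only the interface signature and the cast implied by rule "R1" are taken from the example. Note also that the LevelOrderWalker here is constructed with a second argument of 2, which bounds how many ancestor levels accompany each visit on the stack.

import java.util.Stack;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical processor: rule "R1" above guarantees `nd` is a TableScanOperator.
public class LoggingTableScanProc implements NodeProcessor {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
        TableScanOperator ts = (TableScanOperator) nd;
        // `stack` holds the path of nodes walked to reach this one
        System.out.println("visiting " + ts.getOperatorId() + " at depth " + stack.size());
        return null; // becomes this node's entry in the walker's output map
    }
}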

Example 58 with DefaultRuleDispatcher

Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.

From the class PcrExprProcFactory, method walkExprTree().

/**
   * Removes partition conditions from the expression tree when necessary.
   *
   * @param tabAlias
   *          the table alias
   * @param parts
   *          the list of all pruned partitions for the table
   * @param vcs
   *          virtual columns referenced
   * @param pred
   *          expression tree of the target filter operator
   * @return the node information of the root expression
   * @throws SemanticException
   */
public static NodeInfoWrapper walkExprTree(String tabAlias, ArrayList<Partition> parts, List<VirtualColumn> vcs, ExprNodeDesc pred) throws SemanticException {
    // Create the walker, the rules dispatcher and the context.
    PcrExprProcCtx pprCtx = new PcrExprProcCtx(tabAlias, parts, vcs);
    Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
    exprRules.put(new RuleRegExp("R1", ExprNodeColumnDesc.class.getName() + "%"), getColumnProcessor());
    exprRules.put(new RuleRegExp("R2", ExprNodeFieldDesc.class.getName() + "%"), getFieldProcessor());
    exprRules.put(new RuleRegExp("R5", ExprNodeGenericFuncDesc.class.getName() + "%"), getGenericFuncProcessor());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(getDefaultExprProcessor(), exprRules, pprCtx);
    GraphWalker egw = new DefaultGraphWalker(disp);
    List<Node> startNodes = new ArrayList<Node>();
    startNodes.add(pred);
    HashMap<Node, Object> outputMap = new HashMap<Node, Object>();
    egw.startWalking(startNodes, outputMap);
    // Return the wrapper of the root node
    return (NodeInfoWrapper) outputMap.get(pred);
}
Also used: NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor), DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher), HashMap(java.util.HashMap), LinkedHashMap(java.util.LinkedHashMap), DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker), Node(org.apache.hadoop.hive.ql.lib.Node), RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp), ArrayList(java.util.ArrayList), ExprNodeGenericFuncDesc(org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher), ExprNodeFieldDesc(org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc), ExprNodeColumnDesc(org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), Rule(org.apache.hadoop.hive.ql.lib.Rule), GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker)
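The first argument to DefaultRuleDispatcher is the fallback processor for nodes that match no registered rule; in this example a constant leaf (ExprNodeConstantDesc) matches none of R1, R2, or R5 and falls through to it. A minimal sketch, assuming Java 8 lambdas and only the org.apache.hadoop.hive.ql.lib types already shown above; the real getDefaultExprProcessor() does more than this.

// Hypothetical fallback: fires for any node the rule map does not recognize.
NodeProcessor defaultProc = (nd, stack, procCtx, nodeOutputs) -> {
    // nothing to prune for unrecognized nodes; produce no wrapper
    return null;
};
Dispatcher disp = new DefaultRuleDispatcher(defaultProc, exprRules, pprCtx);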

Example 59 with DefaultRuleDispatcher

Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.

From the class BucketingSortingInferenceOptimizer, method inferBucketingSorting().

/**
   * For each MapReduce task that has a reducer, infer whether or not the final
   * output of the reducer is bucketed and/or sorted.
   *
   * @param mapRedTasks
   *          the MapReduce tasks to analyze
   * @throws SemanticException
   */
private void inferBucketingSorting(List<ExecDriver> mapRedTasks) throws SemanticException {
    for (ExecDriver mapRedTask : mapRedTasks) {
        // Only the final map reduce job matters here; skip inferring the bucketing/sorting
        // of the outputs of intermediate map reduce jobs.
        if (!mapRedTask.getWork().isFinalMapRed()) {
            continue;
        }
        if (mapRedTask.getWork().getReduceWork() == null) {
            continue;
        }
        Operator<? extends OperatorDesc> reducer = mapRedTask.getWork().getReduceWork().getReducer();
        // If the map work uses sampling, the output is not bucketed
        boolean disableBucketing = mapRedTask.getWork().getMapWork().getSamplingType() > 0;
        BucketingSortingCtx bCtx = new BucketingSortingCtx(disableBucketing);
        // RuleRegExp rules are used to match operators anywhere in the tree
        // RuleExactMatch rules are used to specify exactly what the tree should look like
        // In particular, this guarantees that the first operator is the reducer
        // (and its parent(s) are ReduceSinkOperators) since it begins walking the tree from
        // the reducer.
        Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
        opRules.put(new RuleRegExp("R1", SelectOperator.getOperatorName() + "%"), BucketingSortingOpProcFactory.getSelProc());
        // Matches only GroupByOperators which are reducers, rather than map group by operators,
        // or multi group by optimization specific operators
        opRules.put(new RuleExactMatch("R2", new String[] { GroupByOperator.getOperatorName() }), BucketingSortingOpProcFactory.getGroupByProc());
        // Matches only JoinOperators which are reducers, rather than map joins, SMB map joins, etc.
        opRules.put(new RuleExactMatch("R3", new String[] { JoinOperator.getOperatorName() }), BucketingSortingOpProcFactory.getJoinProc());
        opRules.put(new RuleRegExp("R5", FileSinkOperator.getOperatorName() + "%"), BucketingSortingOpProcFactory.getFileSinkProc());
        opRules.put(new RuleRegExp("R7", FilterOperator.getOperatorName() + "%"), BucketingSortingOpProcFactory.getFilterProc());
        opRules.put(new RuleRegExp("R8", LimitOperator.getOperatorName() + "%"), BucketingSortingOpProcFactory.getLimitProc());
        opRules.put(new RuleRegExp("R9", LateralViewForwardOperator.getOperatorName() + "%"), BucketingSortingOpProcFactory.getLateralViewForwardProc());
        opRules.put(new RuleRegExp("R10", LateralViewJoinOperator.getOperatorName() + "%"), BucketingSortingOpProcFactory.getLateralViewJoinProc());
        // Matches only ForwardOperators which are preceded by some other operator in the tree,
        // in particular it can't be a reducer (and hence cannot be one of the ForwardOperators
        // added by the multi group by optimization)
        opRules.put(new RuleRegExp("R11", ".+" + ForwardOperator.getOperatorName() + "%"), BucketingSortingOpProcFactory.getForwardProc());
        // Matches only ForwardOperators which are reducers and are followed by GroupByOperators
        // (specific to the multi group by optimization)
        opRules.put(new RuleExactMatch("R12", new String[] { ForwardOperator.getOperatorName(), GroupByOperator.getOperatorName() }), BucketingSortingOpProcFactory.getMultiGroupByProc());
        // The dispatcher fires the processor corresponding to the closest matching rule and passes
        // the context along
        Dispatcher disp = new DefaultRuleDispatcher(BucketingSortingOpProcFactory.getDefaultProc(), opRules, bCtx);
        GraphWalker ogw = new PreOrderWalker(disp);
        // Start the walk from the reducer
        ArrayList<Node> topNodes = new ArrayList<Node>();
        topNodes.add(reducer);
        ogw.startWalking(topNodes, null);
        mapRedTask.getWork().getMapWork().getBucketedColsByDirectory().putAll(bCtx.getBucketedColsByDirectory());
        mapRedTask.getWork().getMapWork().getSortedColsByDirectory().putAll(bCtx.getSortedColsByDirectory());
    }
}
Also used: NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor), DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher), Node(org.apache.hadoop.hive.ql.lib.Node), RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp), ArrayList(java.util.ArrayList), Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher), LinkedHashMap(java.util.LinkedHashMap), RuleExactMatch(org.apache.hadoop.hive.ql.lib.RuleExactMatch), ExecDriver(org.apache.hadoop.hive.ql.exec.mr.ExecDriver), Rule(org.apache.hadoop.hive.ql.lib.Rule), PreOrderWalker(org.apache.hadoop.hive.ql.lib.PreOrderWalker), GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker)
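The comments in this example draw the key distinction between the two rule types: RuleRegExp matches operator names anywhere along the walked path, while RuleExactMatch requires the path to look exactly as specified. A side-by-side sketch, reusing the constructors and operator names from the example above (the variable names are illustrative):

import org.apache.hadoop.hive.ql.exec.ForwardOperator;
import org.apache.hadoop.hive.ql.exec.GroupByOperator;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleExactMatch;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;

// Fires on any ForwardOperator preceded by at least one other operator,
// so it can never fire on the reducer the walk starts from.
Rule descendantForward = new RuleRegExp("R11", ".+" + ForwardOperator.getOperatorName() + "%");

// Fires only when the walked path is exactly a ForwardOperator followed by
// a GroupByOperator, i.e. the multi group by reducer shape.
Rule forwardThenGroupBy = new RuleExactMatch("R12", new String[] { ForwardOperator.getOperatorName(), GroupByOperator.getOperatorName() });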

Example 60 with DefaultRuleDispatcher

Use of org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher in project hive by apache.

From the class CorrelationOptimizer, method transform().

/**
   * Detect correlations and transform the query tree.
   *
   * @param pctx
   *          current parse context
   * @return the transformed parse context
   * @throws SemanticException
   */
public ParseContext transform(ParseContext pctx) throws SemanticException {
    pCtx = pctx;
    if (HiveConf.getBoolVar(pCtx.getConf(), HiveConf.ConfVars.HIVECONVERTJOIN)) {
        findPossibleAutoConvertedJoinOperators();
    }
    // detect correlations
    CorrelationNodeProcCtx corrCtx = new CorrelationNodeProcCtx(pCtx);
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("R1", ReduceSinkOperator.getOperatorName() + "%"), new CorrelationNodeProc());
    Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, corrCtx);
    GraphWalker ogw = new DefaultGraphWalker(disp);
    // Create a list of topOp nodes
    List<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(pCtx.getTopOps().values());
    ogw.startWalking(topNodes, null);
    // We have finished tree walking (correlation detection).
    // We will first see if we need to abort (the operator tree has not been changed).
    // If not, we will start to transform the operator tree.
    abort = corrCtx.isAbort();
    if (abort) {
        LOG.info("Abort. Reasons are ...");
        for (String reason : corrCtx.getAbortReasons()) {
            LOG.info("-- " + reason);
        }
    } else {
        // transform the operator tree
        LOG.info("Begain query plan transformation based on intra-query correlations. " + corrCtx.getCorrelations().size() + " correlation(s) to be applied");
        for (IntraQueryCorrelation correlation : corrCtx.getCorrelations()) {
            QueryPlanTreeTransformation.applyCorrelation(pCtx, corrCtx, correlation);
        }
    }
    return pCtx;
}
Also used: NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor), DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher), DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker), Node(org.apache.hadoop.hive.ql.lib.Node), RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp), ArrayList(java.util.ArrayList), Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher), CommonJoinTaskDispatcher(org.apache.hadoop.hive.ql.optimizer.physical.CommonJoinTaskDispatcher), LinkedHashMap(java.util.LinkedHashMap), Rule(org.apache.hadoop.hive.ql.lib.Rule), GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker)
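For orientation, a hedged sketch of how a Transform such as CorrelationOptimizer is driven: Hive's Optimizer keeps an ordered list of transformations and feeds the ParseContext through each in turn. The loop below is illustrative, not Optimizer's actual code, and assumes a ParseContext pctx produced by semantic analysis is in scope.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.ql.optimizer.Transform;
import org.apache.hadoop.hive.ql.optimizer.correlation.CorrelationOptimizer;

// Illustrative driver loop over a single transformation.
List<Transform> transformations = new ArrayList<Transform>();
transformations.add(new CorrelationOptimizer());
for (Transform t : transformations) {
    pctx = t.transform(pctx);
}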

Aggregations

DefaultRuleDispatcher (org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher): 64 uses
Node (org.apache.hadoop.hive.ql.lib.Node): 64 uses
GraphWalker (org.apache.hadoop.hive.ql.lib.GraphWalker): 63 uses
ArrayList (java.util.ArrayList): 62 uses
Dispatcher (org.apache.hadoop.hive.ql.lib.Dispatcher): 62 uses
NodeProcessor (org.apache.hadoop.hive.ql.lib.NodeProcessor): 55 uses
Rule (org.apache.hadoop.hive.ql.lib.Rule): 55 uses
LinkedHashMap (java.util.LinkedHashMap): 54 uses
DefaultGraphWalker (org.apache.hadoop.hive.ql.lib.DefaultGraphWalker): 49 uses
RuleRegExp (org.apache.hadoop.hive.ql.lib.RuleRegExp): 48 uses
HashMap (java.util.HashMap): 16 uses
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 12 uses
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 11 uses
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 11 uses
ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc): 11 uses
TypeRule (org.apache.hadoop.hive.ql.lib.TypeRule): 10 uses
ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc): 7 uses
GenericUDFOPEqualOrGreaterThan (org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan): 7 uses
Test (org.junit.Test): 7 uses
List (java.util.List): 6 uses