Example 21 with Node

use of org.apache.hadoop.hive.ql.lib.Node in project hive by apache.

the class GenSparkWorkWalker method walk.

/**
   * Walk the given operator.
   *
   * @param nd operator being walked
   */
@Override
protected void walk(Node nd) throws SemanticException {
    List<? extends Node> children = nd.getChildren();
    // maintain the stack of operators encountered
    opStack.push(nd);
    Boolean skip = dispatchAndReturn(nd, opStack);
    // save some positional state
    Operator<? extends OperatorDesc> currentRoot = ctx.currentRootOperator;
    Operator<? extends OperatorDesc> parentOfRoot = ctx.parentOfRoot;
    BaseWork preceedingWork = ctx.preceedingWork;
    if (skip == null || !skip) {
        // recurse into each child, depth-first
        for (Node ch : children) {
            // and restore the state before walking each child
            ctx.currentRootOperator = currentRoot;
            ctx.parentOfRoot = parentOfRoot;
            ctx.preceedingWork = preceedingWork;
            walk(ch);
        }
    }
    // done with this operator
    opStack.pop();
}
Also used : Node(org.apache.hadoop.hive.ql.lib.Node) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork)
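
The save-and-restore around the child loop is the point of this override: ctx.currentRootOperator, ctx.parentOfRoot and ctx.preceedingWork are positional, so every sibling must start from the snapshot taken before the first sibling was walked. A minimal sketch of the same pattern in a generic DefaultGraphWalker subclass (not Hive source; MyProcCtx and its field are hypothetical stand-ins for GenSparkProcContext):

import java.util.List;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical context type carrying per-branch state.
class MyProcCtx implements NodeProcessorCtx {
    String branchState;
}

class StatefulWalker extends DefaultGraphWalker {
    private final MyProcCtx ctx;

    StatefulWalker(Dispatcher disp, MyProcCtx ctx) {
        super(disp);
        this.ctx = ctx;
    }

    @Override
    protected void walk(Node nd) throws SemanticException {
        opStack.push(nd);
        Boolean skip = dispatchAndReturn(nd, opStack);
        // snapshot the positional state before descending
        String saved = ctx.branchState;
        List<? extends Node> children = nd.getChildren();
        if ((skip == null || !skip) && children != null) {
            for (Node ch : children) {
                // each sibling starts from the same saved state
                ctx.branchState = saved;
                walk(ch);
            }
        }
        opStack.pop();
    }
}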

Example 22 with Node

use of org.apache.hadoop.hive.ql.lib.Node in project hive by apache.

the class SparkCompiler method runSetReducerParallelism.

private void runSetReducerParallelism(OptimizeSparkProcContext procCtx) throws SemanticException {
    ParseContext pCtx = procCtx.getParseContext();
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("Set parallelism - ReduceSink", ReduceSinkOperator.getOperatorName() + "%"), new SetSparkReducerParallelism(pCtx.getConf()));
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
    GraphWalker ogw = new PreOrderWalker(disp);
    // Create a list of top operator nodes
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(pCtx.getTopOps().values());
    ogw.startWalking(topNodes, null);
}
Also used : NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) Node(org.apache.hadoop.hive.ql.lib.Node) RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp) ArrayList(java.util.ArrayList) Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) SetSparkReducerParallelism(org.apache.hadoop.hive.ql.optimizer.spark.SetSparkReducerParallelism) LinkedHashMap(java.util.LinkedHashMap) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) Rule(org.apache.hadoop.hive.ql.lib.Rule) TypeRule(org.apache.hadoop.hive.ql.lib.TypeRule) PreOrderWalker(org.apache.hadoop.hive.ql.lib.PreOrderWalker) GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker) DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker)
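
SetSparkReducerParallelism is the NodeProcessor that fires when the rule matches. For readers new to this framework, here is a minimal sketch of the interface being implemented (the interface and its signature are real; the body is illustrative, not Hive source):

import java.util.Stack;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;

class LoggingProcessor implements NodeProcessor {
    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
            Object... nodeOutputs) throws SemanticException {
        // 'stack' holds the path from a root node down to 'nd'; the rule's
        // regular expression was matched against the names along this path.
        System.out.println("fired on " + nd.getName() + " at depth " + stack.size());
        // The return value is recorded per node and handed to processors of
        // later nodes through 'nodeOutputs'.
        return null;
    }
}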

Example 23 with Node

use of org.apache.hadoop.hive.ql.lib.Node in project hive by apache.

the class SparkCompiler method runDynamicPartitionPruning.

private void runDynamicPartitionPruning(OptimizeSparkProcContext procCtx) throws SemanticException {
    if (!conf.getBoolVar(HiveConf.ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING)) {
        return;
    }
    ParseContext parseContext = procCtx.getParseContext();
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("Dynamic Partition Pruning", FilterOperator.getOperatorName() + "%"), new DynamicPartitionPruningOptimization());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
    GraphWalker ogw = new ForwardWalker(disp);
    List<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(parseContext.getTopOps().values());
    ogw.startWalking(topNodes, null);
    // of "and true and true" conditions.
    if (procCtx.getConf().getBoolVar(HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)) {
        new ConstantPropagate().transform(parseContext);
    }
}
Also used : NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor) ForwardWalker(org.apache.hadoop.hive.ql.lib.ForwardWalker) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) Node(org.apache.hadoop.hive.ql.lib.Node) RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp) ArrayList(java.util.ArrayList) Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) LinkedHashMap(java.util.LinkedHashMap) DynamicPartitionPruningOptimization(org.apache.hadoop.hive.ql.optimizer.DynamicPartitionPruningOptimization) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) Rule(org.apache.hadoop.hive.ql.lib.Rule) TypeRule(org.apache.hadoop.hive.ql.lib.TypeRule) GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker) DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker) ConstantPropagate(org.apache.hadoop.hive.ql.optimizer.ConstantPropagate)
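
Both guards in this method are plain HiveConf booleans: one gates the pruning pass itself, the other gates the follow-up ConstantPropagate run that folds away the "and true and true" residue the rewrite can leave behind. A minimal sketch of turning them on ahead of a Spark compile (not Hive source; the class and method names here are illustrative):

import org.apache.hadoop.hive.conf.HiveConf;

class EnablePruningExample {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // gate for runDynamicPartitionPruning itself
        conf.setBoolVar(HiveConf.ConfVars.SPARK_DYNAMIC_PARTITION_PRUNING, true);
        // gate for the follow-up constant-folding pass
        conf.setBoolVar(HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION, true);
    }
}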

Example 24 with Node

use of org.apache.hadoop.hive.ql.lib.Node in project hive by apache.

the class TezWalker method walk.

/**
   * Walk the given operator.
   *
   * @param nd
   *          operator being walked
   */
@Override
protected void walk(Node nd) throws SemanticException {
    List<? extends Node> children = nd.getChildren();
    // maintain the stack of operators encountered
    opStack.push(nd);
    Boolean skip = dispatchAndReturn(nd, opStack);
    if (skip == null || !skip) {
        // recurse into each child, depth-first
        for (Node ch : children) {
            walk(ch);
        }
    }
    // done with this operator
    opStack.pop();
}
Also used : Node(org.apache.hadoop.hive.ql.lib.Node)
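
Unlike GenSparkWorkWalker in Example 21, this override keeps no per-branch context: it only maintains opStack across the recursion. Wiring it up follows the same dispatcher pattern as the Spark examples; a minimal sketch (not Hive source), assuming TezWalker's single-Dispatcher constructor and taking the rules, context, and root operators as inputs:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.ql.lib.*;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.parse.TezWalker;

class TezWalkSketch {
    static void walkWithTez(Map<Rule, NodeProcessor> opRules, NodeProcessorCtx procCtx,
            List<? extends Node> roots) throws SemanticException {
        // null default processor: nodes with no matching rule are simply skipped
        Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
        GraphWalker ogw = new TezWalker(disp);
        List<Node> topNodes = new ArrayList<Node>(roots);
        ogw.startWalking(topNodes, null);
    }
}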

Example 25 with Node

use of org.apache.hadoop.hive.ql.lib.Node in project hive by apache.

the class TypeCheckProcFactory method genExprNode.

protected static Map<ASTNode, ExprNodeDesc> genExprNode(ASTNode expr, TypeCheckCtx tcCtx, TypeCheckProcFactory tf) throws SemanticException {
    // Create the walker, the rules dispatcher and the context.
    // create a walker which walks the tree in a DFS manner while maintaining
    // the operator stack. The dispatcher
    // generates the plan from the operator tree
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    opRules.put(new RuleRegExp("R1", HiveParser.TOK_NULL + "%"), tf.getNullExprProcessor());
    opRules.put(new RuleRegExp("R2", HiveParser.Number + "%|" + HiveParser.IntegralLiteral + "%|" + HiveParser.NumberLiteral + "%"), tf.getNumExprProcessor());
    opRules.put(new RuleRegExp("R3", HiveParser.Identifier + "%|" + HiveParser.StringLiteral + "%|" + HiveParser.TOK_CHARSETLITERAL + "%|" + HiveParser.TOK_STRINGLITERALSEQUENCE + "%|" + "%|" + HiveParser.KW_IF + "%|" + HiveParser.KW_CASE + "%|" + HiveParser.KW_WHEN + "%|" + HiveParser.KW_IN + "%|" + HiveParser.KW_ARRAY + "%|" + HiveParser.KW_MAP + "%|" + HiveParser.KW_STRUCT + "%|" + HiveParser.KW_EXISTS + "%|" + HiveParser.TOK_SUBQUERY_OP_NOTIN + "%"), tf.getStrExprProcessor());
    opRules.put(new RuleRegExp("R4", HiveParser.KW_TRUE + "%|" + HiveParser.KW_FALSE + "%"), tf.getBoolExprProcessor());
    opRules.put(new RuleRegExp("R5", HiveParser.TOK_DATELITERAL + "%|" + HiveParser.TOK_TIMESTAMPLITERAL + "%"), tf.getDateTimeExprProcessor());
    opRules.put(new RuleRegExp("R6", HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL + "%|" + HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL + "%|" + HiveParser.TOK_INTERVAL_YEAR_LITERAL + "%|" + HiveParser.TOK_INTERVAL_MONTH_LITERAL + "%|" + HiveParser.TOK_INTERVAL_DAY_LITERAL + "%|" + HiveParser.TOK_INTERVAL_HOUR_LITERAL + "%|" + HiveParser.TOK_INTERVAL_MINUTE_LITERAL + "%|" + HiveParser.TOK_INTERVAL_SECOND_LITERAL + "%"), tf.getIntervalExprProcessor());
    opRules.put(new RuleRegExp("R7", HiveParser.TOK_TABLE_OR_COL + "%"), tf.getColumnExprProcessor());
    opRules.put(new RuleRegExp("R8", HiveParser.TOK_SUBQUERY_EXPR + "%"), tf.getSubQueryExprProcessor());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(tf.getDefaultExprProcessor(), opRules, tcCtx);
    GraphWalker ogw = new ExpressionWalker(disp);
    // Create a list of top nodes
    ArrayList<Node> topNodes = Lists.<Node>newArrayList(expr);
    HashMap<Node, Object> nodeOutputs = new LinkedHashMap<Node, Object>();
    ogw.startWalking(topNodes, nodeOutputs);
    return convert(nodeOutputs);
}
Also used : NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) RelNode(org.apache.calcite.rel.RelNode) Node(org.apache.hadoop.hive.ql.lib.Node) RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp) Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) LinkedHashMap(java.util.LinkedHashMap) ExpressionWalker(org.apache.hadoop.hive.ql.lib.ExpressionWalker) Rule(org.apache.hadoop.hive.ql.lib.Rule) GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker)
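
A minimal sketch of calling into this factory (not Hive source), assuming the public two-argument overload TypeCheckProcFactory.genExprNode(ASTNode, TypeCheckCtx) that delegates to the method above, an already-parsed expression 'exprAST', and an existing RowResolver 'inputRR' describing the input row schema:

TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
Map<ASTNode, ExprNodeDesc> translated = TypeCheckProcFactory.genExprNode(exprAST, tcCtx);
// nodeOutputs were keyed by AST node, so the root node's entry is the
// ExprNodeDesc for the whole expression
ExprNodeDesc result = translated.get(exprAST);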

Aggregations

Node (org.apache.hadoop.hive.ql.lib.Node) 103
ArrayList (java.util.ArrayList) 87
Dispatcher (org.apache.hadoop.hive.ql.lib.Dispatcher) 78
DefaultRuleDispatcher (org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) 71
GraphWalker (org.apache.hadoop.hive.ql.lib.GraphWalker) 70
LinkedHashMap (java.util.LinkedHashMap) 60
NodeProcessor (org.apache.hadoop.hive.ql.lib.NodeProcessor) 59
Rule (org.apache.hadoop.hive.ql.lib.Rule) 58
DefaultGraphWalker (org.apache.hadoop.hive.ql.lib.DefaultGraphWalker) 56
RuleRegExp (org.apache.hadoop.hive.ql.lib.RuleRegExp) 50
HashMap (java.util.HashMap) 18
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc) 14
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 13
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) 11
ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc) 11
TaskGraphWalker (org.apache.hadoop.hive.ql.lib.TaskGraphWalker) 10
TypeRule (org.apache.hadoop.hive.ql.lib.TypeRule) 10
List (java.util.List) 8
RelNode (org.apache.calcite.rel.RelNode) 8
ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc) 7