Example 1 with NodeProcessorCtx

Use of org.apache.hadoop.hive.ql.lib.NodeProcessorCtx in project hive by apache.

Class StatsOptimizer, method transform:

@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
    if (pctx.getFetchTask() != null || !pctx.getQueryProperties().isQuery()
            || pctx.getQueryProperties().isAnalyzeRewrite() || pctx.getQueryProperties().isCTAS()
            || pctx.getLoadFileWork().size() > 1 || !pctx.getLoadTableWork().isEmpty()
            // the table is being sampled, so we cannot optimize
            || !pctx.getNameToSplitSample().isEmpty()) {
        return pctx;
    }
    String TS = TableScanOperator.getOperatorName() + "%";
    String GBY = GroupByOperator.getOperatorName() + "%";
    String RS = ReduceSinkOperator.getOperatorName() + "%";
    String SEL = SelectOperator.getOperatorName() + "%";
    String FS = FileSinkOperator.getOperatorName() + "%";
    Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
    opRules.put(new RuleRegExp("R1", TS + SEL + GBY + RS + GBY + SEL + FS), new MetaDataProcessor(pctx));
    opRules.put(new RuleRegExp("R2", TS + SEL + GBY + RS + GBY + FS), new MetaDataProcessor(pctx));
    NodeProcessorCtx soProcCtx = new StatsOptimizerProcContext();
    SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, soProcCtx);
    SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.addAll(pctx.getTopOps().values());
    ogw.startWalking(topNodes, null);
    return pctx;
}
Also used : SemanticRule(org.apache.hadoop.hive.ql.lib.SemanticRule) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker) Node(org.apache.hadoop.hive.ql.lib.Node) RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp) ArrayList(java.util.ArrayList) SemanticGraphWalker(org.apache.hadoop.hive.ql.lib.SemanticGraphWalker) LinkedHashMap(java.util.LinkedHashMap) NodeProcessorCtx(org.apache.hadoop.hive.ql.lib.NodeProcessorCtx) SemanticDispatcher(org.apache.hadoop.hive.ql.lib.SemanticDispatcher) SemanticNodeProcessor(org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor)
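
NodeProcessorCtx itself declares no methods; in Hive it is an empty marker interface, so each optimizer defines a concrete context class to carry whatever state its processors share during a walk (StatsOptimizerProcContext above plays exactly that role). A minimal sketch of such a context, with illustrative names that are not part of Hive:

public class VisitCountProcCtx implements NodeProcessorCtx {

    // shared state accumulated while the walker visits operators
    private int visited = 0;

    // a processor calls this after casting its procCtx argument back to this type
    public void markVisited() {
        visited++;
    }

    public int getVisited() {
        return visited;
    }
}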

Example 2 with NodeProcessorCtx

Use of org.apache.hadoop.hive.ql.lib.NodeProcessorCtx in project phoenix by apache.

Class IndexPredicateAnalyzer, method analyzePredicate:

/**
 * Analyzes a predicate.
 *
 * @param predicate        predicate to be analyzed
 * @param searchConditions receives conditions produced by analysis
 * @return residual predicate which could not be translated to
 * searchConditions
 */
public ExprNodeDesc analyzePredicate(ExprNodeDesc predicate, final List<IndexSearchCondition> searchConditions) {
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
    NodeProcessor nodeProcessor = new NodeProcessor() {

        @Override
        public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
            // we can only push down predicates that appear as part of a pure conjunction: reject OR, CASE, etc.
            for (Node ancestor : stack) {
                if (nd == ancestor) {
                    break;
                }
                if (!FunctionRegistry.isOpAnd((ExprNodeDesc) ancestor)) {
                    return nd;
                }
            }
            return analyzeExpr((ExprNodeGenericFuncDesc) nd, searchConditions, nodeOutputs);
        }
    };
    Dispatcher disp = new DefaultRuleDispatcher(nodeProcessor, opRules, null);
    GraphWalker ogw = new DefaultGraphWalker(disp);
    ArrayList<Node> topNodes = new ArrayList<Node>();
    topNodes.add(predicate);
    HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
    try {
        ogw.startWalking(topNodes, nodeOutput);
    } catch (SemanticException ex) {
        throw new RuntimeException(ex);
    }
    return (ExprNodeDesc) nodeOutput.get(predicate);
}
Also used : NodeProcessor(org.apache.hadoop.hive.ql.lib.NodeProcessor) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Node(org.apache.hadoop.hive.ql.lib.Node) DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker) ArrayList(java.util.ArrayList) Dispatcher(org.apache.hadoop.hive.ql.lib.Dispatcher) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) LinkedHashMap(java.util.LinkedHashMap) Stack(java.util.Stack) NodeProcessorCtx(org.apache.hadoop.hive.ql.lib.NodeProcessorCtx) Rule(org.apache.hadoop.hive.ql.lib.Rule) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) DefaultGraphWalker(org.apache.hadoop.hive.ql.lib.DefaultGraphWalker) GraphWalker(org.apache.hadoop.hive.ql.lib.GraphWalker) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
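
The anonymous processor above descends only through pure conjunctions: if any ancestor between the start node and the current node is not an AND, the current node is returned as-is, which marks that whole subtree as residual. A short sketch of the calling side, assuming an IndexPredicateAnalyzer instance named analyzer and a predicate already built (both names are illustrative):

List<IndexSearchCondition> conditions = new ArrayList<IndexSearchCondition>();
ExprNodeDesc residual = analyzer.analyzePredicate(predicate, conditions);
// conditions now holds the comparisons that can be pushed down to the index;
// a non-null residual is the remainder Hive must still evaluate after pushdown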

Example 3 with NodeProcessorCtx

Use of org.apache.hadoop.hive.ql.lib.NodeProcessorCtx in project hive by apache.

Class LBExprProcFactory, method genPruner:

/**
 * Generates the list bucketing pruner for the expression tree.
 *
 * @param tabAlias
 *          The table alias of the partition table that is being considered
 *          for pruning
 * @param pred
 *          The predicate from which the list bucketing pruner needs to be
 *          generated
 * @param part
 *          The partition being walked over
 * @return the list bucketing pruner expression generated from the predicate
 * @throws SemanticException
 */
public static ExprNodeDesc genPruner(String tabAlias, ExprNodeDesc pred, Partition part) throws SemanticException {
    // Create the context; PrunerUtils.walkExprTree sets up the walker and the rules dispatcher.
    NodeProcessorCtx lbprCtx = new LBExprProcCtx(tabAlias, part);
    Map<Node, Object> outputMap = PrunerUtils.walkExprTree(pred, lbprCtx, getColumnProcessor(), getFieldProcessor(), getGenericFuncProcessor(), getDefaultExprProcessor());
    // Get the ExprNodeDesc corresponding to the start node (the predicate itself).
    return (ExprNodeDesc) outputMap.get(pred);
}
Also used : NodeProcessorCtx(org.apache.hadoop.hive.ql.lib.NodeProcessorCtx) Node(org.apache.hadoop.hive.ql.lib.Node) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc)
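
LBExprProcCtx is the concrete context behind the NodeProcessorCtx reference above; its only job is to expose the table alias and the partition to the expression processors. A sketch of the shape such a context takes, reconstructed for illustration rather than copied from Hive:

public class LBExprProcCtx implements NodeProcessorCtx {

    private final String tabAlias; // alias of the table being pruned
    private final Partition part;  // partition the walker is working on

    public LBExprProcCtx(String tabAlias, Partition part) {
        this.tabAlias = tabAlias;
        this.part = part;
    }

    public String getTabAlias() {
        return tabAlias;
    }

    public Partition getPart() {
        return part;
    }
}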

Example 4 with NodeProcessorCtx

Use of org.apache.hadoop.hive.ql.lib.NodeProcessorCtx in project hive by apache.

Class ListBucketingPruner, method transform:

/*
   * (non-Javadoc)
   *
   * @see org.apache.hadoop.hive.ql.optimizer.Transform#transform(org.apache.hadoop.hive.ql.parse.
   * ParseContext)
   */
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
    // create the context for walking operators
    NodeProcessorCtx opPartWalkerCtx = new LBOpPartitionWalkerCtx(pctx);
    // Retrieve all partitions generated from partition pruner and partition column pruner
    PrunerUtils.walkOperatorTree(pctx, opPartWalkerCtx, LBPartitionProcFactory.getFilterProc(), LBPartitionProcFactory.getDefaultProc());
    PrunedPartitionList partsList = ((LBOpPartitionWalkerCtx) opPartWalkerCtx).getPartitions();
    if (partsList != null) {
        Set<Partition> parts = partsList.getPartitions();
        if (parts != null && !parts.isEmpty()) {
            for (Partition part : parts) {
                // only process partitions that are skewed and list bucketed
                if (ListBucketingPrunerUtils.isListBucketingPart(part)) {
                    // create the context for walking operators
                    NodeProcessorCtx opWalkerCtx = new LBOpWalkerCtx(pctx.getOpToPartToSkewedPruner(), part);
                    // walk operator tree to create expression tree for list bucketing
                    PrunerUtils.walkOperatorTree(pctx, opWalkerCtx, LBProcFactory.getFilterProc(), LBProcFactory.getDefaultProc());
                }
            }
        }
    }
    return pctx;
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) NodeProcessorCtx(org.apache.hadoop.hive.ql.lib.NodeProcessorCtx) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList)
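
Because opPartWalkerCtx is declared as the NodeProcessorCtx interface, the partitions collected during the first walk have to be read back through a downcast. Declaring the variable with its concrete type is an equivalent variant that avoids the cast:

LBOpPartitionWalkerCtx opPartWalkerCtx = new LBOpPartitionWalkerCtx(pctx);
PrunerUtils.walkOperatorTree(pctx, opPartWalkerCtx,
        LBPartitionProcFactory.getFilterProc(), LBPartitionProcFactory.getDefaultProc());
PrunedPartitionList partsList = opPartWalkerCtx.getPartitions();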

Example 5 with NodeProcessorCtx

Use of org.apache.hadoop.hive.ql.lib.NodeProcessorCtx in project hive by apache.

Class SparkCompiler, method generateTaskTreeHelper:

private void generateTaskTreeHelper(GenSparkProcContext procCtx, List<Node> topNodes) throws SemanticException {
    // create a walker which walks the tree in a DFS manner while maintaining
    // the operator stack. The dispatcher generates the plan from the operator tree
    Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
    GenSparkWork genSparkWork = new GenSparkWork(GenSparkUtils.getUtils());
    opRules.put(new RuleRegExp("Split Work - ReduceSink", ReduceSinkOperator.getOperatorName() + "%"), genSparkWork);
    opRules.put(new RuleRegExp("Split Work - SparkPartitionPruningSink", SparkPartitionPruningSinkOperator.getOperatorName() + "%"), genSparkWork);
    opRules.put(new TypeRule(MapJoinOperator.class), new SparkReduceSinkMapJoinProc());
    opRules.put(new RuleRegExp("Split Work + Move/Merge - FileSink", FileSinkOperator.getOperatorName() + "%"), new CompositeProcessor(new SparkFileSinkProcessor(), genSparkWork));
    opRules.put(new RuleRegExp("Handle Analyze Command", TableScanOperator.getOperatorName() + "%"), new SparkProcessAnalyzeTable(GenSparkUtils.getUtils()));
    opRules.put(new RuleRegExp("Remember union", UnionOperator.getOperatorName() + "%"), new SemanticNodeProcessor() {

        @Override
        public Object process(Node n, Stack<Node> s, NodeProcessorCtx procCtx, Object... os) throws SemanticException {
            GenSparkProcContext context = (GenSparkProcContext) procCtx;
            UnionOperator union = (UnionOperator) n;
            // simply need to remember that we've seen a union.
            context.currentUnionOperators.add(union);
            return null;
        }
    });
    /**
     *  SMB join case:   (Big)   (Small)  (Small)
     *                     TS       TS       TS
     *                      \       |       /
     *                       \      DS     DS
     *                         \   |    /
     *                         SMBJoinOP
     *
     * Some of the other processors are expecting only one traversal beyond SMBJoinOp.
     * We need to traverse from the big-table path only, and stop traversing on the
     * small-table path once we reach SMBJoinOp.
     * Also add some SMB join information to the context, so we can properly annotate
     * the MapWork later on.
     */
    opRules.put(new TypeRule(SMBMapJoinOperator.class), new SemanticNodeProcessor() {

        @Override
        public Object process(Node currNode, Stack<Node> stack, NodeProcessorCtx procCtx, Object... os) throws SemanticException {
            GenSparkProcContext context = (GenSparkProcContext) procCtx;
            SMBMapJoinOperator currSmbNode = (SMBMapJoinOperator) currNode;
            SparkSMBMapJoinInfo smbMapJoinCtx = context.smbMapJoinCtxMap.get(currSmbNode);
            if (smbMapJoinCtx == null) {
                smbMapJoinCtx = new SparkSMBMapJoinInfo();
                context.smbMapJoinCtxMap.put(currSmbNode, smbMapJoinCtx);
            }
            for (Node stackNode : stack) {
                if (stackNode instanceof DummyStoreOperator) {
                    // If coming from small-table side, do some book-keeping, and skip traversal.
                    smbMapJoinCtx.smallTableRootOps.add(context.currentRootOperator);
                    return true;
                }
            }
            // If coming from big-table side, do some book-keeping, and continue traversal
            smbMapJoinCtx.bigTableRootOp = context.currentRootOperator;
            return false;
        }
    });
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
    SemanticGraphWalker ogw = new GenSparkWorkWalker(disp, procCtx);
    ogw.startWalking(topNodes, null);
}
Also used : Node(org.apache.hadoop.hive.ql.lib.Node) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) LinkedHashMap(java.util.LinkedHashMap) NodeProcessorCtx(org.apache.hadoop.hive.ql.lib.NodeProcessorCtx) UnionOperator(org.apache.hadoop.hive.ql.exec.UnionOperator) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) SMBMapJoinOperator(org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator) SemanticRule(org.apache.hadoop.hive.ql.lib.SemanticRule) DefaultRuleDispatcher(org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) DummyStoreOperator(org.apache.hadoop.hive.ql.exec.DummyStoreOperator) RuleRegExp(org.apache.hadoop.hive.ql.lib.RuleRegExp) SemanticGraphWalker(org.apache.hadoop.hive.ql.lib.SemanticGraphWalker) CompositeProcessor(org.apache.hadoop.hive.ql.lib.CompositeProcessor) SemanticDispatcher(org.apache.hadoop.hive.ql.lib.SemanticDispatcher) SemanticNodeProcessor(org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor) SparkReduceSinkMapJoinProc(org.apache.hadoop.hive.ql.optimizer.spark.SparkReduceSinkMapJoinProc) TypeRule(org.apache.hadoop.hive.ql.lib.TypeRule)
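
The Boolean returned by the SMB processor is what stops the walk on the small-table side. Assuming GenSparkWorkWalker follows the same dispatch-and-return idiom as Hive's Tez work walker, the relevant part of its walk method looks roughly like this (a paraphrase, not the exact source):

// inside GenSparkWorkWalker.walk(Node nd), roughly:
opStack.push(nd);
Boolean skip = dispatchAndReturn(nd, opStack); // the processor's return value
if (skip == null || !skip) {
    // descend into children only when the processor did not return true
    for (Node child : nd.getChildren()) {
        walk(child);
    }
}
opStack.pop();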

Aggregations

NodeProcessorCtx (org.apache.hadoop.hive.ql.lib.NodeProcessorCtx): 8 uses
Node (org.apache.hadoop.hive.ql.lib.Node): 7 uses
SemanticNodeProcessor (org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor): 5 uses
ArrayList (java.util.ArrayList): 4 uses
LinkedHashMap (java.util.LinkedHashMap): 4 uses
Stack (java.util.Stack): 4 uses
DefaultRuleDispatcher (org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher): 4 uses
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 4 uses
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 4 uses
HashMap (java.util.HashMap): 3 uses
DefaultGraphWalker (org.apache.hadoop.hive.ql.lib.DefaultGraphWalker): 3 uses
SemanticDispatcher (org.apache.hadoop.hive.ql.lib.SemanticDispatcher): 3 uses
SemanticGraphWalker (org.apache.hadoop.hive.ql.lib.SemanticGraphWalker): 3 uses
SemanticRule (org.apache.hadoop.hive.ql.lib.SemanticRule): 3 uses
MapJoinOperator (org.apache.hadoop.hive.ql.exec.MapJoinOperator): 2 uses
Operator (org.apache.hadoop.hive.ql.exec.Operator): 2 uses
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator): 2 uses
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 2 uses
PrunedPartitionList (org.apache.hadoop.hive.ql.parse.PrunedPartitionList): 2 uses
ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc): 2 uses