Example 16 with HiveParserASTNode

use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.

the class HiveParserDDLSemanticAnalyzer method convertAlterTableFileFormat.

private Operation convertAlterTableFileFormat(CatalogBaseTable alteredTable, HiveParserASTNode ast, String tableName, HashMap<String, String> partSpec) throws SemanticException {
    HiveParserStorageFormat format = new HiveParserStorageFormat(conf);
    HiveParserASTNode child = (HiveParserASTNode) ast.getChild(0);
    if (!format.fillStorageFormat(child)) {
        throw new ValidationException("Unknown AST node for ALTER TABLE FILEFORMAT: " + child);
    }
    Map<String, String> newProps = new HashMap<>();
    newProps.put(ALTER_TABLE_OP, CHANGE_FILE_FORMAT.name());
    newProps.put(STORED_AS_FILE_FORMAT, format.getGenericName());
    return convertAlterTableProps(alteredTable, tableName, partSpec, newProps);
}
Also used: HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), ValidationException (org.apache.flink.table.api.ValidationException), HiveParserStorageFormat (org.apache.flink.table.planner.delegation.hive.copy.HiveParserStorageFormat), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap)
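For context, a hedged usage sketch of the statement that exercises this conversion path. It assumes a HiveCatalog is registered as the current catalog and that the partitioned table t already exists; all names are illustrative.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class AlterFileFormatSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Switch to the Hive dialect so DDL is routed through
        // HiveParserDDLSemanticAnalyzer instead of Flink's default parser.
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        // This statement is parsed into the HiveParserASTNode tree handled above;
        // convertAlterTableFileFormat records the new format as table properties
        // (ALTER_TABLE_OP / STORED_AS_FILE_FORMAT).
        tEnv.executeSql("ALTER TABLE t PARTITION (p = 1) SET FILEFORMAT ORC");
    }
}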

Example 17 with HiveParserASTNode

use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.

the class HiveParserUtils method convertGrouping.

private static void convertGrouping(HiveParserASTNode function, List<HiveParserASTNode> grpByAstExprs, boolean noneSet, boolean legacyGrouping, MutableBoolean found) {
    HiveParserASTNode col = (HiveParserASTNode) function.getChild(1);
    for (int i = 0; i < grpByAstExprs.size(); i++) {
        HiveParserASTNode grpByExpr = grpByAstExprs.get(i);
        if (grpByExpr.toStringTree().equals(col.toStringTree())) {
            HiveParserASTNode child1;
            if (noneSet) {
                // The query contains no CUBE, ROLLUP, or GROUPING SETS, so
                // grouping should return 0.
                child1 = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(
                        HiveASTParser.IntegralLiteral, String.valueOf(0));
            } else {
                // Refer to the grouping__id virtual column instead.
                child1 = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(
                        HiveASTParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL");
                HiveASTParseDriver.ADAPTOR.addChild(
                        child1,
                        HiveASTParseDriver.ADAPTOR.create(
                                HiveASTParser.Identifier, VirtualColumn.GROUPINGID.getName()));
                if (legacyGrouping) {
                    child1 = convertToLegacyGroupingId(child1, grpByAstExprs.size());
                }
            }
            HiveParserASTNode child2 = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(
                    HiveASTParser.IntegralLiteral,
                    String.valueOf(nonNegativeMod(legacyGrouping ? i : -i - 1, grpByAstExprs.size())));
            function.setChild(1, child1);
            function.addChild(child2);
            found.setValue(true);
            break;
        }
    }
}
Also used: HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode)
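The loop above rewrites a call like grouping(b) into grouping(grouping__id, idx), where idx encodes the position of b among the group-by expressions. A minimal sketch of the index computation at the call site, assuming nonNegativeMod has the usual non-negative-modulo behavior; the helper below is hypothetical, not the Flink method:

// Hypothetical helper mirroring the child2 computation above; not a Flink API.
final class GroupingIndexSketch {
    static int groupingArgIndex(int i, int n, boolean legacyGrouping) {
        int v = legacyGrouping ? i : -i - 1;
        return ((v % n) + n) % n; // assumed behavior of nonNegativeMod
    }
}
// With n = 3 and legacyGrouping == false: i = 0 -> 2, i = 1 -> 1, i = 2 -> 0,
// i.e. positions are effectively counted from the opposite end under the
// non-legacy semantics.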

Example 18 with HiveParserASTNode

use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.

the class HiveParserUtils method convertToLegacyGroupingId.

private static HiveParserASTNode convertToLegacyGroupingId(HiveParserASTNode groupingId, int numGBExprs) {
    HiveParserASTNode converterFunc = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(HiveASTParser.TOK_FUNCTION, "TOK_FUNCTION");
    // function name
    converterFunc.addChild((Tree) HiveASTParseDriver.ADAPTOR.create(HiveASTParser.StringLiteral, GenericUDFLegacyGroupingID.NAME));
    // origin grouping__id
    converterFunc.addChild(groupingId);
    // num of group by expressions
    converterFunc.addChild((Tree) HiveASTParseDriver.ADAPTOR.create(HiveASTParser.IntegralLiteral, String.valueOf(numGBExprs)));
    return converterFunc;
}
Also used: HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode)
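A hedged sketch of the subtree this builds, rendered in ANTLR toStringTree() notation. The first child prints as whatever GenericUDFLegacyGroupingID.NAME evaluates to (shown symbolically), and groupingIdNode stands for the grouping__id reference built by convertGrouping above:

// Inside HiveParserUtils (the method is private). For numGBExprs = 2 the
// wrapped node would print roughly as:
//   (TOK_FUNCTION <GenericUDFLegacyGroupingID.NAME> (TOK_TABLE_OR_COL grouping__id) 2)
HiveParserASTNode wrapped = convertToLegacyGroupingId(groupingIdNode, 2);
System.out.println(wrapped.toStringTree());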

Example 19 with HiveParserASTNode

use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.

the class HiveParserTypeCheckProcFactory method genExprNode.

public static Map<HiveParserASTNode, ExprNodeDesc> genExprNode(HiveParserASTNode expr, HiveParserTypeCheckCtx tcCtx, HiveParserTypeCheckProcFactory tf) throws SemanticException {
    // Create the walker, the rule dispatcher and the context: the walker traverses
    // the tree in DFS order while maintaining the operator stack, and the dispatcher
    // generates the plan from the operator tree.
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<>();
    opRules.put(new RuleRegExp("R1", HiveASTParser.TOK_NULL + "%"), tf.getNullExprProcessor());
    opRules.put(new RuleRegExp("R2",
            HiveASTParser.Number + "%|" + HiveASTParser.IntegralLiteral + "%|"
                    + HiveASTParser.NumberLiteral + "%"),
            tf.getNumExprProcessor());
    // Note: the stray `+ "%|"` below adds a lone "%" alternative to the pattern;
    // it is preserved here exactly as it appears in the source.
    opRules.put(new RuleRegExp("R3",
            HiveASTParser.Identifier + "%|" + HiveASTParser.StringLiteral + "%|"
                    + HiveASTParser.TOK_CHARSETLITERAL + "%|"
                    + HiveASTParser.TOK_STRINGLITERALSEQUENCE + "%|" + "%|"
                    + HiveASTParser.KW_IF + "%|" + HiveASTParser.KW_CASE + "%|"
                    + HiveASTParser.KW_WHEN + "%|" + HiveASTParser.KW_IN + "%|"
                    + HiveASTParser.KW_ARRAY + "%|" + HiveASTParser.KW_MAP + "%|"
                    + HiveASTParser.KW_STRUCT + "%|" + HiveASTParser.KW_EXISTS + "%|"
                    + HiveASTParser.TOK_SUBQUERY_OP_NOTIN + "%"),
            tf.getStrExprProcessor());
    opRules.put(new RuleRegExp("R4",
            HiveASTParser.KW_TRUE + "%|" + HiveASTParser.KW_FALSE + "%"),
            tf.getBoolExprProcessor());
    opRules.put(new RuleRegExp("R5",
            HiveASTParser.TOK_DATELITERAL + "%|" + HiveASTParser.TOK_TIMESTAMPLITERAL + "%"),
            tf.getDateTimeExprProcessor());
    opRules.put(new RuleRegExp("R6",
            HiveASTParser.TOK_INTERVAL_YEAR_MONTH_LITERAL + "%|"
                    + HiveASTParser.TOK_INTERVAL_DAY_TIME_LITERAL + "%|"
                    + HiveASTParser.TOK_INTERVAL_YEAR_LITERAL + "%|"
                    + HiveASTParser.TOK_INTERVAL_MONTH_LITERAL + "%|"
                    + HiveASTParser.TOK_INTERVAL_DAY_LITERAL + "%|"
                    + HiveASTParser.TOK_INTERVAL_HOUR_LITERAL + "%|"
                    + HiveASTParser.TOK_INTERVAL_MINUTE_LITERAL + "%|"
                    + HiveASTParser.TOK_INTERVAL_SECOND_LITERAL + "%"),
            tf.getIntervalExprProcessor());
    opRules.put(new RuleRegExp("R7", HiveASTParser.TOK_TABLE_OR_COL + "%"),
            tf.getColumnExprProcessor());
    opRules.put(new RuleRegExp("R8", HiveASTParser.TOK_SUBQUERY_EXPR + "%"),
            tf.getSubQueryExprProcessor());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along
    Dispatcher disp = new DefaultRuleDispatcher(tf.getDefaultExprProcessor(), opRules, tcCtx);
    GraphWalker ogw = new HiveParserExpressionWalker(disp);
    // Create a list of top nodes
    ArrayList<Node> topNodes = new ArrayList<>(Collections.singleton(expr));
    HashMap<Node, Object> nodeOutputs = new LinkedHashMap<>();
    ogw.startWalking(topNodes, nodeOutputs);
    return convert(nodeOutputs);
}
Also used: NodeProcessor (org.apache.hadoop.hive.ql.lib.NodeProcessor), DefaultRuleDispatcher (org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher), HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), RexNode (org.apache.calcite.rex.RexNode), RelNode (org.apache.calcite.rel.RelNode), Node (org.apache.hadoop.hive.ql.lib.Node), RuleRegExp (org.apache.hadoop.hive.ql.lib.RuleRegExp), ArrayList (java.util.ArrayList), Dispatcher (org.apache.hadoop.hive.ql.lib.Dispatcher), LinkedHashMap (java.util.LinkedHashMap), Rule (org.apache.hadoop.hive.ql.lib.Rule), GraphWalker (org.apache.hadoop.hive.ql.lib.GraphWalker), HiveParserExpressionWalker (org.apache.flink.table.planner.delegation.hive.copy.HiveParserExpressionWalker)
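A hedged sketch of how the result map is consumed. The construction of the type-check context is assumed here, since HiveParserTypeCheckCtx's constructor is not shown in this listing; exprAst stands for an already-parsed expression subtree such as a + 1:

// Hedged usage sketch; tcCtx construction is assumed and elided.
HiveParserTypeCheckCtx tcCtx =
        /* built by the analyzer from the input HiveParserRowResolver */ null;
Map<HiveParserASTNode, ExprNodeDesc> converted =
        HiveParserTypeCheckProcFactory.genExprNode(exprAst, tcCtx, typeCheckProcFactory);
// startWalking records one output per visited node; the entry for the root node
// is the fully translated expression.
ExprNodeDesc rootDesc = converted.get(exprAst);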

Example 20 with HiveParserASTNode

use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.

the class HiveParserTypeCheckProcFactory method processGByExpr.

/**
 * Performs group-by subexpression elimination. This is called by all the processors
 * initially. As an example, consider the query "select a+b, count(1) from T group by a+b":
 * a+b is already precomputed in the group-by operator's key, so we substitute a+b in the
 * select list with the internal column name of the a+b expression that appears in the input
 * row resolver.
 *
 * @param nd The node that is being inspected.
 * @param procCtx The processor context.
 * @return the ExprNodeColumnDesc for the pre-computed expression, or null if elimination
 *     does not apply.
 */
public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) throws SemanticException {
    // We recursively create the exprNodeDesc. Base cases: when we encounter a
    // column ref, we convert it into an exprNodeColumnDesc; when we encounter a
    // constant, we convert it into an exprNodeConstantDesc. For others we just
    // build the exprNodeFuncDesc with recursively built children.
    HiveParserASTNode expr = (HiveParserASTNode) nd;
    HiveParserTypeCheckCtx ctx = (HiveParserTypeCheckCtx) procCtx;
    // having key in (select .. where a = min(b.value)
    if (!ctx.isUseCaching() && ctx.getOuterRR() == null) {
        return null;
    }
    HiveParserRowResolver input = ctx.getInputRR();
    ExprNodeDesc desc = null;
    if (input == null || !ctx.getAllowGBExprElimination()) {
        return null;
    }
    // If the current subExpression is pre-calculated, as in Group-By etc.
    ColumnInfo colInfo = input.getExpression(expr);
    // try outer row resolver
    HiveParserRowResolver outerRR = ctx.getOuterRR();
    if (colInfo == null && outerRR != null) {
        colInfo = outerRR.getExpression(expr);
    }
    if (colInfo != null) {
        desc = new ExprNodeColumnDesc(colInfo);
        HiveParserASTNode source = input.getExpressionSource(expr);
        if (source != null) {
            ctx.getUnparseTranslator().addCopyTranslation(expr, source);
        }
        return desc;
    }
    return desc;
}
Also used: HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), HiveParserRowResolver (org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), HiveParserTypeCheckCtx (org.apache.flink.table.planner.delegation.hive.copy.HiveParserTypeCheckCtx)
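To make the Javadoc's example concrete: for select a+b, count(1) from T group by a+b, the select-list occurrence of a+b is not rebuilt as a function call but replaced with a reference to the group-by output column. A hedged illustration follows; the internal column name "_col0" and the int type are hypothetical, as the real values come from the ColumnInfo found in the row resolver:

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

// Without elimination, a+b would be re-derived as a "+" function call over two
// column refs. With elimination, processGByExpr returns a plain column
// reference to the pre-computed group-by key, conceptually:
ExprNodeDesc desc = new ExprNodeColumnDesc(
        TypeInfoFactory.intTypeInfo, // hypothetical type of a+b
        "_col0",                     // hypothetical internal name from the row resolver
        null,                        // no table alias
        false);                      // not a virtual column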

Aggregations

HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode): 38 usages
LinkedHashMap (java.util.LinkedHashMap): 18 usages
HashMap (java.util.HashMap): 15 usages
ArrayList (java.util.ArrayList): 14 usages
RelNode (org.apache.calcite.rel.RelNode): 14 usages
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 14 usages
ValidationException (org.apache.flink.table.api.ValidationException): 10 usages
UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint): 10 usages
NotNullConstraint (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint): 10 usages
HiveParserRowResolver (org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver): 10 usages
RexNode (org.apache.calcite.rex.RexNode): 9 usages
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 9 usages
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 9 usages
Map (java.util.Map): 8 usages
HiveParserTypeCheckCtx (org.apache.flink.table.planner.delegation.hive.copy.HiveParserTypeCheckCtx): 6 usages
Table (org.apache.hadoop.hive.ql.metadata.Table): 6 usages
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 5 usages
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 5 usages
HiveParserQBParseInfo (org.apache.flink.table.planner.delegation.hive.copy.HiveParserQBParseInfo): 5 usages
RelDataType (org.apache.calcite.rel.type.RelDataType): 4 usages