Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.
From the class HiveParserDDLSemanticAnalyzer, method convertAlterTableFileFormat:
private Operation convertAlterTableFileFormat(
        CatalogBaseTable alteredTable,
        HiveParserASTNode ast,
        String tableName,
        HashMap<String, String> partSpec)
        throws SemanticException {
    HiveParserStorageFormat format = new HiveParserStorageFormat(conf);
    HiveParserASTNode child = (HiveParserASTNode) ast.getChild(0);
    if (!format.fillStorageFormat(child)) {
        throw new ValidationException("Unknown AST node for ALTER TABLE FILEFORMAT: " + child);
    }
    Map<String, String> newProps = new HashMap<>();
    newProps.put(ALTER_TABLE_OP, CHANGE_FILE_FORMAT.name());
    newProps.put(STORED_AS_FILE_FORMAT, format.getGenericName());
    return convertAlterTableProps(alteredTable, tableName, partSpec, newProps);
}
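Note that the format change is not applied to the table directly; it is encoded as a pair of table properties that convertAlterTableProps later folds into the catalog operation. Below is a minimal runnable sketch of the map produced for a statement like ALTER TABLE t SET FILEFORMAT ORC, with illustrative literal keys and values standing in for the ALTER_TABLE_OP and STORED_AS_FILE_FORMAT constants:

import java.util.HashMap;
import java.util.Map;

public class FileFormatPropsSketch {
    public static void main(String[] args) {
        Map<String, String> newProps = new HashMap<>();
        // Illustrative keys and values; the analyzer uses its ALTER_TABLE_OP and
        // STORED_AS_FILE_FORMAT constants, not these literals.
        newProps.put("alter.table.op", "CHANGE_FILE_FORMAT"); // CHANGE_FILE_FORMAT.name()
        newProps.put("stored.as.file.format", "ORC"); // format.getGenericName()
        System.out.println(newProps);
    }
}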
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.
From the class HiveParserUtils, method convertGrouping:
private static void convertGrouping(
        HiveParserASTNode function,
        List<HiveParserASTNode> grpByAstExprs,
        boolean noneSet,
        boolean legacyGrouping,
        MutableBoolean found) {
    HiveParserASTNode col = (HiveParserASTNode) function.getChild(1);
    for (int i = 0; i < grpByAstExprs.size(); i++) {
        HiveParserASTNode grpByExpr = grpByAstExprs.get(i);
        if (grpByExpr.toStringTree().equals(col.toStringTree())) {
            HiveParserASTNode child1;
            if (noneSet) {
                // Query does not contain CUBE, ROLLUP, or GROUPING SETS,
                // and thus, grouping should return 0
                child1 = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(
                        HiveASTParser.IntegralLiteral, String.valueOf(0));
            } else {
                // We refer to the grouping_id column
                child1 = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(
                        HiveASTParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL");
                HiveASTParseDriver.ADAPTOR.addChild(
                        child1,
                        HiveASTParseDriver.ADAPTOR.create(
                                HiveASTParser.Identifier, VirtualColumn.GROUPINGID.getName()));
                if (legacyGrouping) {
                    child1 = convertToLegacyGroupingId(child1, grpByAstExprs.size());
                }
            }
            HiveParserASTNode child2 = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(
                    HiveASTParser.IntegralLiteral,
                    String.valueOf(nonNegativeMod(
                            legacyGrouping ? i : -i - 1, grpByAstExprs.size())));
            function.setChild(1, child1);
            function.addChild(child2);
            found.setValue(true);
            break;
        }
    }
}
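The second argument of the rewritten grouping call is the index computed by nonNegativeMod(legacyGrouping ? i : -i - 1, n). A small runnable sketch of that arithmetic, assuming nonNegativeMod is the usual ((x % m) + m) % m:

public class GroupingIndexSketch {
    // Assumed to match the nonNegativeMod referenced above: ((x % m) + m) % m.
    static int nonNegativeMod(int x, int m) {
        return ((x % m) + m) % m;
    }

    public static void main(String[] args) {
        int n = 4; // number of GROUP BY expressions
        for (int i = 0; i < n; i++) {
            // Legacy mode keeps the position i as-is; the current encoding maps
            // -i - 1 to n - 1 - i, i.e. the bit positions are reversed.
            System.out.printf("expr %d -> legacy %d, current %d%n",
                    i, nonNegativeMod(i, n), nonNegativeMod(-i - 1, n));
        }
    }
}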
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.
From the class HiveParserUtils, method convertToLegacyGroupingId:
private static HiveParserASTNode convertToLegacyGroupingId(
        HiveParserASTNode groupingId, int numGBExprs) {
    HiveParserASTNode converterFunc = (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(
            HiveASTParser.TOK_FUNCTION, "TOK_FUNCTION");
    // function name
    converterFunc.addChild((Tree) HiveASTParseDriver.ADAPTOR.create(
            HiveASTParser.StringLiteral, GenericUDFLegacyGroupingID.NAME));
    // origin grouping__id
    converterFunc.addChild(groupingId);
    // num of group by expressions
    converterFunc.addChild((Tree) HiveASTParseDriver.ADAPTOR.create(
            HiveASTParser.IntegralLiteral, String.valueOf(numGBExprs)));
    return converterFunc;
}
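For reference, the subtree this helper returns renders roughly as follows in toStringTree-style notation, with the UDF name shown symbolically:

    (TOK_FUNCTION <GenericUDFLegacyGroupingID.NAME> <groupingId subtree> <numGBExprs>)

so the original grouping__id reference becomes an argument of a conversion function rather than being read directly.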
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.
From the class HiveParserTypeCheckProcFactory, method genExprNode:
public static Map<HiveParserASTNode, ExprNodeDesc> genExprNode(
        HiveParserASTNode expr, HiveParserTypeCheckCtx tcCtx, HiveParserTypeCheckProcFactory tf)
        throws SemanticException {
    // Create the walker, the rules dispatcher and the context. The walker walks
    // the tree in a DFS manner while maintaining the operator stack; the
    // dispatcher generates the plan from the operator tree.
    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<>();
    opRules.put(new RuleRegExp("R1", HiveASTParser.TOK_NULL + "%"), tf.getNullExprProcessor());
    opRules.put(
            new RuleRegExp("R2",
                    HiveASTParser.Number + "%|"
                            + HiveASTParser.IntegralLiteral + "%|"
                            + HiveASTParser.NumberLiteral + "%"),
            tf.getNumExprProcessor());
    opRules.put(
            new RuleRegExp("R3",
                    HiveASTParser.Identifier + "%|"
                            + HiveASTParser.StringLiteral + "%|"
                            + HiveASTParser.TOK_CHARSETLITERAL + "%|"
                            + HiveASTParser.TOK_STRINGLITERALSEQUENCE + "%|"
                            + "%|" // (empty alternative preserved from the source)
                            + HiveASTParser.KW_IF + "%|"
                            + HiveASTParser.KW_CASE + "%|"
                            + HiveASTParser.KW_WHEN + "%|"
                            + HiveASTParser.KW_IN + "%|"
                            + HiveASTParser.KW_ARRAY + "%|"
                            + HiveASTParser.KW_MAP + "%|"
                            + HiveASTParser.KW_STRUCT + "%|"
                            + HiveASTParser.KW_EXISTS + "%|"
                            + HiveASTParser.TOK_SUBQUERY_OP_NOTIN + "%"),
            tf.getStrExprProcessor());
    opRules.put(
            new RuleRegExp("R4", HiveASTParser.KW_TRUE + "%|" + HiveASTParser.KW_FALSE + "%"),
            tf.getBoolExprProcessor());
    opRules.put(
            new RuleRegExp("R5",
                    HiveASTParser.TOK_DATELITERAL + "%|"
                            + HiveASTParser.TOK_TIMESTAMPLITERAL + "%"),
            tf.getDateTimeExprProcessor());
    opRules.put(
            new RuleRegExp("R6",
                    HiveASTParser.TOK_INTERVAL_YEAR_MONTH_LITERAL + "%|"
                            + HiveASTParser.TOK_INTERVAL_DAY_TIME_LITERAL + "%|"
                            + HiveASTParser.TOK_INTERVAL_YEAR_LITERAL + "%|"
                            + HiveASTParser.TOK_INTERVAL_MONTH_LITERAL + "%|"
                            + HiveASTParser.TOK_INTERVAL_DAY_LITERAL + "%|"
                            + HiveASTParser.TOK_INTERVAL_HOUR_LITERAL + "%|"
                            + HiveASTParser.TOK_INTERVAL_MINUTE_LITERAL + "%|"
                            + HiveASTParser.TOK_INTERVAL_SECOND_LITERAL + "%"),
            tf.getIntervalExprProcessor());
    opRules.put(
            new RuleRegExp("R7", HiveASTParser.TOK_TABLE_OR_COL + "%"),
            tf.getColumnExprProcessor());
    opRules.put(
            new RuleRegExp("R8", HiveASTParser.TOK_SUBQUERY_EXPR + "%"),
            tf.getSubQueryExprProcessor());
    // The dispatcher fires the processor corresponding to the closest matching
    // rule and passes the context along.
    Dispatcher disp = new DefaultRuleDispatcher(tf.getDefaultExprProcessor(), opRules, tcCtx);
    GraphWalker ogw = new HiveParserExpressionWalker(disp);
    // Create a list of top nodes.
    ArrayList<Node> topNodes = new ArrayList<>(Collections.singleton(expr));
    HashMap<Node, Object> nodeOutputs = new LinkedHashMap<>();
    ogw.startWalking(topNodes, nodeOutputs);
    return convert(nodeOutputs);
}
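A hedged usage sketch: the caller passes the root of an expression AST together with a configured type-check context, and reads the root's descriptor back out of the returned map. The two-argument overload used here is an assumption, mirroring Hive's TypeCheckProcFactory, which supplies a default processor factory:

// Sketch only: tcCtx is assumed to be an already-configured HiveParserTypeCheckCtx,
// and the two-argument genExprNode overload is assumed to exist as in Hive.
static ExprNodeDesc typeCheck(HiveParserASTNode exprAst, HiveParserTypeCheckCtx tcCtx)
        throws SemanticException {
    Map<HiveParserASTNode, ExprNodeDesc> converted =
            HiveParserTypeCheckProcFactory.genExprNode(exprAst, tcCtx);
    return converted.get(exprAst); // null if the root could not be converted
}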
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode in project flink by apache.
From the class HiveParserTypeCheckProcFactory, method processGByExpr:
/**
 * Function to do group-by subexpression elimination. This is called by all the processors
 * initially. As an example, consider the query: select a+b, count(1) from T group by a+b.
 * Here a+b is already precomputed in the group-by operator's key, so we substitute a+b in
 * the select list with the internal column name of the a+b expression that appears in the
 * input row resolver.
 *
 * @param nd The node that is being inspected.
 * @param procCtx The processor context.
 * @return the ExprNodeColumnDesc of the precomputed expression, or null if it cannot be
 *     eliminated.
 */
public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) throws SemanticException {
    // We recursively create the exprNodeDesc. Base cases: when we encounter a
    // column ref, we convert that into an exprNodeColumnDesc; when we encounter
    // a constant, we convert that into an exprNodeConstantDesc. For others we
    // just build the exprNodeFuncDesc with recursively built children.
    HiveParserASTNode expr = (HiveParserASTNode) nd;
    HiveParserTypeCheckCtx ctx = (HiveParserTypeCheckCtx) procCtx;
    // having key in (select .. where a = min(b.value)
    if (!ctx.isUseCaching() && ctx.getOuterRR() == null) {
        return null;
    }
    HiveParserRowResolver input = ctx.getInputRR();
    ExprNodeDesc desc = null;
    if (input == null || !ctx.getAllowGBExprElimination()) {
        return null;
    }
    // If the current subExpression is pre-calculated, as in Group-By etc.
    ColumnInfo colInfo = input.getExpression(expr);
    // try outer row resolver
    HiveParserRowResolver outerRR = ctx.getOuterRR();
    if (colInfo == null && outerRR != null) {
        colInfo = outerRR.getExpression(expr);
    }
    if (colInfo != null) {
        desc = new ExprNodeColumnDesc(colInfo);
        HiveParserASTNode source = input.getExpressionSource(expr);
        if (source != null) {
            ctx.getUnparseTranslator().addCopyTranslation(expr, source);
        }
        return desc;
    }
    return desc;
}
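Concretely, for the Javadoc's example query the elimination plays out as sketched below; the internal column name is illustrative:

// SELECT a + b, COUNT(1) FROM t GROUP BY a + b
//
// The group-by operator already emits a + b under an internal name, say _col0.
// input.getExpression(expr) then returns the ColumnInfo for _col0, so the
// SELECT-list "a + b" collapses to a plain column reference instead of a
// re-evaluated addition:
ExprNodeDesc desc = new ExprNodeColumnDesc(colInfo); // references _col0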