Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in the Apache Flink project.
Class HiveParserCalcitePlanner, method getWindowRexAndType.
private Pair<RexNode, TypeInfo> getWindowRexAndType(HiveParserWindowingSpec.WindowExpressionSpec winExprSpec, RelNode srcRel) throws SemanticException {
RexNode window;
if (winExprSpec instanceof HiveParserWindowingSpec.WindowFunctionSpec) {
HiveParserWindowingSpec.WindowFunctionSpec wFnSpec = (HiveParserWindowingSpec.WindowFunctionSpec) winExprSpec;
HiveParserASTNode windowProjAst = wFnSpec.getExpression();
// TODO: do we need to get to child?
int wndSpecASTIndx = getWindowSpecIndx(windowProjAst);
// 2. Get Hive Aggregate Info
AggInfo hiveAggInfo = getHiveAggInfo(windowProjAst, wndSpecASTIndx - 1, relToRowResolver.get(srcRel), (HiveParserWindowingSpec.WindowFunctionSpec) winExprSpec, semanticAnalyzer, frameworkConfig, cluster);
// 3. Get Calcite Return type for Agg Fn
RelDataType calciteAggFnRetType = HiveParserUtils.toRelDataType(hiveAggInfo.getReturnType(), cluster.getTypeFactory());
// 4. Convert Agg Fn args to Calcite
Map<String, Integer> posMap = relToHiveColNameCalcitePosMap.get(srcRel);
HiveParserRexNodeConverter converter = new HiveParserRexNodeConverter(cluster, srcRel.getRowType(), posMap, 0, false, funcConverter);
List<RexNode> calciteAggFnArgs = new ArrayList<>();
List<RelDataType> calciteAggFnArgTypes = new ArrayList<>();
for (int i = 0; i < hiveAggInfo.getAggParams().size(); i++) {
calciteAggFnArgs.add(converter.convert(hiveAggInfo.getAggParams().get(i)));
calciteAggFnArgTypes.add(HiveParserUtils.toRelDataType(hiveAggInfo.getAggParams().get(i).getTypeInfo(), cluster.getTypeFactory()));
}
// 5. Get Calcite Agg Fn
final SqlAggFunction calciteAggFn = HiveParserSqlFunctionConverter.getCalciteAggFn(hiveAggInfo.getUdfName(), hiveAggInfo.isDistinct(), calciteAggFnArgTypes, calciteAggFnRetType);
// 6. Translate Window spec
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserWindowingSpec.WindowSpec wndSpec = ((HiveParserWindowingSpec.WindowFunctionSpec) winExprSpec).getWindowSpec();
List<RexNode> partitionKeys = getPartitionKeys(wndSpec.getPartition(), converter, inputRR, new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster), semanticAnalyzer);
List<RexFieldCollation> orderKeys = getOrderKeys(wndSpec.getOrder(), converter, inputRR, new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster), semanticAnalyzer);
RexWindowBound lowerBound = getBound(wndSpec.getWindowFrame().getStart(), cluster);
RexWindowBound upperBound = getBound(wndSpec.getWindowFrame().getEnd(), cluster);
boolean isRows = wndSpec.getWindowFrame().getWindowType() == HiveParserWindowingSpec.WindowType.ROWS;
window = HiveParserUtils.makeOver(cluster.getRexBuilder(), calciteAggFnRetType, calciteAggFn, calciteAggFnArgs, partitionKeys, orderKeys, lowerBound, upperBound, isRows, true, false, false, false);
window = window.accept(funcConverter);
} else {
throw new SemanticException("Unsupported window Spec");
}
return new Pair<>(window, HiveParserTypeConverter.convert(window.getType()));
}
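For reference, the core of step 6 above is the construction of a Calcite OVER call. Below is a minimal, self-contained sketch of assembling such a windowed aggregate RexNode directly with Calcite's RexBuilder.makeOver. This is not Flink code: the helper name sumOverSketch and the SUM/BIGINT choices are illustrative assumptions, and it assumes a Calcite version (1.24+) that provides RexWindowBounds and the ignoreNulls parameter. HiveParserUtils.makeOver in the snippet presumably wraps a call of this shape after resolving the Hive UDAF to a Calcite SqlAggFunction.
import java.util.Collections;
import com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexFieldCollation;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexWindowBounds;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;
// SUM(aggArg) OVER (PARTITION BY partitionKey ORDER BY orderKey
//                   ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
static RexNode sumOverSketch(RelOptCluster cluster, RexNode aggArg, RexNode partitionKey, RexNode orderKey) {
    RexBuilder rexBuilder = cluster.getRexBuilder();
    RelDataType retType = cluster.getTypeFactory().createSqlType(SqlTypeName.BIGINT);
    return rexBuilder.makeOver(
            retType,
            SqlStdOperatorTable.SUM,                // the aggregate function
            ImmutableList.of(aggArg),               // aggregate arguments
            ImmutableList.of(partitionKey),         // partition keys
            ImmutableList.of(new RexFieldCollation(orderKey, Collections.emptySet())),
            RexWindowBounds.UNBOUNDED_PRECEDING,    // lower bound
            RexWindowBounds.CURRENT_ROW,            // upper bound
            true,   // ROWS (not RANGE)
            true,   // allowPartial
            false,  // nullWhenCountZero
            false,  // distinct
            false); // ignoreNulls
}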
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in the Apache Flink project.
Class HiveParserCalcitePlanner, method genDistSortBy.
// Generate the plan for SORT BY, CLUSTER BY and DISTRIBUTE BY. This is basically the same as
// generating the ORDER BY plan.
// Should refactor to combine them.
private Pair<RelNode, RelNode> genDistSortBy(HiveParserQB qb, RelNode srcRel, boolean outermostOB) throws SemanticException {
RelNode res = null;
RelNode originalInput = null;
HiveParserQBParseInfo qbp = qb.getParseInfo();
String destClause = qbp.getClauseNames().iterator().next();
HiveParserASTNode sortAST = qbp.getSortByForClause(destClause);
HiveParserASTNode distAST = qbp.getDistributeByForClause(destClause);
HiveParserASTNode clusterAST = qbp.getClusterByForClause(destClause);
if (sortAST != null || distAST != null || clusterAST != null) {
List<RexNode> virtualCols = new ArrayList<>();
List<Pair<HiveParserASTNode, TypeInfo>> vcASTAndType = new ArrayList<>();
List<RelFieldCollation> fieldCollations = new ArrayList<>();
List<Integer> distKeys = new ArrayList<>();
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserRexNodeConverter converter = new HiveParserRexNodeConverter(cluster, srcRel.getRowType(), relToHiveColNameCalcitePosMap.get(srcRel), 0, false, funcConverter);
int numSrcFields = srcRel.getRowType().getFieldCount();
// handle cluster by
if (clusterAST != null) {
if (sortAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and SORT BY");
}
if (distAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and DISTRIBUTE BY");
}
for (Node node : clusterAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(childAST, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(childAST);
if (childNodeDesc == null) {
throw new SemanticException("Invalid CLUSTER BY expression: " + childAST.toString());
}
RexNode childRexNode = converter.convert(childNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, childNodeDesc.getTypeInfo()));
}
// cluster by doesn't support specifying ASC/DESC or NULLS FIRST/LAST, so use
// default values
fieldCollations.add(new RelFieldCollation(fieldIndex, RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST));
distKeys.add(fieldIndex);
}
} else {
// handle sort by
if (sortAST != null) {
for (Node node : sortAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
HiveParserASTNode nullOrderAST = (HiveParserASTNode) childAST.getChild(0);
HiveParserASTNode fieldAST = (HiveParserASTNode) nullOrderAST.getChild(0);
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(fieldAST, inputRR);
ExprNodeDesc fieldNodeDesc = astToExprNodeDesc.get(fieldAST);
if (fieldNodeDesc == null) {
throw new SemanticException("Invalid sort by expression: " + fieldAST.toString());
}
RexNode childRexNode = converter.convert(fieldNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, fieldNodeDesc.getTypeInfo()));
}
RelFieldCollation.Direction direction = RelFieldCollation.Direction.DESCENDING;
if (childAST.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
direction = RelFieldCollation.Direction.ASCENDING;
}
RelFieldCollation.NullDirection nullOrder;
if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_FIRST) {
nullOrder = RelFieldCollation.NullDirection.FIRST;
} else if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_LAST) {
nullOrder = RelFieldCollation.NullDirection.LAST;
} else {
throw new SemanticException("Unexpected null ordering option: " + nullOrderAST.getType());
}
fieldCollations.add(new RelFieldCollation(fieldIndex, direction, nullOrder));
}
}
// handle distribute by
if (distAST != null) {
for (Node node : distAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(childAST, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(childAST);
if (childNodeDesc == null) {
throw new SemanticException("Invalid DISTRIBUTE BY expression: " + childAST.toString());
}
RexNode childRexNode = converter.convert(childNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, childNodeDesc.getTypeInfo()));
}
distKeys.add(fieldIndex);
}
}
}
Preconditions.checkState(!fieldCollations.isEmpty() || !distKeys.isEmpty(), "Both field collations and dist keys are empty");
// add child SEL if needed
RelNode realInput = srcRel;
HiveParserRowResolver outputRR = new HiveParserRowResolver();
if (!virtualCols.isEmpty()) {
List<RexNode> originalInputRefs = srcRel.getRowType().getFieldList().stream().map(input -> new RexInputRef(input.getIndex(), input.getType())).collect(Collectors.toList());
HiveParserRowResolver addedProjectRR = new HiveParserRowResolver();
if (!HiveParserRowResolver.add(addedProjectRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
int vColPos = inputRR.getRowSchema().getSignature().size();
for (Pair<HiveParserASTNode, TypeInfo> astTypePair : vcASTAndType) {
addedProjectRR.putExpression(astTypePair.getKey(), new ColumnInfo(getColumnInternalName(vColPos), astTypePair.getValue(), null, false));
vColPos++;
}
realInput = genSelectRelNode(CompositeList.of(originalInputRefs, virtualCols), addedProjectRR, srcRel);
if (outermostOB) {
if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
} else {
if (!HiveParserRowResolver.add(outputRR, addedProjectRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
}
originalInput = srcRel;
} else {
if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
}
// create rel node
RelTraitSet traitSet = cluster.traitSet();
RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
res = LogicalDistribution.create(realInput, canonizedCollation, distKeys);
Map<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR);
relToRowResolver.put(res, outputRR);
relToHiveColNameCalcitePosMap.put(res, hiveColNameCalcitePosMap);
}
return (new Pair<>(res, originalInput));
}
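The distribution node created above (LogicalDistribution) is Flink-internal, but the collation handling is plain Calcite. Below is a minimal sketch, with a hypothetical helper name rather than Flink code, of turning a single field index into a sort node using the same ASCENDING / NULLS FIRST defaults the method applies to CLUSTER BY keys.
import java.util.Collections;
import org.apache.calcite.rel.RelCollation;
import org.apache.calcite.rel.RelCollations;
import org.apache.calcite.rel.RelFieldCollation;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.logical.LogicalSort;
static RelNode sortByFieldSketch(RelNode input, int fieldIndex) {
    // CLUSTER BY does not allow ASC/DESC or NULLS FIRST/LAST, so use the defaults
    RelFieldCollation fieldCollation = new RelFieldCollation(
            fieldIndex,
            RelFieldCollation.Direction.ASCENDING,
            RelFieldCollation.NullDirection.FIRST);
    RelCollation collation = RelCollations.of(Collections.singletonList(fieldCollation));
    // no offset/fetch: a pure sort on the given field
    return LogicalSort.create(input, collation, null, null);
}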
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in the Apache Flink project.
Class HiveParserCalcitePlanner, method genSelectLogicalPlan.
// NOTE: there can only be one select clause since we don't handle multi destination insert.
private RelNode genSelectLogicalPlan(HiveParserQB qb, RelNode srcRel, RelNode starSrcRel, Map<String, Integer> outerNameToPos, HiveParserRowResolver outerRR) throws SemanticException {
// 0. Generate a Select Node for Windowing
// Exclude the newly-generated select columns from */etc. resolution.
HashSet<ColumnInfo> excludedColumns = new HashSet<>();
RelNode selForWindow = genSelectForWindowing(qb, srcRel, excludedColumns);
srcRel = (selForWindow == null) ? srcRel : selForWindow;
ArrayList<ExprNodeDesc> exprNodeDescs = new ArrayList<>();
// 1. Get Select Expression List
HiveParserQBParseInfo qbp = qb.getParseInfo();
String selClauseName = qbp.getClauseNames().iterator().next();
HiveParserASTNode selExprList = qbp.getSelForClause(selClauseName);
// make sure that if there is a subquery, it is a top-level expression
HiveParserSubQueryUtils.checkForTopLevelSubqueries(selExprList);
final boolean cubeRollupGrpSetPresent = !qbp.getDestRollups().isEmpty() || !qbp.getDestGroupingSets().isEmpty() || !qbp.getDestCubes().isEmpty();
// 3. Query Hints
int posn = 0;
boolean hintPresent = selExprList.getChild(0).getType() == HiveASTParser.QUERY_HINT;
if (hintPresent) {
posn++;
}
// 4. Bailout if select involves Transform
boolean isInTransform = selExprList.getChild(posn).getChild(0).getType() == HiveASTParser.TOK_TRANSFORM;
if (isInTransform) {
String msg = "SELECT TRANSFORM is currently not supported in CBO, turn off cbo to use TRANSFORM.";
throw new SemanticException(msg);
}
// 2. Row resolvers for input, output
HiveParserRowResolver outRR = new HiveParserRowResolver();
Integer pos = 0;
// TODO: will this also fix windowing? try
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel), starRR = inputRR;
if (starSrcRel != null) {
starRR = relToRowResolver.get(starSrcRel);
}
// 5. Check if select involves UDTF
String udtfTableAlias = null;
SqlOperator udtfOperator = null;
String genericUDTFName = null;
ArrayList<String> udtfColAliases = new ArrayList<>();
HiveParserASTNode expr = (HiveParserASTNode) selExprList.getChild(posn).getChild(0);
int exprType = expr.getType();
if (exprType == HiveASTParser.TOK_FUNCTION || exprType == HiveASTParser.TOK_FUNCTIONSTAR) {
String funcName = HiveParserTypeCheckProcFactory.DefaultExprProcessor.getFunctionText(expr, true);
// we can't just try to get the table function here because the operator table throws an
// exception if it's not a table function
SqlOperator sqlOperator = HiveParserUtils.getAnySqlOperator(funcName, frameworkConfig.getOperatorTable());
if (HiveParserUtils.isUDTF(sqlOperator)) {
LOG.debug("Found UDTF " + funcName);
udtfOperator = sqlOperator;
genericUDTFName = funcName;
if (!HiveParserUtils.isNative(sqlOperator)) {
semanticAnalyzer.unparseTranslator.addIdentifierTranslation((HiveParserASTNode) expr.getChild(0));
}
if (exprType == HiveASTParser.TOK_FUNCTIONSTAR) {
semanticAnalyzer.genColListRegex(".*", null, (HiveParserASTNode) expr.getChild(0), exprNodeDescs, null, inputRR, starRR, pos, outRR, qb.getAliases(), false);
}
}
}
if (udtfOperator != null) {
// Only support a single expression when it's a UDTF
if (selExprList.getChildCount() > 1) {
throw new SemanticException(generateErrorMessage((HiveParserASTNode) selExprList.getChild(1), ErrorMsg.UDTF_MULTIPLE_EXPR.getMsg()));
}
HiveParserASTNode selExpr = (HiveParserASTNode) selExprList.getChild(posn);
// column names can also be inferred from the result of the UDTF
for (int i = 1; i < selExpr.getChildCount(); i++) {
HiveParserASTNode selExprChild = (HiveParserASTNode) selExpr.getChild(i);
switch(selExprChild.getType()) {
case HiveASTParser.Identifier:
udtfColAliases.add(unescapeIdentifier(selExprChild.getText().toLowerCase()));
semanticAnalyzer.unparseTranslator.addIdentifierTranslation(selExprChild);
break;
case HiveASTParser.TOK_TABALIAS:
assert (selExprChild.getChildCount() == 1);
udtfTableAlias = unescapeIdentifier(selExprChild.getChild(0).getText());
qb.addAlias(udtfTableAlias);
semanticAnalyzer.unparseTranslator.addIdentifierTranslation((HiveParserASTNode) selExprChild.getChild(0));
break;
default:
throw new SemanticException("Find invalid token type " + selExprChild.getType() + " in UDTF.");
}
}
LOG.debug("UDTF table alias is " + udtfTableAlias);
LOG.debug("UDTF col aliases are " + udtfColAliases);
}
// 6. Iterate over all expression (after SELECT)
HiveParserASTNode exprList;
if (udtfOperator != null) {
exprList = expr;
} else {
exprList = selExprList;
}
// For UDTF's, skip the function name to get the expressions
int startPos = udtfOperator != null ? posn + 1 : posn;
// track the col aliases provided by user
List<String> colAliases = new ArrayList<>();
for (int i = startPos; i < exprList.getChildCount(); ++i) {
colAliases.add(null);
// 6.1 child can be EXPR AS ALIAS, or EXPR.
HiveParserASTNode child = (HiveParserASTNode) exprList.getChild(i);
boolean hasAsClause = child.getChildCount() == 2;
// 6.2 EXPR AS (ALIAS,...) parses, but multiple aliases are only allowed for UDTFs.
if (udtfOperator == null && child.getChildCount() > 2) {
throw new SemanticException(generateErrorMessage((HiveParserASTNode) child.getChild(2), ErrorMsg.INVALID_AS.getMsg()));
}
String tabAlias;
String colAlias;
if (udtfOperator != null) {
tabAlias = null;
colAlias = semanticAnalyzer.getAutogenColAliasPrfxLbl() + i;
expr = child;
} else {
// 6.3 Get rid of TOK_SELEXPR
expr = (HiveParserASTNode) child.getChild(0);
String[] colRef = HiveParserUtils.getColAlias(child, semanticAnalyzer.getAutogenColAliasPrfxLbl(), inputRR, semanticAnalyzer.autogenColAliasPrfxIncludeFuncName(), i);
tabAlias = colRef[0];
colAlias = colRef[1];
if (hasAsClause) {
colAliases.set(colAliases.size() - 1, colAlias);
semanticAnalyzer.unparseTranslator.addIdentifierTranslation((HiveParserASTNode) child.getChild(1));
}
}
Map<HiveParserASTNode, RelNode> subQueryToRelNode = new HashMap<>();
boolean isSubQuery = genSubQueryRelNode(qb, expr, srcRel, false, subQueryToRelNode);
if (isSubQuery) {
ExprNodeDesc subQueryDesc = semanticAnalyzer.genExprNodeDesc(expr, relToRowResolver.get(srcRel), outerRR, subQueryToRelNode, false);
exprNodeDescs.add(subQueryDesc);
ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), subQueryDesc.getWritableObjectInspector(), tabAlias, false);
if (!outRR.putWithCheck(tabAlias, colAlias, null, colInfo)) {
throw new SemanticException("Cannot add column to RR: " + tabAlias + "." + colAlias + " => " + colInfo + " due to duplication, see previous warnings");
}
} else {
// 6.4 Build ExprNode corresponding to columns
if (expr.getType() == HiveASTParser.TOK_ALLCOLREF) {
pos = semanticAnalyzer.genColListRegex(".*", expr.getChildCount() == 0 ? null : HiveParserBaseSemanticAnalyzer.getUnescapedName((HiveParserASTNode) expr.getChild(0)).toLowerCase(), expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR, qb.getAliases(), false);
} else if (expr.getType() == HiveASTParser.TOK_TABLE_OR_COL && !hasAsClause && !inputRR.getIsExprResolver() && HiveParserUtils.isRegex(unescapeIdentifier(expr.getChild(0).getText()), semanticAnalyzer.getConf())) {
// In case the expression is a regex COL. This can only happen without AS clause
// We don't allow this for ExprResolver - the Group By case
pos = semanticAnalyzer.genColListRegex(unescapeIdentifier(expr.getChild(0).getText()), null, expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR, qb.getAliases(), true);
} else if (expr.getType() == HiveASTParser.DOT && expr.getChild(0).getType() == HiveASTParser.TOK_TABLE_OR_COL && inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase())) && !hasAsClause && !inputRR.getIsExprResolver() && HiveParserUtils.isRegex(unescapeIdentifier(expr.getChild(1).getText()), semanticAnalyzer.getConf())) {
// In case the expression is TABLE.COL (col can be regex). This can only happen
// without AS clause
// We don't allow this for ExprResolver - the Group By case
pos = semanticAnalyzer.genColListRegex(unescapeIdentifier(expr.getChild(1).getText()), unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()), expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR, qb.getAliases(), false);
} else if (HiveASTParseUtils.containsTokenOfType(expr, HiveASTParser.TOK_FUNCTIONDI) && !(srcRel instanceof Aggregate)) {
// Likely a malformed query, e.g. select hash(distinct c1) from t1;
throw new SemanticException("Distinct without an aggregation.");
} else {
// Case when this is an expression
HiveParserTypeCheckCtx typeCheckCtx = new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster);
// We allow stateful functions in the SELECT list (but nowhere else)
typeCheckCtx.setAllowStatefulFunctions(true);
if (!qbp.getDestToGroupBy().isEmpty()) {
// Special handling of grouping function
expr = rewriteGroupingFunctionAST(getGroupByForClause(qbp, selClauseName), expr, !cubeRollupGrpSetPresent);
}
ExprNodeDesc exprDesc = semanticAnalyzer.genExprNodeDesc(expr, inputRR, typeCheckCtx);
String recommended = semanticAnalyzer.recommendName(exprDesc, colAlias);
if (recommended != null && outRR.get(null, recommended) == null) {
colAlias = recommended;
}
exprNodeDescs.add(exprDesc);
ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), exprDesc.getWritableObjectInspector(), tabAlias, false);
colInfo.setSkewedCol(exprDesc instanceof ExprNodeColumnDesc && ((ExprNodeColumnDesc) exprDesc).isSkewedCol());
// Hive errors out in case of duplication. We allow it and see what happens.
outRR.put(tabAlias, colAlias, colInfo);
if (exprDesc instanceof ExprNodeColumnDesc) {
ExprNodeColumnDesc colExp = (ExprNodeColumnDesc) exprDesc;
String[] altMapping = inputRR.getAlternateMappings(colExp.getColumn());
if (altMapping != null) {
// TODO: this can overwrite the mapping. Should this be allowed?
outRR.put(altMapping[0], altMapping[1], colInfo);
}
}
pos++;
}
}
}
// 7. Convert Hive projections to Calcite
List<RexNode> calciteColLst = new ArrayList<>();
HiveParserRexNodeConverter rexNodeConverter = new HiveParserRexNodeConverter(cluster, srcRel.getRowType(), outerNameToPos, buildHiveColNameToInputPosMap(exprNodeDescs, inputRR), relToRowResolver.get(srcRel), outerRR, 0, false, subqueryId, funcConverter);
for (ExprNodeDesc colExpr : exprNodeDescs) {
RexNode calciteCol = rexNodeConverter.convert(colExpr);
calciteCol = convertNullLiteral(calciteCol).accept(funcConverter);
calciteColLst.add(calciteCol);
}
// 8. Build Calcite Rel
RelNode res;
if (udtfOperator != null) {
// The basic idea for CBO support of UDTF is to treat UDTF as a special project.
res = genUDTFPlan(udtfOperator, genericUDTFName, udtfTableAlias, udtfColAliases, qb, calciteColLst, outRR.getColumnInfos(), srcRel, true, false);
} else {
// Skip the identity projection for a sub-query (outerRR != null); the redundant Project would
// feed the select-distinct rewrite below and thus introduces an unnecessary agg node.
if (HiveParserUtils.isIdentityProject(srcRel, calciteColLst, colAliases) && outerRR != null) {
res = srcRel;
} else {
res = genSelectRelNode(calciteColLst, outRR, srcRel);
}
}
// 9. Handle select distinct as GBY if there exist windowing functions
if (selForWindow != null && selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI) {
ImmutableBitSet groupSet = ImmutableBitSet.range(res.getRowType().getFieldList().size());
res = LogicalAggregate.create(res, groupSet, Collections.emptyList(), Collections.emptyList());
HiveParserRowResolver groupByOutputRowResolver = new HiveParserRowResolver();
for (int i = 0; i < outRR.getColumnInfos().size(); i++) {
ColumnInfo colInfo = outRR.getColumnInfos().get(i);
ColumnInfo newColInfo = new ColumnInfo(colInfo.getInternalName(), colInfo.getType(), colInfo.getTabAlias(), colInfo.getIsVirtualCol());
groupByOutputRowResolver.put(colInfo.getTabAlias(), colInfo.getAlias(), newColInfo);
}
relToHiveColNameCalcitePosMap.put(res, buildHiveToCalciteColumnMap(groupByOutputRowResolver));
relToRowResolver.put(res, groupByOutputRowResolver);
}
return res;
}
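genSelectRelNode itself is not shown in this excerpt; conceptually it projects the converted RexNodes on top of the input and registers the new row resolver. A minimal Calcite-only sketch of the identity case follows; the helper name is hypothetical, and it assumes Calcite 1.23+ where LogicalProject.create takes a hint list. This is exactly the situation HiveParserUtils.isIdentityProject guards against: for sub-queries such a projection adds nothing, so srcRel is returned directly.
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.logical.LogicalProject;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexNode;
static RelNode identityProjectSketch(RelNode input) {
    // project every input field unchanged, keeping the original field names
    List<RexNode> projects = input.getRowType().getFieldList().stream()
            .map(f -> (RexNode) new RexInputRef(f.getIndex(), f.getType()))
            .collect(Collectors.toList());
    List<String> fieldNames = input.getRowType().getFieldNames();
    return LogicalProject.create(input, Collections.emptyList(), projects, fieldNames);
}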
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in the Apache Flink project.
Class HiveParserCalcitePlanner, method genTableLogicalPlan.
private RelNode genTableLogicalPlan(String tableAlias, HiveParserQB qb) throws SemanticException {
HiveParserRowResolver rowResolver = new HiveParserRowResolver();
try {
// 2. if returnpath is on and hivetestmode is on bail
if (qb.getParseInfo().needTableSample(tableAlias) || semanticAnalyzer.getNameToSplitSampleMap().containsKey(tableAlias) || Boolean.parseBoolean(semanticAnalyzer.getConf().get("hive.cbo.returnpath.hiveop", "false")) && semanticAnalyzer.getConf().getBoolVar(HiveConf.ConfVars.HIVETESTMODE)) {
String msg = String.format("Table Sample specified for %s." + " Currently we don't support Table Sample clauses in CBO," + " turn off cbo for queries on tableSamples.", tableAlias);
LOG.debug(msg);
throw new SemanticException(msg);
}
// 2. Get Table Metadata
Table table = qb.getMetaData().getSrcForAlias(tableAlias);
if (table.isTemporary()) {
// Hive creates a temp table for VALUES; we need to convert it to LogicalValues
RelNode values = genValues(tableAlias, table, rowResolver, cluster, getQB().getValuesTableToData().get(tableAlias));
relToRowResolver.put(values, rowResolver);
relToHiveColNameCalcitePosMap.put(values, buildHiveToCalciteColumnMap(rowResolver));
return values;
} else {
// 3. Get Table Logical Schema (Row Type)
// NOTE: Table logical schema = Non Partition Cols + Partition Cols + Virtual Cols
// 3.1 Add Column info for non partition cols (Object Inspector fields)
StructObjectInspector rowObjectInspector = (StructObjectInspector) table.getDeserializer().getObjectInspector();
List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
ColumnInfo colInfo;
String colName;
for (StructField field : fields) {
colName = field.getFieldName();
colInfo = new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(field.getFieldObjectInspector()), tableAlias, false);
colInfo.setSkewedCol(HiveParserUtils.isSkewedCol(tableAlias, qb, colName));
rowResolver.put(tableAlias, colName, colInfo);
}
// 3.2 Add column info corresponding to partition columns
for (FieldSchema partCol : table.getPartCols()) {
colName = partCol.getName();
colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(partCol.getType()), tableAlias, true);
rowResolver.put(tableAlias, colName, colInfo);
}
final TableType tableType = obtainTableType(table);
Preconditions.checkArgument(tableType == TableType.NATIVE, "Only native tables are supported");
// Build Hive Table Scan Rel
RelNode tableRel = catalogReader.getTable(Arrays.asList(catalogManager.getCurrentCatalog(), table.getDbName(), table.getTableName())).toRel(ViewExpanders.toRelContext(flinkPlanner.createToRelContext(), cluster));
// 6. Add Schema(RR) to RelNode-Schema map
Map<String, Integer> hiveToCalciteColMap = buildHiveToCalciteColumnMap(rowResolver);
relToRowResolver.put(tableRel, rowResolver);
relToHiveColNameCalcitePosMap.put(tableRel, hiveToCalciteColMap);
return tableRel;
}
} catch (Exception e) {
if (e instanceof SemanticException) {
throw (SemanticException) e;
} else {
throw (new RuntimeException(e));
}
}
}
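The helper buildHiveToCalciteColumnMap is not included in this excerpt. Based on how it is used here (mapping Hive internal column names to Calcite field positions derived from a HiveParserRowResolver), a plausible minimal sketch is shown below; the name and exact behaviour are assumptions, not the Flink implementation.
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
static Map<String, Integer> buildHiveToCalciteColumnMapSketch(HiveParserRowResolver rowResolver) {
    Map<String, Integer> nameToPos = new LinkedHashMap<>();
    int pos = 0;
    for (ColumnInfo columnInfo : rowResolver.getColumnInfos()) {
        // Calcite field positions follow the row-resolver column order
        nameToPos.put(columnInfo.getInternalName(), pos++);
    }
    return nameToPos;
}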
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in the Apache Flink project.
Class HiveParserCalcitePlanner, method genLogicalPlan.
private RelNode genLogicalPlan(HiveParserQB qb, boolean outerMostQB, Map<String, Integer> outerNameToPosMap, HiveParserRowResolver outerRR) throws SemanticException {
RelNode res;
// First generate all the opInfos for the elements in the from clause
Map<String, RelNode> aliasToRel = new HashMap<>();
// 0. Check if we can handle the SubQuery;
// canHandleQbForCbo returns null if the query can be handled.
String reason = HiveParserUtils.canHandleQbForCbo(semanticAnalyzer.getQueryProperties());
if (reason != null) {
String msg = "CBO can not handle Sub Query" + " because it: " + reason;
throw new SemanticException(msg);
}
// 1.1. Recurse over the subqueries to fill the subquery part of the plan
for (String subqAlias : qb.getSubqAliases()) {
HiveParserQBExpr qbexpr = qb.getSubqForAlias(subqAlias);
RelNode relNode = genLogicalPlan(qbexpr);
aliasToRel.put(subqAlias, relNode);
if (qb.getViewToTabSchema().containsKey(subqAlias)) {
if (!(relNode instanceof Project)) {
throw new SemanticException("View " + subqAlias + " is corresponding to " + relNode.toString() + ", rather than a Project.");
}
}
}
// 1.2 Recurse over all the source tables
for (String tableAlias : qb.getTabAliases()) {
RelNode op = genTableLogicalPlan(tableAlias, qb);
aliasToRel.put(tableAlias, op);
}
if (aliasToRel.isEmpty()) {
RelNode dummySrc = LogicalValues.createOneRow(cluster);
aliasToRel.put(HiveParserSemanticAnalyzer.DUMMY_TABLE, dummySrc);
HiveParserRowResolver dummyRR = new HiveParserRowResolver();
dummyRR.put(HiveParserSemanticAnalyzer.DUMMY_TABLE, "dummy_col", new ColumnInfo(getColumnInternalName(0), TypeInfoFactory.intTypeInfo, HiveParserSemanticAnalyzer.DUMMY_TABLE, false));
relToRowResolver.put(dummySrc, dummyRR);
relToHiveColNameCalcitePosMap.put(dummySrc, buildHiveToCalciteColumnMap(dummyRR));
}
if (!qb.getParseInfo().getAliasToLateralViews().isEmpty()) {
// process lateral views
res = genLateralViewPlan(qb, aliasToRel);
} else if (qb.getParseInfo().getJoinExpr() != null) {
// 1.3 process join
res = genJoinLogicalPlan(qb.getParseInfo().getJoinExpr(), aliasToRel);
} else {
// If no join then there should only be either 1 TS or 1 SubQuery
res = aliasToRel.values().iterator().next();
}
// 2. Build Rel for where Clause
RelNode filterRel = genFilterLogicalPlan(qb, res, outerNameToPosMap, outerRR);
res = (filterRel == null) ? res : filterRel;
RelNode starSrcRel = res;
// 3. Build Rel for GB Clause
RelNode gbRel = genGBLogicalPlan(qb, res);
res = gbRel == null ? res : gbRel;
// 4. Build Rel for GB Having Clause
RelNode gbHavingRel = genGBHavingLogicalPlan(qb, res);
res = gbHavingRel == null ? res : gbHavingRel;
// 5. Build Rel for Select Clause
RelNode selectRel = genSelectLogicalPlan(qb, res, starSrcRel, outerNameToPosMap, outerRR);
res = selectRel == null ? res : selectRel;
// 6. Build Rel for OB Clause
Pair<Sort, RelNode> obAndTopProj = genOBLogicalPlan(qb, res, outerMostQB);
Sort orderRel = obAndTopProj.getKey();
RelNode topConstrainingProjRel = obAndTopProj.getValue();
res = orderRel == null ? res : orderRel;
// Build Rel for SortBy/ClusterBy/DistributeBy. It can happen only if we don't have OrderBy.
if (orderRel == null) {
Pair<RelNode, RelNode> distAndTopProj = genDistSortBy(qb, res, outerMostQB);
RelNode distRel = distAndTopProj.getKey();
topConstrainingProjRel = distAndTopProj.getValue();
res = distRel == null ? res : distRel;
}
// 7. Build Rel for Limit Clause
Sort limitRel = genLimitLogicalPlan(qb, res);
if (limitRel != null) {
if (orderRel != null) {
// merge limit into the order-by node
HiveParserRowResolver orderRR = relToRowResolver.remove(orderRel);
Map<String, Integer> orderColNameToPos = relToHiveColNameCalcitePosMap.remove(orderRel);
res = LogicalSort.create(orderRel.getInput(), orderRel.collation, limitRel.offset, limitRel.fetch);
relToRowResolver.put(res, orderRR);
relToHiveColNameCalcitePosMap.put(res, orderColNameToPos);
relToRowResolver.remove(limitRel);
relToHiveColNameCalcitePosMap.remove(limitRel);
} else {
res = limitRel;
}
}
// 8. Introduce top constraining select if needed.
if (topConstrainingProjRel != null) {
List<RexNode> originalInputRefs = topConstrainingProjRel.getRowType().getFieldList().stream().map(input -> new RexInputRef(input.getIndex(), input.getType())).collect(Collectors.toList());
HiveParserRowResolver topConstrainingProjRR = new HiveParserRowResolver();
if (!HiveParserRowResolver.add(topConstrainingProjRR, relToRowResolver.get(topConstrainingProjRel))) {
LOG.warn("Duplicates detected when adding columns to RR: see previous message");
}
res = genSelectRelNode(originalInputRefs, topConstrainingProjRR, res);
}
// TODO: cleanup this
if (qb.getParseInfo().getAlias() != null) {
HiveParserRowResolver rr = relToRowResolver.get(res);
HiveParserRowResolver newRR = new HiveParserRowResolver();
String alias = qb.getParseInfo().getAlias();
for (ColumnInfo colInfo : rr.getColumnInfos()) {
String name = colInfo.getInternalName();
String[] tmp = rr.reverseLookup(name);
if ("".equals(tmp[0]) || tmp[1] == null) {
// ast expression is not a valid column name for table
tmp[1] = colInfo.getInternalName();
}
ColumnInfo newColInfo = new ColumnInfo(colInfo);
newColInfo.setTabAlias(alias);
newRR.put(alias, tmp[1], newColInfo);
}
relToRowResolver.put(res, newRR);
relToHiveColNameCalcitePosMap.put(res, buildHiveToCalciteColumnMap(newRR));
}
if (LOG.isDebugEnabled()) {
LOG.debug("Created Plan for Query Block " + qb.getId());
}
semanticAnalyzer.setQB(qb);
return res;
}
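genLimitLogicalPlan is also not part of this excerpt. In Calcite, a LIMIT/OFFSET without an ORDER BY is typically modelled as a Sort with an empty collation, which is consistent with the merge in step 7 above (LogicalSort.create(orderRel.getInput(), orderRel.collation, limitRel.offset, limitRel.fetch)). The following is a minimal sketch under that assumption, with a hypothetical helper name.
import java.math.BigDecimal;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.rel.RelCollations;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.logical.LogicalSort;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
static RelNode limitSketch(RelOptCluster cluster, RelNode input, long offset, long fetch) {
    RexBuilder rexBuilder = cluster.getRexBuilder();
    RexNode offsetRex = rexBuilder.makeExactLiteral(BigDecimal.valueOf(offset));
    RexNode fetchRex = rexBuilder.makeExactLiteral(BigDecimal.valueOf(fetch));
    // an empty collation plus offset/fetch models a pure LIMIT node
    return LogicalSort.create(input, RelCollations.EMPTY, offsetRex, fetchRex);
}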