Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in project flink by apache.
The class HiveParserCalcitePlanner, method genOBLogicalPlan.
private Pair<Sort, RelNode> genOBLogicalPlan(HiveParserQB qb, RelNode srcRel, boolean outermostOB)
        throws SemanticException {
    Sort sortRel = null;
    RelNode originalOBInput = null;
    HiveParserQBParseInfo qbp = qb.getParseInfo();
    String dest = qbp.getClauseNames().iterator().next();
    HiveParserASTNode obAST = qbp.getOrderByForClause(dest);
    if (obAST != null) {
        // 1. OB Expr sanity test
        // in strict mode, in the presence of order by, limit must be specified
        Integer limit = qb.getParseInfo().getDestLimit(dest);
        if (limit == null) {
            String mapRedMode = semanticAnalyzer.getConf().getVar(HiveConf.ConfVars.HIVEMAPREDMODE);
            boolean banLargeQuery =
                    Boolean.parseBoolean(
                            semanticAnalyzer.getConf().get("hive.strict.checks.large.query", "false"));
            if ("strict".equalsIgnoreCase(mapRedMode) || banLargeQuery) {
                throw new SemanticException(generateErrorMessage(obAST, "Order by-s without limit"));
            }
        }
        // 2. Walk through OB exprs and extract field collations and additional
        // virtual columns needed
        final List<RexNode> virtualCols = new ArrayList<>();
        final List<RelFieldCollation> fieldCollations = new ArrayList<>();
        int fieldIndex;
        List<Node> obASTExprLst = obAST.getChildren();
        HiveParserASTNode obASTExpr;
        HiveParserASTNode nullOrderASTExpr;
        List<Pair<HiveParserASTNode, TypeInfo>> vcASTAndType = new ArrayList<>();
        HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
        HiveParserRowResolver outputRR = new HiveParserRowResolver();
        HiveParserRexNodeConverter converter =
                new HiveParserRexNodeConverter(
                        cluster,
                        srcRel.getRowType(),
                        relToHiveColNameCalcitePosMap.get(srcRel),
                        0,
                        false,
                        funcConverter);
        int numSrcFields = srcRel.getRowType().getFieldCount();
        for (Node node : obASTExprLst) {
            // 2.1 Convert AST Expr to ExprNode
            obASTExpr = (HiveParserASTNode) node;
            nullOrderASTExpr = (HiveParserASTNode) obASTExpr.getChild(0);
            HiveParserASTNode ref = (HiveParserASTNode) nullOrderASTExpr.getChild(0);
            Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc =
                    semanticAnalyzer.genAllExprNodeDesc(ref, inputRR);
            ExprNodeDesc obExprNodeDesc = astToExprNodeDesc.get(ref);
            if (obExprNodeDesc == null) {
                throw new SemanticException("Invalid order by expression: " + obASTExpr.toString());
            }
            // 2.2 Convert ExprNode to RexNode
            RexNode rexNode = converter.convert(obExprNodeDesc).accept(funcConverter);
            // 2.3 Determine the index of the OB expr in the child schema; Calcite cannot
            // take a compound expr in OB unless the expr is already
            // present in the child (& hence we add a child Project Rel)
            if (rexNode instanceof RexInputRef) {
                fieldIndex = ((RexInputRef) rexNode).getIndex();
            } else {
                fieldIndex = numSrcFields + virtualCols.size();
                virtualCols.add(rexNode);
                vcASTAndType.add(new Pair<>(ref, obExprNodeDesc.getTypeInfo()));
            }
            // 2.4 Determine the Direction of order by
            RelFieldCollation.Direction direction = RelFieldCollation.Direction.DESCENDING;
            if (obASTExpr.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
                direction = RelFieldCollation.Direction.ASCENDING;
            }
            RelFieldCollation.NullDirection nullOrder;
            if (nullOrderASTExpr.getType() == HiveASTParser.TOK_NULLS_FIRST) {
                nullOrder = RelFieldCollation.NullDirection.FIRST;
            } else if (nullOrderASTExpr.getType() == HiveASTParser.TOK_NULLS_LAST) {
                nullOrder = RelFieldCollation.NullDirection.LAST;
            } else {
                throw new SemanticException(
                        "Unexpected null ordering option: " + nullOrderASTExpr.getType());
            }
            // 2.5 Add to field collations
            fieldCollations.add(new RelFieldCollation(fieldIndex, direction, nullOrder));
        }
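        // Example (illustrative, not from the source): for "SELECT a, b FROM t ORDER BY a + b",
        // the key "a + b" is not a plain input ref, so it is collected in virtualCols and its
        // fieldIndex points past the source fields; step 3 below adds a Project that
        // materializes it before the Sort.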
        // 3. Add Child Project Rel if needed, Generate Output RR, input Sel Rel
        // for top constraining Sel
        RelNode obInputRel = srcRel;
        if (!virtualCols.isEmpty()) {
            List<RexNode> originalInputRefs =
                    srcRel.getRowType().getFieldList().stream()
                            .map(input -> new RexInputRef(input.getIndex(), input.getType()))
                            .collect(Collectors.toList());
            HiveParserRowResolver obSyntheticProjectRR = new HiveParserRowResolver();
            if (!HiveParserRowResolver.add(obSyntheticProjectRR, inputRR)) {
                throw new SemanticException(
                        "Duplicates detected when adding columns to RR: see previous message");
            }
            int vcolPos = inputRR.getRowSchema().getSignature().size();
            for (Pair<HiveParserASTNode, TypeInfo> astTypePair : vcASTAndType) {
                obSyntheticProjectRR.putExpression(
                        astTypePair.getKey(),
                        new ColumnInfo(
                                getColumnInternalName(vcolPos), astTypePair.getValue(), null, false));
                vcolPos++;
            }
            obInputRel =
                    genSelectRelNode(
                            CompositeList.of(originalInputRefs, virtualCols),
                            obSyntheticProjectRR,
                            srcRel);
            if (outermostOB) {
                if (!HiveParserRowResolver.add(outputRR, inputRR)) {
                    throw new SemanticException(
                            "Duplicates detected when adding columns to RR: see previous message");
                }
            } else {
                if (!HiveParserRowResolver.add(outputRR, obSyntheticProjectRR)) {
                    throw new SemanticException(
                            "Duplicates detected when adding columns to RR: see previous message");
                }
            }
            originalOBInput = srcRel;
        } else {
            if (!HiveParserRowResolver.add(outputRR, inputRR)) {
                throw new SemanticException(
                        "Duplicates detected when adding columns to RR: see previous message");
            }
        }
        // 4. Construct SortRel
        RelTraitSet traitSet = cluster.traitSet();
        RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
        sortRel = LogicalSort.create(obInputRel, canonizedCollation, null, null);
        // 5. Update the maps
        Map<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR);
        relToRowResolver.put(sortRel, outputRR);
        relToHiveColNameCalcitePosMap.put(sortRel, hiveColNameCalcitePosMap);
    }
    return new Pair<>(sortRel, originalOBInput);
}
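For reference, a minimal sketch (not from the Flink source) of the collation object this method assembles, assuming two ORDER BY keys that are already plain input refs at positions 0 and 1, so no virtual columns or child Project are needed. RelCollations.of builds the same value as the RelCollationImpl.of call above.

import org.apache.calcite.rel.RelCollation;
import org.apache.calcite.rel.RelCollations;
import org.apache.calcite.rel.RelFieldCollation;

class ObCollationSketch {
    // Collation for "ORDER BY c0 ASC NULLS FIRST, c1 DESC NULLS LAST", with c0 and c1
    // at input positions 0 and 1. Passing it to LogicalSort.create(input, collation,
    // null, null) mirrors step 4 above: a Sort with no offset and no fetch.
    static RelCollation orderByCollation() {
        return RelCollations.of(
                new RelFieldCollation(
                        0,
                        RelFieldCollation.Direction.ASCENDING,
                        RelFieldCollation.NullDirection.FIRST),
                new RelFieldCollation(
                        1,
                        RelFieldCollation.Direction.DESCENDING,
                        RelFieldCollation.NullDirection.LAST));
    }
}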
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in project flink by apache.
The class HiveParserCalcitePlanner, method subqueryRestrictionCheck.
private void subqueryRestrictionCheck(
        HiveParserQB qb,
        HiveParserASTNode searchCond,
        RelNode srcRel,
        boolean forHavingClause,
        Set<HiveParserASTNode> corrScalarQueries)
        throws SemanticException {
    List<HiveParserASTNode> subQueriesInOriginalTree =
            HiveParserSubQueryUtils.findSubQueries(searchCond);
    HiveParserASTNode clonedSearchCond =
            (HiveParserASTNode) HiveParserSubQueryUtils.ADAPTOR.dupTree(searchCond);
    List<HiveParserASTNode> subQueries = HiveParserSubQueryUtils.findSubQueries(clonedSearchCond);
    for (int i = 0; i < subQueriesInOriginalTree.size(); i++) {
        int sqIdx = qb.incrNumSubQueryPredicates();
        HiveParserASTNode originalSubQueryAST = subQueriesInOriginalTree.get(i);
        HiveParserASTNode subQueryAST = subQueries.get(i);
        // HiveParserSubQueryUtils.rewriteParentQueryWhere(clonedSearchCond, subQueryAST);
        ObjectPair<Boolean, Integer> subqInfo = new ObjectPair<>(false, 0);
        if (!topLevelConjunctCheck(clonedSearchCond, subqInfo)) {
            // Restriction.7.h :: SubQuery predicates can appear only as top level conjuncts.
            throw new SemanticException(
                    HiveParserErrorMsg.getMsg(
                            ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION,
                            subQueryAST,
                            "Only SubQuery expressions that are top level conjuncts are allowed"));
        }
        HiveParserASTNode outerQueryExpr = (HiveParserASTNode) subQueryAST.getChild(2);
        if (outerQueryExpr != null && outerQueryExpr.getType() == HiveASTParser.TOK_SUBQUERY_EXPR) {
            throw new SemanticException(
                    HiveParserErrorMsg.getMsg(
                            ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION,
                            outerQueryExpr,
                            "IN/NOT IN subqueries are not allowed in LHS"));
        }
        HiveParserQBSubQuery subQuery =
                HiveParserSubQueryUtils.buildSubQuery(
                        sqIdx,
                        subQueryAST,
                        originalSubQueryAST,
                        semanticAnalyzer.ctx,
                        frameworkConfig,
                        cluster);
        HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
        String havingInputAlias = null;
        boolean isCorrScalarWithAgg =
                subQuery.subqueryRestrictionsCheck(inputRR, forHavingClause, havingInputAlias);
        if (isCorrScalarWithAgg) {
            corrScalarQueries.add(originalSubQueryAST);
        }
    }
}
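To make the Restriction.7.h check concrete, a hedged illustration; the SQL strings and the class name are purely illustrative, not part of the Flink source.

class SubqueryConjunctExamples {
    // Accepted: the subquery predicate is a top-level conjunct of the WHERE clause.
    static final String OK = "SELECT * FROM t WHERE t.x > 0 AND t.y IN (SELECT y FROM s)";
    // Rejected: the subquery predicate sits under an OR, so it is not a top-level
    // conjunct and subqueryRestrictionCheck throws UNSUPPORTED_SUBQUERY_EXPRESSION.
    static final String REJECTED = "SELECT * FROM t WHERE t.x > 0 OR t.y IN (SELECT y FROM s)";
}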
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in project flink by apache.
The class HiveParserCalcitePlanner, method genUDTFPlan.
private RelNode genUDTFPlan(
        SqlOperator sqlOperator,
        String genericUDTFName,
        String outputTableAlias,
        List<String> colAliases,
        HiveParserQB qb,
        List<RexNode> operands,
        List<ColumnInfo> opColInfos,
        RelNode input,
        boolean inSelect,
        boolean isOuter)
        throws SemanticException {
    Preconditions.checkState(!isOuter || !inSelect, "OUTER is not supported for SELECT UDTF");
    // No GROUP BY / DISTRIBUTE BY / SORT BY / CLUSTER BY
    HiveParserQBParseInfo qbp = qb.getParseInfo();
    if (inSelect && !qbp.getDestToGroupBy().isEmpty()) {
        throw new SemanticException(ErrorMsg.UDTF_NO_GROUP_BY.getMsg());
    }
    if (inSelect && !qbp.getDestToDistributeBy().isEmpty()) {
        throw new SemanticException(ErrorMsg.UDTF_NO_DISTRIBUTE_BY.getMsg());
    }
    if (inSelect && !qbp.getDestToSortBy().isEmpty()) {
        throw new SemanticException(ErrorMsg.UDTF_NO_SORT_BY.getMsg());
    }
    if (inSelect && !qbp.getDestToClusterBy().isEmpty()) {
        throw new SemanticException(ErrorMsg.UDTF_NO_CLUSTER_BY.getMsg());
    }
    if (inSelect && !qbp.getAliasToLateralViews().isEmpty()) {
        throw new SemanticException(ErrorMsg.UDTF_LATERAL_VIEW.getMsg());
    }
    LOG.debug("Table alias: " + outputTableAlias + " Col aliases: " + colAliases);
    // Create the object inspector for the input columns and initialize the UDTF
    RelDataType relDataType =
            HiveParserUtils.inferReturnTypeForOperands(sqlOperator, operands, cluster.getTypeFactory());
    DataType dataType = HiveParserUtils.toDataType(relDataType);
    StructObjectInspector outputOI =
            (StructObjectInspector)
                    HiveInspectors.getObjectInspector(HiveTypeUtil.toHiveTypeInfo(dataType, false));
    // this should only happen for select udtf
    if (outputTableAlias == null) {
        Preconditions.checkState(inSelect, "Table alias not specified for lateral view");
        String prefix = "select_" + genericUDTFName + "_alias_";
        int i = 0;
        while (qb.getAliases().contains(prefix + i)) {
            i++;
        }
        outputTableAlias = prefix + i;
    }
    if (colAliases.isEmpty()) {
        // user did not specify alias names, infer names from outputOI
        for (StructField field : outputOI.getAllStructFieldRefs()) {
            colAliases.add(field.getFieldName());
        }
    }
    // Make sure that the number of column aliases in the AS clause matches the number of
    // columns output by the UDTF
    int numOutputCols = outputOI.getAllStructFieldRefs().size();
    int numSuppliedAliases = colAliases.size();
    if (numOutputCols != numSuppliedAliases) {
        throw new SemanticException(
                ErrorMsg.UDTF_ALIAS_MISMATCH.getMsg(
                        "expected " + numOutputCols + " aliases but got " + numSuppliedAliases));
    }
    // Generate the output column infos / row resolver using internal names.
    ArrayList<ColumnInfo> udtfOutputCols = new ArrayList<>();
    Iterator<String> colAliasesIter = colAliases.iterator();
    for (StructField sf : outputOI.getAllStructFieldRefs()) {
        String colAlias = colAliasesIter.next();
        assert (colAlias != null);
        // Since the UDTF operator feeds into a LVJ operator that will rename all the
        // internal names, we can just use the field name from the UDTF's OI as the
        // internal name
        ColumnInfo col =
                new ColumnInfo(
                        sf.getFieldName(),
                        TypeInfoUtils.getTypeInfoFromObjectInspector(sf.getFieldObjectInspector()),
                        outputTableAlias,
                        false);
        udtfOutputCols.add(col);
    }
    // Create the row resolver for the table function scan
    HiveParserRowResolver udtfOutRR = new HiveParserRowResolver();
    for (int i = 0; i < udtfOutputCols.size(); i++) {
        udtfOutRR.put(outputTableAlias, colAliases.get(i), udtfOutputCols.get(i));
    }
    // Build row type from field <type, name>
    RelDataType retType = HiveParserTypeConverter.getType(cluster, udtfOutRR, null);
    List<RelDataType> argTypes = new ArrayList<>();
    RelDataTypeFactory dtFactory = cluster.getRexBuilder().getTypeFactory();
    for (ColumnInfo ci : opColInfos) {
        argTypes.add(HiveParserUtils.toRelDataType(ci.getType(), dtFactory));
    }
    SqlOperator calciteOp =
            HiveParserSqlFunctionConverter.getCalciteFn(genericUDTFName, argTypes, retType, false);
    RexNode rexNode = cluster.getRexBuilder().makeCall(calciteOp, operands);
    // convert the rex call
    TableFunctionConverter udtfConverter =
            new TableFunctionConverter(
                    cluster, input, frameworkConfig.getOperatorTable(), catalogReader.nameMatcher());
    RexCall convertedCall = (RexCall) rexNode.accept(udtfConverter);
    SqlOperator convertedOperator = convertedCall.getOperator();
    Preconditions.checkState(
            convertedOperator instanceof SqlUserDefinedTableFunction,
            "Expect operator to be "
                    + SqlUserDefinedTableFunction.class.getSimpleName()
                    + ", actually got "
                    + convertedOperator.getClass().getSimpleName());
    // TODO: how to decide this?
    Type elementType = Object[].class;
    // create LogicalTableFunctionScan
    RelNode tableFunctionScan =
            LogicalTableFunctionScan.create(
                    input.getCluster(),
                    Collections.emptyList(),
                    convertedCall,
                    elementType,
                    retType,
                    null);
    // remember the table alias for the UDTF so that we can reference the cols later
    qb.addAlias(outputTableAlias);
    RelNode correlRel;
    RexBuilder rexBuilder = cluster.getRexBuilder();
    // find correlation in the converted call
    Pair<List<CorrelationId>, ImmutableBitSet> correlUse = getCorrelationUse(convertedCall);
    // create correlate node
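    // (Note: assumed behavior, added for clarity.) With no correlation variables, the
    // UDTF output can simply be joined to the input on a literal TRUE condition;
    // with correlation, a LogicalCorrelate feeds each input row into the scan.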
    if (correlUse == null) {
        correlRel =
                plannerContext
                        .createRelBuilder(
                                catalogManager.getCurrentCatalog(),
                                catalogManager.getCurrentDatabase())
                        .push(input)
                        .push(tableFunctionScan)
                        .join(
                                isOuter ? JoinRelType.LEFT : JoinRelType.INNER,
                                rexBuilder.makeLiteral(true))
                        .build();
    } else {
        if (correlUse.left.size() > 1) {
            tableFunctionScan =
                    DeduplicateCorrelateVariables.go(
                            rexBuilder,
                            correlUse.left.get(0),
                            Util.skip(correlUse.left),
                            tableFunctionScan);
        }
        correlRel =
                LogicalCorrelate.create(
                        input,
                        tableFunctionScan,
                        correlUse.left.get(0),
                        correlUse.right,
                        isOuter ? JoinRelType.LEFT : JoinRelType.INNER);
    }
    // Add new rel & its RR to the maps
    relToHiveColNameCalcitePosMap.put(tableFunctionScan, buildHiveToCalciteColumnMap(udtfOutRR));
    relToRowResolver.put(tableFunctionScan, udtfOutRR);
    HiveParserRowResolver correlRR =
            HiveParserRowResolver.getCombinedRR(
                    relToRowResolver.get(input), relToRowResolver.get(tableFunctionScan));
    relToHiveColNameCalcitePosMap.put(correlRel, buildHiveToCalciteColumnMap(correlRR));
    relToRowResolver.put(correlRel, correlRR);
    if (!inSelect) {
        return correlRel;
    }
    // create project node
    List<RexNode> projects = new ArrayList<>();
    HiveParserRowResolver projectRR = new HiveParserRowResolver();
    int j = 0;
    for (int i = input.getRowType().getFieldCount();
            i < correlRel.getRowType().getFieldCount();
            i++) {
        projects.add(cluster.getRexBuilder().makeInputRef(correlRel, i));
        ColumnInfo inputColInfo = correlRR.getRowSchema().getSignature().get(i);
        String colAlias = inputColInfo.getAlias();
        ColumnInfo colInfo =
                new ColumnInfo(
                        getColumnInternalName(j++), inputColInfo.getObjectInspector(), null, false);
        projectRR.put(null, colAlias, colInfo);
    }
    RelNode projectNode =
            LogicalProject.create(
                    correlRel, Collections.emptyList(), projects, tableFunctionScan.getRowType());
    relToHiveColNameCalcitePosMap.put(projectNode, buildHiveToCalciteColumnMap(projectRR));
    relToRowResolver.put(projectNode, projectRR);
    return projectNode;
}
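For orientation, two query shapes that can reach genUDTFPlan, assuming an explode-style UDTF; the SQL strings and class name are illustrative only.

class UdtfQueryShapes {
    // inSelect == true: the UDTF is called directly in the select list, so the method
    // returns the trailing Project over the correlate.
    static final String SELECT_UDTF = "SELECT explode(arr) AS x FROM t";
    // inSelect == false: the UDTF comes from a lateral view, so the correlate itself
    // is returned and the enclosing plan projects from it.
    static final String LATERAL_VIEW =
            "SELECT t.id, v.x FROM t LATERAL VIEW explode(t.arr) v AS x";
}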
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in project flink by apache.
The class HiveParserTypeCheckProcFactory, method processGByExpr.
/**
 * Function to do group-by subexpression elimination. This is called by all the processors
 * initially. As an example, consider the query "select a+b, count(1) from T group by a+b".
 * Then a+b is already precomputed in the group by operator's key, so we substitute a+b in
 * the select list with the internal column name of the a+b expression that appears in the
 * input row resolver.
 *
 * @param nd The node that is being inspected.
 * @param procCtx The processor context.
 * @return exprNodeColumnDesc.
 */
public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) throws SemanticException {
    // We recursively create the exprNodeDesc. Base cases: when we encounter a column
    // ref, we convert that into an exprNodeColumnDesc; when we encounter a constant,
    // we convert that into an exprNodeConstantDesc. For others we just build the
    // exprNodeFuncDesc with recursively built children.
    HiveParserASTNode expr = (HiveParserASTNode) nd;
    HiveParserTypeCheckCtx ctx = (HiveParserTypeCheckCtx) procCtx;
    // having key in (select .. where a = min(b.value)
    if (!ctx.isUseCaching() && ctx.getOuterRR() == null) {
        return null;
    }
    HiveParserRowResolver input = ctx.getInputRR();
    ExprNodeDesc desc = null;
    if (input == null || !ctx.getAllowGBExprElimination()) {
        return null;
    }
    // If the current subExpression is pre-calculated, as in Group-By etc.
    ColumnInfo colInfo = input.getExpression(expr);
    // try outer row resolver
    HiveParserRowResolver outerRR = ctx.getOuterRR();
    if (colInfo == null && outerRR != null) {
        colInfo = outerRR.getExpression(expr);
    }
    if (colInfo != null) {
        desc = new ExprNodeColumnDesc(colInfo);
        HiveParserASTNode source = input.getExpressionSource(expr);
        if (source != null) {
            ctx.getUnparseTranslator().addCopyTranslation(expr, source);
        }
        return desc;
    }
    return desc;
}
Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver in project flink by apache.
The class HiveParserCalcitePlanner, method genJoinRelNode.
private RelNode genJoinRelNode(
        RelNode leftRel,
        String leftTableAlias,
        RelNode rightRel,
        String rightTableAlias,
        JoinType hiveJoinType,
        HiveParserASTNode joinCondAst)
        throws SemanticException {
    HiveParserRowResolver leftRR = relToRowResolver.get(leftRel);
    HiveParserRowResolver rightRR = relToRowResolver.get(rightRel);
    // 1. Construct ExpressionNodeDesc representing Join Condition
    RexNode joinCondRex;
    List<String> namedColumns = null;
    if (joinCondAst != null) {
        HiveParserJoinTypeCheckCtx jCtx =
                new HiveParserJoinTypeCheckCtx(leftRR, rightRR, hiveJoinType, frameworkConfig, cluster);
        HiveParserRowResolver combinedRR = HiveParserRowResolver.getCombinedRR(leftRR, rightRR);
        if (joinCondAst.getType() == HiveASTParser.TOK_TABCOLNAME
                && !hiveJoinType.equals(JoinType.LEFTSEMI)) {
            namedColumns = new ArrayList<>();
            // We will transform the using clause to make it look like an on-clause.
            // So, let's generate a valid on-clause AST from the using clause.
            HiveParserASTNode and =
                    (HiveParserASTNode) HiveASTParseDriver.ADAPTOR.create(HiveASTParser.KW_AND, "and");
            HiveParserASTNode equal = null;
            int count = 0;
            for (Node child : joinCondAst.getChildren()) {
                String columnName = ((HiveParserASTNode) child).getText();
                // dealing with views
                if (semanticAnalyzer.unparseTranslator != null
                        && semanticAnalyzer.unparseTranslator.isEnabled()) {
                    semanticAnalyzer.unparseTranslator.addIdentifierTranslation(
                            (HiveParserASTNode) child);
                }
                namedColumns.add(columnName);
                HiveParserASTNode left =
                        HiveParserASTBuilder.qualifiedName(leftTableAlias, columnName);
                HiveParserASTNode right =
                        HiveParserASTBuilder.qualifiedName(rightTableAlias, columnName);
                equal =
                        (HiveParserASTNode)
                                HiveASTParseDriver.ADAPTOR.create(HiveASTParser.EQUAL, "=");
                HiveASTParseDriver.ADAPTOR.addChild(equal, left);
                HiveASTParseDriver.ADAPTOR.addChild(equal, right);
                HiveASTParseDriver.ADAPTOR.addChild(and, equal);
                count++;
            }
            joinCondAst = count > 1 ? and : equal;
        } else if (semanticAnalyzer.unparseTranslator != null
                && semanticAnalyzer.unparseTranslator.isEnabled()) {
            semanticAnalyzer.genAllExprNodeDesc(joinCondAst, combinedRR, jCtx);
        }
        Map<HiveParserASTNode, ExprNodeDesc> exprNodes =
                HiveParserUtils.genExprNode(joinCondAst, jCtx);
        if (jCtx.getError() != null) {
            throw new SemanticException(
                    generateErrorMessage(jCtx.getErrorSrcNode(), jCtx.getError()));
        }
        ExprNodeDesc joinCondExprNode = exprNodes.get(joinCondAst);
        List<RelNode> inputRels = new ArrayList<>();
        inputRels.add(leftRel);
        inputRels.add(rightRel);
        joinCondRex =
                HiveParserRexNodeConverter.convert(
                                cluster,
                                joinCondExprNode,
                                inputRels,
                                relToRowResolver,
                                relToHiveColNameCalcitePosMap,
                                false,
                                funcConverter)
                        .accept(funcConverter);
    } else {
        joinCondRex = cluster.getRexBuilder().makeLiteral(true);
    }
    // 3. Construct Join Rel Node and HiveParserRowResolver for the new Join Node
    boolean leftSemiJoin = false;
    JoinRelType calciteJoinType;
    switch (hiveJoinType) {
        case LEFTOUTER:
            calciteJoinType = JoinRelType.LEFT;
            break;
        case RIGHTOUTER:
            calciteJoinType = JoinRelType.RIGHT;
            break;
        case FULLOUTER:
            calciteJoinType = JoinRelType.FULL;
            break;
        case LEFTSEMI:
            calciteJoinType = JoinRelType.SEMI;
            leftSemiJoin = true;
            break;
        case INNER:
        default:
            calciteJoinType = JoinRelType.INNER;
            break;
    }
    RelNode topRel;
    HiveParserRowResolver topRR;
    if (leftSemiJoin) {
        List<RelDataTypeField> sysFieldList = new ArrayList<>();
        List<RexNode> leftJoinKeys = new ArrayList<>();
        List<RexNode> rightJoinKeys = new ArrayList<>();
        RexNode nonEquiConds =
                RelOptUtil.splitJoinCondition(
                        sysFieldList,
                        leftRel,
                        rightRel,
                        joinCondRex,
                        leftJoinKeys,
                        rightJoinKeys,
                        null,
                        null);
        if (!nonEquiConds.isAlwaysTrue()) {
            throw new SemanticException(
                    "Non equality condition not supported in Semi-Join: " + nonEquiConds);
        }
        RelNode[] inputRels = new RelNode[] {leftRel, rightRel};
        final List<Integer> leftKeys = new ArrayList<>();
        final List<Integer> rightKeys = new ArrayList<>();
        joinCondRex =
                HiveParserUtils.projectNonColumnEquiConditions(
                        RelFactories.DEFAULT_PROJECT_FACTORY,
                        inputRels,
                        leftJoinKeys,
                        rightJoinKeys,
                        0,
                        leftKeys,
                        rightKeys);
        topRel =
                LogicalJoin.create(
                        inputRels[0],
                        inputRels[1],
                        Collections.emptyList(),
                        joinCondRex,
                        Collections.emptySet(),
                        calciteJoinType);
        // check whether the previous call to projectNonColumnEquiConditions updated the
        // left input (it may have put a Project on top of it)
        if (inputRels[0] != leftRel) {
            HiveParserRowResolver newLeftRR = new HiveParserRowResolver();
            if (!HiveParserRowResolver.add(newLeftRR, leftRR)) {
                LOG.warn("Duplicates detected when adding columns to RR: see previous message");
            }
            for (int i = leftRel.getRowType().getFieldCount();
                    i < inputRels[0].getRowType().getFieldCount();
                    i++) {
                ColumnInfo oColInfo =
                        new ColumnInfo(
                                getColumnInternalName(i),
                                HiveParserTypeConverter.convert(
                                        inputRels[0].getRowType().getFieldList().get(i).getType()),
                                null,
                                false);
                newLeftRR.put(oColInfo.getTabAlias(), oColInfo.getInternalName(), oColInfo);
            }
            HiveParserRowResolver joinRR = new HiveParserRowResolver();
            if (!HiveParserRowResolver.add(joinRR, newLeftRR)) {
                LOG.warn("Duplicates detected when adding columns to RR: see previous message");
            }
            relToHiveColNameCalcitePosMap.put(topRel, buildHiveToCalciteColumnMap(joinRR));
            relToRowResolver.put(topRel, joinRR);
            // Introduce top project operator to remove additional column(s) that have been
            // introduced
            List<RexNode> topFields = new ArrayList<>();
            List<String> topFieldNames = new ArrayList<>();
            for (int i = 0; i < leftRel.getRowType().getFieldCount(); i++) {
                final RelDataTypeField field = leftRel.getRowType().getFieldList().get(i);
                topFields.add(leftRel.getCluster().getRexBuilder().makeInputRef(field.getType(), i));
                topFieldNames.add(field.getName());
            }
            topRel =
                    LogicalProject.create(
                            topRel, Collections.emptyList(), topFields, topFieldNames);
        }
        topRR = new HiveParserRowResolver();
        if (!HiveParserRowResolver.add(topRR, leftRR)) {
            LOG.warn("Duplicates detected when adding columns to RR: see previous message");
        }
    } else {
        topRel =
                LogicalJoin.create(
                        leftRel,
                        rightRel,
                        Collections.emptyList(),
                        joinCondRex,
                        Collections.emptySet(),
                        calciteJoinType);
        topRR = HiveParserRowResolver.getCombinedRR(leftRR, rightRR);
        if (namedColumns != null) {
            List<String> tableAliases = new ArrayList<>();
            tableAliases.add(leftTableAlias);
            tableAliases.add(rightTableAlias);
            topRR.setNamedJoinInfo(
                    new HiveParserNamedJoinInfo(tableAliases, namedColumns, hiveJoinType));
        }
    }
    relToHiveColNameCalcitePosMap.put(topRel, buildHiveToCalciteColumnMap(topRR));
    relToRowResolver.put(topRel, topRR);
    return topRel;
}
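To make the TOK_TABCOLNAME branch concrete, a before/after of the using-clause rewrite performed in step 1; the SQL strings and class name are illustrative only.

class UsingClauseRewrite {
    // What the parser hands in: a USING column list (TOK_TABCOLNAME).
    static final String BEFORE = "SELECT * FROM l JOIN r USING (k1, k2)";
    // What the synthesized on-clause AST is equivalent to after the rewrite: one
    // "l.col = r.col" per column, joined under "and" when there is more than one.
    static final String AFTER = "SELECT * FROM l JOIN r ON l.k1 = r.k1 AND l.k2 = r.k2";
}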