use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.util.Pair in project samza by apache.
the class TestProjectTranslator method testTranslate.
@Test
public void testTranslate() throws IOException, ClassNotFoundException {
// set up mock values for the constructor of ProjectTranslator
LogicalProject mockProject = PowerMockito.mock(LogicalProject.class);
Context mockContext = mock(Context.class);
ContainerContext mockContainerContext = mock(ContainerContext.class);
TranslatorContext mockTranslatorContext = mock(TranslatorContext.class);
TestMetricsRegistryImpl testMetricsRegistryImpl = new TestMetricsRegistryImpl();
RelNode mockInput = mock(RelNode.class);
List<RelNode> inputs = new ArrayList<>();
inputs.add(mockInput);
when(mockInput.getId()).thenReturn(1);
when(mockProject.getId()).thenReturn(2);
when(mockProject.getInputs()).thenReturn(inputs);
when(mockProject.getInput()).thenReturn(mockInput);
RelDataType mockRowType = mock(RelDataType.class);
when(mockRowType.getFieldCount()).thenReturn(1);
when(mockProject.getRowType()).thenReturn(mockRowType);
RexNode mockRexField = mock(RexNode.class);
List<Pair<RexNode, String>> namedProjects = new ArrayList<>();
namedProjects.add(Pair.of(mockRexField, "test_field"));
when(mockProject.getNamedProjects()).thenReturn(namedProjects);
StreamApplicationDescriptorImpl mockAppDesc = mock(StreamApplicationDescriptorImpl.class);
OperatorSpec<Object, SamzaSqlRelMessage> mockInputOp = mock(OperatorSpec.class);
MessageStream<SamzaSqlRelMessage> mockStream = new MessageStreamImpl<>(mockAppDesc, mockInputOp);
when(mockTranslatorContext.getMessageStream(eq(1))).thenReturn(mockStream);
doAnswer(this.getRegisterMessageStreamAnswer()).when(mockTranslatorContext).registerMessageStream(eq(2), any(MessageStream.class));
RexToJavaCompiler mockCompiler = mock(RexToJavaCompiler.class);
when(mockTranslatorContext.getExpressionCompiler()).thenReturn(mockCompiler);
Expression mockExpr = mock(Expression.class);
when(mockCompiler.compile(any(), any())).thenReturn(mockExpr);
when(mockContext.getContainerContext()).thenReturn(mockContainerContext);
when(mockContainerContext.getContainerMetricsRegistry()).thenReturn(testMetricsRegistryImpl);
// Apply the translate() method and verify that the correct map operator is constructed
ProjectTranslator projectTranslator = new ProjectTranslator(1);
projectTranslator.translate(mockProject, LOGICAL_OP_ID, mockTranslatorContext);
// make sure that the context has been registered with the LogicalProject and output message streams
verify(mockTranslatorContext, times(1)).registerRelNode(2, mockProject);
verify(mockTranslatorContext, times(1)).registerMessageStream(2, this.getRegisteredMessageStream(2));
when(mockTranslatorContext.getRelNode(2)).thenReturn(mockProject);
when(mockTranslatorContext.getMessageStream(2)).thenReturn(this.getRegisteredMessageStream(2));
StreamOperatorSpec projectSpec = (StreamOperatorSpec) Whitebox.getInternalState(this.getRegisteredMessageStream(2), "operatorSpec");
assertNotNull(projectSpec);
assertEquals(projectSpec.getOpCode(), OperatorSpec.OpCode.MAP);
// Verify that the init() method establishes the context for the map function
Map<Integer, TranslatorContext> mockContexts = new HashMap<>();
mockContexts.put(1, mockTranslatorContext);
when(mockContext.getApplicationTaskContext()).thenReturn(new SamzaSqlApplicationContext(mockContexts));
projectSpec.getTransformFn().init(mockContext);
MapFunction mapFn = (MapFunction) Whitebox.getInternalState(projectSpec, "mapFn");
assertNotNull(mapFn);
assertEquals(mockTranslatorContext, Whitebox.getInternalState(mapFn, "translatorContext"));
assertEquals(mockProject, Whitebox.getInternalState(mapFn, "project"));
assertEquals(mockExpr, Whitebox.getInternalState(mapFn, "expr"));
// Verify TestMetricsRegistryImpl works with Project
assertEquals(1, testMetricsRegistryImpl.getGauges().size());
assertEquals(2, testMetricsRegistryImpl.getGauges().get(LOGICAL_OP_ID).size());
assertEquals(1, testMetricsRegistryImpl.getCounters().size());
assertEquals(2, testMetricsRegistryImpl.getCounters().get(LOGICAL_OP_ID).size());
assertEquals(0, testMetricsRegistryImpl.getCounters().get(LOGICAL_OP_ID).get(0).getCount());
assertEquals(0, testMetricsRegistryImpl.getCounters().get(LOGICAL_OP_ID).get(1).getCount());
// Call mapFn.apply() to verify the project function is correctly applied to the input message
SamzaSqlRelMessage mockInputMsg = new SamzaSqlRelMessage(new ArrayList<>(), new ArrayList<>(), new SamzaSqlRelMsgMetadata(0L, 0L));
SamzaSqlExecutionContext executionContext = mock(SamzaSqlExecutionContext.class);
DataContext dataContext = mock(DataContext.class);
when(mockTranslatorContext.getExecutionContext()).thenReturn(executionContext);
when(mockTranslatorContext.getDataContext()).thenReturn(dataContext);
Object[] result = new Object[1];
final Object mockFieldObj = new Object();
doAnswer(invocation -> {
Object[] retValue = invocation.getArgumentAt(4, Object[].class);
retValue[0] = mockFieldObj;
return null;
}).when(mockExpr).execute(eq(executionContext), eq(mockContext), eq(dataContext), eq(mockInputMsg.getSamzaSqlRelRecord().getFieldValues().toArray()), eq(result));
SamzaSqlRelMessage retMsg = (SamzaSqlRelMessage) mapFn.apply(mockInputMsg);
assertEquals(retMsg.getSamzaSqlRelRecord().getFieldNames(), Collections.singletonList("test_field"));
assertEquals(retMsg.getSamzaSqlRelRecord().getFieldValues(), Collections.singletonList(mockFieldObj));
// Verify mapFn.apply() updates the TestMetricsRegistryImpl metrics
assertEquals(1, testMetricsRegistryImpl.getCounters().get(LOGICAL_OP_ID).get(0).getCount());
assertEquals(1, testMetricsRegistryImpl.getCounters().get(LOGICAL_OP_ID).get(1).getCount());
}
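The doAnswer stubbing near the end of this test emulates Expression.execute writing its projection result through an out-parameter array. Below is a minimal, self-contained sketch of that pattern, using a hypothetical Expr interface and Mockito 2's getArgument accessor (the test above uses the older getArgumentAt from Mockito 1.x):
import static org.mockito.Mockito.*;

public class DoAnswerOutParamDemo {
    // Hypothetical stand-in for the generated Expression interface.
    interface Expr {
        void execute(Object[] out);
    }

    public static void main(String[] args) {
        Expr mockExpr = mock(Expr.class);
        final Object produced = new Object();
        // Stub the void method so that it writes through its out-parameter,
        // just like the mockExpr.execute(...) stub in the test above.
        doAnswer(invocation -> {
            Object[] out = invocation.getArgument(0);
            out[0] = produced;
            return null;
        }).when(mockExpr).execute(any(Object[].class));

        Object[] result = new Object[1];
        mockExpr.execute(result);
        System.out.println(result[0] == produced); // true
    }
}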
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.util.Pair in project flink by apache.
the class HiveParserUtils method projectNonColumnEquiConditions.
/**
 * Push any equi-join conditions that are not plain column references into Projections on top of
 * the children (the resulting index arithmetic is sketched after this method).
 */
public static RexNode projectNonColumnEquiConditions(RelFactories.ProjectFactory factory, RelNode[] inputRels, List<RexNode> leftJoinKeys, List<RexNode> rightJoinKeys, int systemColCount, List<Integer> leftKeys, List<Integer> rightKeys) {
RelNode leftRel = inputRels[0];
RelNode rightRel = inputRels[1];
RexBuilder rexBuilder = leftRel.getCluster().getRexBuilder();
RexNode outJoinCond = null;
int origLeftInputSize = leftRel.getRowType().getFieldCount();
int origRightInputSize = rightRel.getRowType().getFieldCount();
List<RexNode> newLeftFields = new ArrayList<>();
List<String> newLeftFieldNames = new ArrayList<>();
List<RexNode> newRightFields = new ArrayList<>();
List<String> newRightFieldNames = new ArrayList<>();
int leftKeyCount = leftJoinKeys.size();
int i;
for (i = 0; i < origLeftInputSize; i++) {
final RelDataTypeField field = leftRel.getRowType().getFieldList().get(i);
newLeftFields.add(rexBuilder.makeInputRef(field.getType(), i));
newLeftFieldNames.add(field.getName());
}
for (i = 0; i < origRightInputSize; i++) {
final RelDataTypeField field = rightRel.getRowType().getFieldList().get(i);
newRightFields.add(rexBuilder.makeInputRef(field.getType(), i));
newRightFieldNames.add(field.getName());
}
ImmutableBitSet.Builder origColEqCondsPosBuilder = ImmutableBitSet.builder();
int newKeyCount = 0;
List<Pair<Integer, Integer>> origColEqConds = new ArrayList<>();
for (i = 0; i < leftKeyCount; i++) {
RexNode leftKey = leftJoinKeys.get(i);
RexNode rightKey = rightJoinKeys.get(i);
if (leftKey instanceof RexInputRef && rightKey instanceof RexInputRef) {
origColEqConds.add(Pair.of(((RexInputRef) leftKey).getIndex(), ((RexInputRef) rightKey).getIndex()));
origColEqCondsPosBuilder.set(i);
} else {
newLeftFields.add(leftKey);
newLeftFieldNames.add(null);
newRightFields.add(rightKey);
newRightFieldNames.add(null);
newKeyCount++;
}
}
ImmutableBitSet origColEqCondsPos = origColEqCondsPosBuilder.build();
for (i = 0; i < origColEqConds.size(); i++) {
Pair<Integer, Integer> p = origColEqConds.get(i);
int condPos = origColEqCondsPos.nth(i);
RexNode leftKey = leftJoinKeys.get(condPos);
RexNode rightKey = rightJoinKeys.get(condPos);
leftKeys.add(p.left);
rightKeys.add(p.right);
RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, rexBuilder.makeInputRef(leftKey.getType(), systemColCount + p.left), rexBuilder.makeInputRef(rightKey.getType(), systemColCount + origLeftInputSize + newKeyCount + p.right));
if (outJoinCond == null) {
outJoinCond = cond;
} else {
outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond);
}
}
if (newKeyCount == 0) {
return outJoinCond;
}
int newLeftOffset = systemColCount + origLeftInputSize;
int newRightOffset = systemColCount + origLeftInputSize + origRightInputSize + newKeyCount;
for (i = 0; i < newKeyCount; i++) {
leftKeys.add(origLeftInputSize + i);
rightKeys.add(origRightInputSize + i);
RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, rexBuilder.makeInputRef(newLeftFields.get(origLeftInputSize + i).getType(), newLeftOffset + i), rexBuilder.makeInputRef(newRightFields.get(origRightInputSize + i).getType(), newRightOffset + i));
if (outJoinCond == null) {
outJoinCond = cond;
} else {
outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond);
}
}
// add a project if we need to produce new keys in addition to the original input fields
if (newKeyCount > 0) {
leftRel = factory.createProject(leftRel, Collections.emptyList(), newLeftFields, SqlValidatorUtil.uniquify(newLeftFieldNames, false));
rightRel = factory.createProject(rightRel, Collections.emptyList(), newRightFields, SqlValidatorUtil.uniquify(newRightFieldNames, false));
}
inputRels[0] = leftRel;
inputRels[1] = rightRel;
return outJoinCond;
}
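The least obvious part of this method is the column arithmetic for the appended keys: after the projections, the left row consists of its original fields plus newKeyCount computed keys, and the joined row then places the right input (and its appended keys) after that widened left side. A plain-Java sketch of the offsets under assumed sizes (the names mirror the locals above; the concrete numbers are illustrative only):
public class JoinKeyOffsetDemo {
    public static void main(String[] args) {
        // Suppose ON upper(l.name) = r.name, two original fields per side,
        // no system columns, and one non-column key appended per side.
        int systemColCount = 0, origLeftInputSize = 2, origRightInputSize = 2, newKeyCount = 1;
        // Appended left keys start right after the original left fields.
        int newLeftOffset = systemColCount + origLeftInputSize; // 2
        // Appended right keys start after the widened left side
        // (origLeftInputSize + newKeyCount) plus the original right fields.
        int newRightOffset = systemColCount + origLeftInputSize + newKeyCount + origRightInputSize; // 5
        System.out.println(newLeftOffset + ", " + newRightOffset); // 2, 5
    }
}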
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.util.Pair in project flink by apache.
the class HiveParserCalcitePlanner method getWindowRexAndType.
private Pair<RexNode, TypeInfo> getWindowRexAndType(HiveParserWindowingSpec.WindowExpressionSpec winExprSpec, RelNode srcRel) throws SemanticException {
RexNode window;
if (winExprSpec instanceof HiveParserWindowingSpec.WindowFunctionSpec) {
HiveParserWindowingSpec.WindowFunctionSpec wFnSpec = (HiveParserWindowingSpec.WindowFunctionSpec) winExprSpec;
HiveParserASTNode windowProjAst = wFnSpec.getExpression();
// TODO: do we need to get to child?
int wndSpecASTIndx = getWindowSpecIndx(windowProjAst);
// 2. Get Hive Aggregate Info
AggInfo hiveAggInfo = getHiveAggInfo(windowProjAst, wndSpecASTIndx - 1, relToRowResolver.get(srcRel), (HiveParserWindowingSpec.WindowFunctionSpec) winExprSpec, semanticAnalyzer, frameworkConfig, cluster);
// 3. Get Calcite Return type for Agg Fn
RelDataType calciteAggFnRetType = HiveParserUtils.toRelDataType(hiveAggInfo.getReturnType(), cluster.getTypeFactory());
// 4. Convert Agg Fn args to Calcite
Map<String, Integer> posMap = relToHiveColNameCalcitePosMap.get(srcRel);
HiveParserRexNodeConverter converter = new HiveParserRexNodeConverter(cluster, srcRel.getRowType(), posMap, 0, false, funcConverter);
List<RexNode> calciteAggFnArgs = new ArrayList<>();
List<RelDataType> calciteAggFnArgTypes = new ArrayList<>();
for (int i = 0; i < hiveAggInfo.getAggParams().size(); i++) {
calciteAggFnArgs.add(converter.convert(hiveAggInfo.getAggParams().get(i)));
calciteAggFnArgTypes.add(HiveParserUtils.toRelDataType(hiveAggInfo.getAggParams().get(i).getTypeInfo(), cluster.getTypeFactory()));
}
// 5. Get Calcite Agg Fn
final SqlAggFunction calciteAggFn = HiveParserSqlFunctionConverter.getCalciteAggFn(hiveAggInfo.getUdfName(), hiveAggInfo.isDistinct(), calciteAggFnArgTypes, calciteAggFnRetType);
// 6. Translate Window spec
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserWindowingSpec.WindowSpec wndSpec = ((HiveParserWindowingSpec.WindowFunctionSpec) winExprSpec).getWindowSpec();
List<RexNode> partitionKeys = getPartitionKeys(wndSpec.getPartition(), converter, inputRR, new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster), semanticAnalyzer);
List<RexFieldCollation> orderKeys = getOrderKeys(wndSpec.getOrder(), converter, inputRR, new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster), semanticAnalyzer);
RexWindowBound lowerBound = getBound(wndSpec.getWindowFrame().getStart(), cluster);
RexWindowBound upperBound = getBound(wndSpec.getWindowFrame().getEnd(), cluster);
boolean isRows = wndSpec.getWindowFrame().getWindowType() == HiveParserWindowingSpec.WindowType.ROWS;
window = HiveParserUtils.makeOver(cluster.getRexBuilder(), calciteAggFnRetType, calciteAggFn, calciteAggFnArgs, partitionKeys, orderKeys, lowerBound, upperBound, isRows, true, false, false, false);
window = window.accept(funcConverter);
} else {
throw new SemanticException("Unsupported window Spec");
}
return new Pair<>(window, HiveParserTypeConverter.convert(window.getType()));
}
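The snippets in this section alternate between Pair's two access styles: the public left/right fields (used in projectNonColumnEquiConditions above) and Map.Entry's getKey()/getValue() (used below on the Pair returned by genOBLogicalPlan). Both work because Calcite's Pair implements Map.Entry. A minimal demo, assuming calcite-core (or the vendored Beam equivalent) on the classpath:
import java.util.Map;
import org.apache.calcite.util.Pair;

public class PairDemo {
    public static void main(String[] args) {
        Pair<String, Integer> p = Pair.of("dept", 3);
        // Style 1: the components are public final fields.
        System.out.println(p.left + " -> " + p.right);
        // Style 2: Pair implements Map.Entry, hence getKey()/getValue().
        Map.Entry<String, Integer> entry = p;
        System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
}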
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.util.Pair in project flink by apache.
the class HiveParserCalcitePlanner method genDistSortBy.
// Generate the plan for SORT BY, CLUSTER BY and DISTRIBUTE BY. This is basically the same as
// generating the ORDER BY plan.
// Should be refactored to combine them.
private Pair<RelNode, RelNode> genDistSortBy(HiveParserQB qb, RelNode srcRel, boolean outermostOB) throws SemanticException {
RelNode res = null;
RelNode originalInput = null;
HiveParserQBParseInfo qbp = qb.getParseInfo();
String destClause = qbp.getClauseNames().iterator().next();
HiveParserASTNode sortAST = qbp.getSortByForClause(destClause);
HiveParserASTNode distAST = qbp.getDistributeByForClause(destClause);
HiveParserASTNode clusterAST = qbp.getClusterByForClause(destClause);
if (sortAST != null || distAST != null || clusterAST != null) {
List<RexNode> virtualCols = new ArrayList<>();
List<Pair<HiveParserASTNode, TypeInfo>> vcASTAndType = new ArrayList<>();
List<RelFieldCollation> fieldCollations = new ArrayList<>();
List<Integer> distKeys = new ArrayList<>();
HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
HiveParserRexNodeConverter converter = new HiveParserRexNodeConverter(cluster, srcRel.getRowType(), relToHiveColNameCalcitePosMap.get(srcRel), 0, false, funcConverter);
int numSrcFields = srcRel.getRowType().getFieldCount();
// handle cluster by
if (clusterAST != null) {
if (sortAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and SORT BY");
}
if (distAST != null) {
throw new SemanticException("Cannot have both CLUSTER BY and DISTRIBUTE BY");
}
for (Node node : clusterAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(childAST, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(childAST);
if (childNodeDesc == null) {
throw new SemanticException("Invalid CLUSTER BY expression: " + childAST.toString());
}
RexNode childRexNode = converter.convert(childNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, childNodeDesc.getTypeInfo()));
}
// cluster by doesn't support specifying ASC/DESC or NULLS FIRST/LAST, so use
// default values
fieldCollations.add(new RelFieldCollation(fieldIndex, RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST));
distKeys.add(fieldIndex);
}
} else {
// handle sort by
if (sortAST != null) {
for (Node node : sortAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
HiveParserASTNode nullOrderAST = (HiveParserASTNode) childAST.getChild(0);
HiveParserASTNode fieldAST = (HiveParserASTNode) nullOrderAST.getChild(0);
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(fieldAST, inputRR);
ExprNodeDesc fieldNodeDesc = astToExprNodeDesc.get(fieldAST);
if (fieldNodeDesc == null) {
throw new SemanticException("Invalid sort by expression: " + fieldAST.toString());
}
RexNode childRexNode = converter.convert(fieldNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, fieldNodeDesc.getTypeInfo()));
}
RelFieldCollation.Direction direction = RelFieldCollation.Direction.DESCENDING;
if (childAST.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
direction = RelFieldCollation.Direction.ASCENDING;
}
RelFieldCollation.NullDirection nullOrder;
if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_FIRST) {
nullOrder = RelFieldCollation.NullDirection.FIRST;
} else if (nullOrderAST.getType() == HiveASTParser.TOK_NULLS_LAST) {
nullOrder = RelFieldCollation.NullDirection.LAST;
} else {
throw new SemanticException("Unexpected null ordering option: " + nullOrderAST.getType());
}
fieldCollations.add(new RelFieldCollation(fieldIndex, direction, nullOrder));
}
}
// handle distribute by
if (distAST != null) {
for (Node node : distAST.getChildren()) {
HiveParserASTNode childAST = (HiveParserASTNode) node;
Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc = semanticAnalyzer.genAllExprNodeDesc(childAST, inputRR);
ExprNodeDesc childNodeDesc = astToExprNodeDesc.get(childAST);
if (childNodeDesc == null) {
throw new SemanticException("Invalid DISTRIBUTE BY expression: " + childAST.toString());
}
RexNode childRexNode = converter.convert(childNodeDesc).accept(funcConverter);
int fieldIndex;
if (childRexNode instanceof RexInputRef) {
fieldIndex = ((RexInputRef) childRexNode).getIndex();
} else {
fieldIndex = numSrcFields + virtualCols.size();
virtualCols.add(childRexNode);
vcASTAndType.add(new Pair<>(childAST, childNodeDesc.getTypeInfo()));
}
distKeys.add(fieldIndex);
}
}
}
Preconditions.checkState(!fieldCollations.isEmpty() || !distKeys.isEmpty(), "Both field collations and dist keys are empty");
// add child SEL if needed
RelNode realInput = srcRel;
HiveParserRowResolver outputRR = new HiveParserRowResolver();
if (!virtualCols.isEmpty()) {
List<RexNode> originalInputRefs = srcRel.getRowType().getFieldList().stream().map(input -> new RexInputRef(input.getIndex(), input.getType())).collect(Collectors.toList());
HiveParserRowResolver addedProjectRR = new HiveParserRowResolver();
if (!HiveParserRowResolver.add(addedProjectRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
int vColPos = inputRR.getRowSchema().getSignature().size();
for (Pair<HiveParserASTNode, TypeInfo> astTypePair : vcASTAndType) {
addedProjectRR.putExpression(astTypePair.getKey(), new ColumnInfo(getColumnInternalName(vColPos), astTypePair.getValue(), null, false));
vColPos++;
}
realInput = genSelectRelNode(CompositeList.of(originalInputRefs, virtualCols), addedProjectRR, srcRel);
if (outermostOB) {
if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
} else {
if (!HiveParserRowResolver.add(outputRR, addedProjectRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
}
originalInput = srcRel;
} else {
if (!HiveParserRowResolver.add(outputRR, inputRR)) {
throw new SemanticException("Duplicates detected when adding columns to RR: see previous message");
}
}
// create rel node
RelTraitSet traitSet = cluster.traitSet();
RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
res = LogicalDistribution.create(realInput, canonizedCollation, distKeys);
Map<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR);
relToRowResolver.put(res, outputRR);
relToHiveColNameCalcitePosMap.put(res, hiveColNameCalcitePosMap);
}
return (new Pair<>(res, originalInput));
}
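Because HiveQL's CLUSTER BY syntax admits no ASC/DESC or NULLS FIRST/LAST modifiers, the method above pins every cluster key to ascending order with nulls first. A standalone sketch of the collation object it builds for each key (shown against plain calcite-core; the vendored package prefix behaves the same way):
import org.apache.calcite.rel.RelFieldCollation;

public class ClusterByCollationDemo {
    public static void main(String[] args) {
        // CLUSTER BY key at field ordinal 0: direction and null ordering are fixed defaults.
        RelFieldCollation collation = new RelFieldCollation(
                0,
                RelFieldCollation.Direction.ASCENDING,
                RelFieldCollation.NullDirection.FIRST);
        System.out.println(collation); // e.g. "0 ASC-nulls-first" (exact form varies by version)
    }
}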
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.util.Pair in project flink by apache.
the class HiveParserCalcitePlanner method genLogicalPlan.
private RelNode genLogicalPlan(HiveParserQB qb, boolean outerMostQB, Map<String, Integer> outerNameToPosMap, HiveParserRowResolver outerRR) throws SemanticException {
RelNode res;
// First generate all the opInfos for the elements in the from clause
Map<String, RelNode> aliasToRel = new HashMap<>();
// 0. Check if we can handle the SubQuery;
// canHandleQbForCbo returns null if the query can be handled.
String reason = HiveParserUtils.canHandleQbForCbo(semanticAnalyzer.getQueryProperties());
if (reason != null) {
String msg = "CBO can not handle Sub Query" + " because it: " + reason;
throw new SemanticException(msg);
}
// 1.1. Recurse over the subqueries to fill the subquery part of the plan
for (String subqAlias : qb.getSubqAliases()) {
HiveParserQBExpr qbexpr = qb.getSubqForAlias(subqAlias);
RelNode relNode = genLogicalPlan(qbexpr);
aliasToRel.put(subqAlias, relNode);
if (qb.getViewToTabSchema().containsKey(subqAlias)) {
if (!(relNode instanceof Project)) {
throw new SemanticException("View " + subqAlias + " is corresponding to " + relNode.toString() + ", rather than a Project.");
}
}
}
// 1.2 Recurse over all the source tables
for (String tableAlias : qb.getTabAliases()) {
RelNode op = genTableLogicalPlan(tableAlias, qb);
aliasToRel.put(tableAlias, op);
}
if (aliasToRel.isEmpty()) {
RelNode dummySrc = LogicalValues.createOneRow(cluster);
aliasToRel.put(HiveParserSemanticAnalyzer.DUMMY_TABLE, dummySrc);
HiveParserRowResolver dummyRR = new HiveParserRowResolver();
dummyRR.put(HiveParserSemanticAnalyzer.DUMMY_TABLE, "dummy_col", new ColumnInfo(getColumnInternalName(0), TypeInfoFactory.intTypeInfo, HiveParserSemanticAnalyzer.DUMMY_TABLE, false));
relToRowResolver.put(dummySrc, dummyRR);
relToHiveColNameCalcitePosMap.put(dummySrc, buildHiveToCalciteColumnMap(dummyRR));
}
if (!qb.getParseInfo().getAliasToLateralViews().isEmpty()) {
// process lateral views
res = genLateralViewPlan(qb, aliasToRel);
} else if (qb.getParseInfo().getJoinExpr() != null) {
// 1.3 process join
res = genJoinLogicalPlan(qb.getParseInfo().getJoinExpr(), aliasToRel);
} else {
// If there is no join, then there should be only a single table scan (TS) or a single subquery
res = aliasToRel.values().iterator().next();
}
// 2. Build Rel for where Clause
RelNode filterRel = genFilterLogicalPlan(qb, res, outerNameToPosMap, outerRR);
res = (filterRel == null) ? res : filterRel;
RelNode starSrcRel = res;
// 3. Build Rel for GB Clause
RelNode gbRel = genGBLogicalPlan(qb, res);
res = gbRel == null ? res : gbRel;
// 4. Build Rel for GB Having Clause
RelNode gbHavingRel = genGBHavingLogicalPlan(qb, res);
res = gbHavingRel == null ? res : gbHavingRel;
// 5. Build Rel for Select Clause
RelNode selectRel = genSelectLogicalPlan(qb, res, starSrcRel, outerNameToPosMap, outerRR);
res = selectRel == null ? res : selectRel;
// 6. Build Rel for OB Clause
Pair<Sort, RelNode> obAndTopProj = genOBLogicalPlan(qb, res, outerMostQB);
Sort orderRel = obAndTopProj.getKey();
RelNode topConstrainingProjRel = obAndTopProj.getValue();
res = orderRel == null ? res : orderRel;
// Build Rel for SortBy/ClusterBy/DistributeBy. This can only happen if there is no OrderBy.
if (orderRel == null) {
Pair<RelNode, RelNode> distAndTopProj = genDistSortBy(qb, res, outerMostQB);
RelNode distRel = distAndTopProj.getKey();
topConstrainingProjRel = distAndTopProj.getValue();
res = distRel == null ? res : distRel;
}
// 7. Build Rel for Limit Clause
Sort limitRel = genLimitLogicalPlan(qb, res);
if (limitRel != null) {
if (orderRel != null) {
// merge limit into the order-by node
HiveParserRowResolver orderRR = relToRowResolver.remove(orderRel);
Map<String, Integer> orderColNameToPos = relToHiveColNameCalcitePosMap.remove(orderRel);
res = LogicalSort.create(orderRel.getInput(), orderRel.collation, limitRel.offset, limitRel.fetch);
relToRowResolver.put(res, orderRR);
relToHiveColNameCalcitePosMap.put(res, orderColNameToPos);
relToRowResolver.remove(limitRel);
relToHiveColNameCalcitePosMap.remove(limitRel);
} else {
res = limitRel;
}
}
// 8. Introduce top constraining select if needed.
if (topConstrainingProjRel != null) {
List<RexNode> originalInputRefs = topConstrainingProjRel.getRowType().getFieldList().stream().map(input -> new RexInputRef(input.getIndex(), input.getType())).collect(Collectors.toList());
HiveParserRowResolver topConstrainingProjRR = new HiveParserRowResolver();
if (!HiveParserRowResolver.add(topConstrainingProjRR, relToRowResolver.get(topConstrainingProjRel))) {
LOG.warn("Duplicates detected when adding columns to RR: see previous message");
}
res = genSelectRelNode(originalInputRefs, topConstrainingProjRR, res);
}
// TODO: cleanup this
if (qb.getParseInfo().getAlias() != null) {
HiveParserRowResolver rr = relToRowResolver.get(res);
HiveParserRowResolver newRR = new HiveParserRowResolver();
String alias = qb.getParseInfo().getAlias();
for (ColumnInfo colInfo : rr.getColumnInfos()) {
String name = colInfo.getInternalName();
String[] tmp = rr.reverseLookup(name);
if ("".equals(tmp[0]) || tmp[1] == null) {
// ast expression is not a valid column name for table
tmp[1] = colInfo.getInternalName();
}
ColumnInfo newColInfo = new ColumnInfo(colInfo);
newColInfo.setTabAlias(alias);
newRR.put(alias, tmp[1], newColInfo);
}
relToRowResolver.put(res, newRR);
relToHiveColNameCalcitePosMap.put(res, buildHiveToCalciteColumnMap(newRR));
}
if (LOG.isDebugEnabled()) {
LOG.debug("Created Plan for Query Block " + qb.getId());
}
semanticAnalyzer.setQB(qb);
return res;
}
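Step 7's hand-merge of the limit into the order-by node mirrors what Calcite's RelBuilder does when a limit lands directly on top of a sort: both collapse into a single LogicalSort carrying the collation, offset, and fetch. A hedged, standalone illustration (RelBuilder folding behavior as observed in recent Calcite releases; not Flink code):
import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.RelBuilder;

public class SortLimitMergeDemo {
    public static void main(String[] args) {
        FrameworkConfig config = Frameworks.newConfigBuilder()
                .defaultSchema(Frameworks.createRootSchema(true))
                .build();
        RelBuilder builder = RelBuilder.create(config);
        RelNode plan = builder
                .values(new String[] {"i"}, 1, 2, 3)
                .sort(builder.field("i")) // ORDER BY i
                .limit(0, 10)             // LIMIT 10, expected to fold into the Sort above
                .build();
        // Expect a single LogicalSort(sort0=[$0], dir0=[ASC], fetch=[10]) over the Values.
        System.out.println(RelOptUtil.toString(plan));
    }
}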