Use of org.apache.calcite.rel.core.Union in project druid by druid-io.
The class DruidUnionDataSourceRule, method onMatch.
@Override
public void onMatch(final RelOptRuleCall call) {
final Union unionRel = call.rel(0);
final DruidRel<?> firstDruidRel = call.rel(1);
final DruidQueryRel secondDruidRel = call.rel(2);
if (firstDruidRel instanceof DruidUnionDataSourceRel) {
// Unwrap and flatten the inputs to the Union.
final RelNode newUnionRel = call.builder()
    .pushAll(firstDruidRel.getInputs())
    .push(secondDruidRel)
    .union(true, firstDruidRel.getInputs().size() + 1)
    .build();
call.transformTo(
    DruidUnionDataSourceRel.create(
        (Union) newUnionRel,
        getColumnNamesIfTableOrUnion(firstDruidRel, plannerContext).get(),
        firstDruidRel.getPlannerContext()));
} else {
// Sanity check.
if (!(firstDruidRel instanceof DruidQueryRel)) {
throw new ISE("Expected first rel to be a DruidQueryRel, but it was %s", firstDruidRel.getClass().getName());
}
call.transformTo(
    DruidUnionDataSourceRel.create(
        unionRel,
        getColumnNamesIfTableOrUnion(firstDruidRel, plannerContext).get(),
        firstDruidRel.getPlannerContext()));
}
}
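The RelBuilder sequence above (pushAll the existing inputs, push the new one, then union(true, n)) is how the rule flattens a nested union into a single n-way UNION ALL. The following minimal Calcite rule is a sketch of that pattern outside Druid; the class name FlattenNestedUnionAllRule and the two-input operand pattern are assumptions for illustration, not part of the Druid code.
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Union;

// Illustrative only: flattens UNION ALL(UNION ALL(a, b), c) into UNION ALL(a, b, c).
public class FlattenNestedUnionAllRule extends RelOptRule {

  public FlattenNestedUnionAllRule() {
    // Match a two-input Union whose first input is itself a Union.
    super(operand(Union.class,
        operand(Union.class, any()),
        operand(RelNode.class, any())));
  }

  @Override
  public void onMatch(RelOptRuleCall call) {
    final Union topUnion = call.rel(0);    // the outer Union
    final Union innerUnion = call.rel(1);  // the nested Union (first operand child)
    final RelNode other = call.rel(2);     // the remaining input (second operand child)

    // Only UNION ALL can be flattened without changing duplicate-elimination semantics.
    if (!topUnion.all || !innerUnion.all) {
      return;
    }

    // Same RelBuilder pattern as in DruidUnionDataSourceRule: push the nested
    // inputs, push the extra input, then build a single n-way UNION ALL.
    final RelNode flattened = call.builder()
        .pushAll(innerUnion.getInputs())
        .push(other)
        .union(true, innerUnion.getInputs().size() + 1)
        .build();
    call.transformTo(flattened);
  }
}
The operand tree passed to the constructor is what makes call.rel(0), call.rel(1) and call.rel(2) resolve to the outer Union, the nested Union and the remaining input, mirroring the call.rel indices used in DruidUnionDataSourceRule.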
Use of org.apache.calcite.rel.core.Union in project hive by apache.
The class ASTConverter, method convertSource.
private QueryBlockInfo convertSource(RelNode r) throws CalciteSemanticException {
Schema s = null;
ASTNode ast = null;
if (r instanceof TableScan) {
TableScan f = (TableScan) r;
s = new Schema(f);
ast = ASTBuilder.table(f);
planMapper.link(ast, f);
} else if (r instanceof HiveJdbcConverter) {
HiveJdbcConverter f = (HiveJdbcConverter) r;
s = new Schema(f);
ast = ASTBuilder.table(f);
} else if (r instanceof DruidQuery) {
DruidQuery f = (DruidQuery) r;
s = new Schema(f);
ast = ASTBuilder.table(f);
} else if (r instanceof Join) {
Join join = (Join) r;
QueryBlockInfo left = convertSource(join.getLeft());
QueryBlockInfo right = convertSource(join.getRight());
s = new Schema(left.schema, right.schema);
ASTNode cond = join.getCondition().accept(new RexVisitor(s, false, r.getCluster().getRexBuilder()));
boolean semiJoin = join.isSemiJoin() || join.getJoinType() == JoinRelType.ANTI;
if (join.getRight() instanceof Join && !semiJoin) {
// should not be done for semijoin since it will change the semantics
// Invert join inputs; this is done because otherwise the SemanticAnalyzer
// methods to merge joins will not kick in
JoinRelType type;
if (join.getJoinType() == JoinRelType.LEFT) {
type = JoinRelType.RIGHT;
} else if (join.getJoinType() == JoinRelType.RIGHT) {
type = JoinRelType.LEFT;
} else {
type = join.getJoinType();
}
ast = ASTBuilder.join(right.ast, left.ast, type, cond);
addPkFkInfoToAST(ast, join, true);
} else {
ast = ASTBuilder.join(left.ast, right.ast, join.getJoinType(), cond);
addPkFkInfoToAST(ast, join, false);
}
if (semiJoin) {
s = left.schema;
}
} else if (r instanceof Union) {
Union u = ((Union) r);
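// Fold the union inputs left to right into a left-deep chain of UNION ALL AST nodes
// and expose the result as a derived-table subquery under a fresh alias.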
ASTNode left = new ASTConverter(((Union) r).getInput(0), this.derivedTableCount, planMapper).convert();
for (int ind = 1; ind < u.getInputs().size(); ind++) {
left = getUnionAllAST(left,
    new ASTConverter(((Union) r).getInput(ind), this.derivedTableCount, planMapper).convert());
String sqAlias = nextAlias();
ast = ASTBuilder.subQuery(left, sqAlias);
s = new Schema((Union) r, sqAlias);
}
} else {
ASTConverter src = new ASTConverter(r, this.derivedTableCount, planMapper);
ASTNode srcAST = src.convert();
String sqAlias = nextAlias();
s = src.getRowSchema(sqAlias);
ast = ASTBuilder.subQuery(srcAST, sqAlias);
}
return new QueryBlockInfo(s, ast);
}
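The input-swapping branch above works because a LEFT outer join becomes a RIGHT outer join (and vice versa) once its inputs are exchanged, while INNER and FULL joins are symmetric. A stand-alone sketch of that mapping, with the hypothetical name invertOuterJoinType (not part of ASTConverter):
import org.apache.calcite.rel.core.JoinRelType;

final class JoinTypeUtil {
  private JoinTypeUtil() {}

  // a LEFT JOIN b is equivalent to b RIGHT JOIN a (and vice versa);
  // INNER and FULL joins are symmetric, so they are returned unchanged.
  static JoinRelType invertOuterJoinType(JoinRelType type) {
    switch (type) {
      case LEFT:
        return JoinRelType.RIGHT;
      case RIGHT:
        return JoinRelType.LEFT;
      default:
        return type;
    }
  }
}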
Use of org.apache.calcite.rel.core.Union in project hive by apache.
The class MaterializedViewRewritingRelVisitor, method visit.
@Override
public void visit(RelNode node, int ordinal, RelNode parent) {
if (node instanceof Aggregate) {
this.containsAggregate = true;
// Aggregate mode - it should be followed by union
// that we need to analyze
RelNode input = node.getInput(0);
if (input instanceof Union) {
check((Union) input);
}
} else if (node instanceof Union) {
// Non aggregate mode - analyze union operator
check((Union) node);
} else if (node instanceof Project) {
// Project operator, we can continue
super.visit(node, ordinal, parent);
}
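// Any other plan shape is not supported for this rewriting; the visitor reports its
// result by throwing ReturnedValue rather than returning normally.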
throw new ReturnedValue(false);
}
Use of org.apache.calcite.rel.core.Union in project hive by apache.
The class HiveJoinInsertDeleteIncrementalRewritingRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
final Union union = call.rel(0);
RexBuilder rexBuilder = union.getCluster().getRexBuilder();
// 1) First branch is query, second branch is MV
final RelNode joinLeftInput = union.getInput(1);
final RelNode joinRightInput = union.getInput(0);
// 2) Build conditions for join and start adding
// expressions for project operator
List<RexNode> projExprs = new ArrayList<>();
List<RexNode> joinConjs = new ArrayList<>();
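// Both union branches share the same row type; join on every column except the last one,
// which is the row-is-deleted flag and is projected separately below.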
for (int leftPos = 0; leftPos < joinLeftInput.getRowType().getFieldCount() - 1; leftPos++) {
RexNode leftRef = rexBuilder.makeInputRef(
    joinLeftInput.getRowType().getFieldList().get(leftPos).getType(), leftPos);
RexNode rightRef = rexBuilder.makeInputRef(
    joinRightInput.getRowType().getFieldList().get(leftPos).getType(),
    leftPos + joinLeftInput.getRowType().getFieldCount());
projExprs.add(rightRef);
joinConjs.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, leftRef, rightRef));
}
RexNode joinCond = RexUtil.composeConjunction(rexBuilder, joinConjs);
int rowIsDeletedIdx = joinRightInput.getRowType().getFieldCount() - 1;
RexNode rowIsDeleted = rexBuilder.makeInputRef(
    joinRightInput.getRowType().getFieldList().get(rowIsDeletedIdx).getType(),
    joinLeftInput.getRowType().getFieldCount() + rowIsDeletedIdx);
projExprs.add(rowIsDeleted);
// 3) Build plan
RelNode newNode = call.builder()
    .push(union.getInput(1))
    .push(union.getInput(0))
    .join(JoinRelType.RIGHT, joinCond)
    .project(projExprs)
    .build();
call.transformTo(newNode);
}
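The loop above assembles a null-safe equi-join: one IS NOT DISTINCT FROM predicate per key column, combined with RexUtil.composeConjunction. The helper below is a sketch of that pattern using only the Calcite calls that appear in the rule; the class and method names are hypothetical:
import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

final class NullSafeJoinConditionSketch {
  private NullSafeJoinConditionSketch() {}

  // Builds a conjunction of IS NOT DISTINCT FROM predicates over the first keyCount
  // columns of left and right, assuming right's fields follow left's in the joined row
  // (which is how the rule above indexes its input refs).
  static RexNode nullSafeEquiJoinCondition(RexBuilder rexBuilder, RelNode left, RelNode right, int keyCount) {
    final int leftFieldCount = left.getRowType().getFieldCount();
    final List<RexNode> conjuncts = new ArrayList<>();
    for (int i = 0; i < keyCount; i++) {
      RexNode leftRef = rexBuilder.makeInputRef(
          left.getRowType().getFieldList().get(i).getType(), i);
      RexNode rightRef = rexBuilder.makeInputRef(
          right.getRowType().getFieldList().get(i).getType(), leftFieldCount + i);
      conjuncts.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, leftRef, rightRef));
    }
    return RexUtil.composeConjunction(rexBuilder, conjuncts);
  }
}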
Use of org.apache.calcite.rel.core.Union in project hive by apache.
The class HiveAggregateIncrementalRewritingRuleBase, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
final Aggregate agg = call.rel(aggregateIndex);
final Union union = call.rel(1);
final RelBuilder relBuilder = call.builder();
final RexBuilder rexBuilder = relBuilder.getRexBuilder();
// 1) First branch is query, second branch is MV
RelNode joinLeftInput = union.getInput(1);
final T joinRightInput = createJoinRightInput(call);
if (joinRightInput == null) {
return;
}
// 2) Introduce a Project on top of the MV scan with all columns from the view plus a boolean literal
// which indicates whether the row with the key values coming from the joinRightInput exists in the view:
// - true means the row exists
// - null means it does not exist
// The Project is also needed to encapsulate the view scan in a subquery; this is required by
// CalcitePlanner.fixUpASTAggregateInsertIncrementalRebuild and
// CalcitePlanner.fixUpASTAggregateInsertDeleteIncrementalRebuild
List<RexNode> mvCols = new ArrayList<>(joinLeftInput.getRowType().getFieldCount());
for (int i = 0; i < joinLeftInput.getRowType().getFieldCount(); ++i) {
mvCols.add(rexBuilder.makeInputRef(joinLeftInput.getRowType().getFieldList().get(i).getType(), i));
}
mvCols.add(rexBuilder.makeLiteral(true));
joinLeftInput = relBuilder.push(joinLeftInput).project(mvCols).build();
// 3) Build conditions for join and start adding
// expressions for project operator
List<RexNode> projExprs = new ArrayList<>();
List<RexNode> joinConjs = new ArrayList<>();
int groupCount = agg.getGroupCount();
int totalCount = agg.getGroupCount() + agg.getAggCallList().size();
for (int leftPos = 0, rightPos = totalCount + 1; leftPos < groupCount; leftPos++, rightPos++) {
RexNode leftRef = rexBuilder.makeInputRef(
    joinLeftInput.getRowType().getFieldList().get(leftPos).getType(), leftPos);
RexNode rightRef = rexBuilder.makeInputRef(
    joinRightInput.rightInput.getRowType().getFieldList().get(leftPos).getType(), rightPos);
projExprs.add(rightRef);
RexNode nsEqExpr = rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, ImmutableList.of(leftRef, rightRef));
joinConjs.add(nsEqExpr);
}
// 4) Create join node
RexNode joinCond = RexUtil.composeConjunction(rexBuilder, joinConjs);
RelNode join = relBuilder.push(joinLeftInput)
    .push(joinRightInput.rightInput)
    .join(JoinRelType.RIGHT, joinCond)
    .build();
// 5) Add the expressions that correspond to the aggregation functions
for (int i = 0, leftPos = groupCount, rightPos = totalCount + 1 + groupCount;
    leftPos < totalCount; i++, leftPos++, rightPos++) {
// CASE WHEN source.s IS NULL AND mv2.s IS NULL THEN NULL
//      WHEN source.s IS NULL THEN mv2.s
//      WHEN mv2.s IS NULL THEN source.s
//      ELSE source.s + mv2.s END
RexNode leftRef = rexBuilder.makeInputRef(
    joinLeftInput.getRowType().getFieldList().get(leftPos).getType(), leftPos);
RexNode rightRef = rexBuilder.makeInputRef(
    joinRightInput.rightInput.getRowType().getFieldList().get(leftPos).getType(), rightPos);
// Generate SQLOperator for merging the aggregations
SqlAggFunction aggCall = agg.getAggCallList().get(i).getAggregation();
RexNode elseReturn = createAggregateNode(aggCall, leftRef, rightRef, rexBuilder);
// According to the SQL standard (and Hive), aggregate functions eliminate null values, but the
// operators used in the elseReturn expression return null if one of their operands is null,
// hence we need a null check on both operands.
// Note: If both are null, we will fall into branch WHEN leftNull THEN rightRef
RexNode leftNull = rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, leftRef);
RexNode rightNull = rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, rightRef);
projExprs.add(rexBuilder.makeCall(SqlStdOperatorTable.CASE, leftNull, rightRef, rightNull, leftRef, elseReturn));
}
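// The flag column is the boolean literal appended to the MV branch in step 2; since that branch
// is the left input of the join, its field index is unchanged in the joined row type.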
int flagIndex = joinLeftInput.getRowType().getFieldCount() - 1;
RexNode flagNode = rexBuilder.makeInputRef(join.getRowType().getFieldList().get(flagIndex).getType(), flagIndex);
// 6) Build plan
RelNode newNode = relBuilder.push(join)
    .filter(createFilterCondition(joinRightInput, flagNode, projExprs, relBuilder))
    .project(projExprs)
    .build();
call.transformTo(newNode);
}
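For a SUM aggregate, the merge expression assembled in step 5 takes the MV value when the delta side is null, the delta value when the MV side is null, and the sum otherwise. The helper below is a sketch of that shape, not the rule's createAggregateNode; mergeSum is a hypothetical name and it assumes numeric, type-compatible operands:
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

final class MergeSumSketch {
  private MergeSumSketch() {}

  // CASE WHEN mv IS NULL THEN delta
  //      WHEN delta IS NULL THEN mv
  //      ELSE mv + delta END
  static RexNode mergeSum(RexBuilder rexBuilder, RexNode mvRef, RexNode deltaRef) {
    RexNode mvNull = rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, mvRef);
    RexNode deltaNull = rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, deltaRef);
    RexNode sum = rexBuilder.makeCall(SqlStdOperatorTable.PLUS, mvRef, deltaRef);
    // CASE takes alternating (condition, value) pairs followed by the ELSE value,
    // the same calling convention used by the rule's makeCall above.
    return rexBuilder.makeCall(SqlStdOperatorTable.CASE, mvNull, deltaRef, deltaNull, mvRef, sum);
  }
}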