use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project calcite by apache.
the class AbstractMaterializedViewRule method perform.
/**
* Rewriting logic is based on "Optimizing Queries Using Materialized Views:
* A Practical, Scalable Solution" by Goldstein and Larson.
*
* <p>On the query side, the rule matches a Project-node chain or node, where node
* is either an Aggregate or a Join. The subplan rooted at the node operator must
* be composed of one or more of the following operators: TableScan, Project,
* Filter, and Join.
*
* <p>For each join MV, we need to check the following:
* <ol>
* <li> The plan rooted at the Join operator in the view produces all rows
* needed by the plan rooted at the Join operator in the query.</li>
* <li> All columns required by compensating predicates, i.e., predicates that
* need to be enforced over the view, are available at the view output.</li>
* <li> All output expressions can be computed from the output of the view.</li>
* <li> All output rows occur with the correct duplication factor. We might
* rely on existing Unique-Key - Foreign-Key relationships to extract that
* information.</li>
* </ol>
*
* <p>In turn, for each aggregate MV, we need to check the following:
* <ol>
* <li> The plan rooted at the Aggregate operator in the view produces all rows
* needed by the plan rooted at the Aggregate operator in the query.</li>
* <li> All columns required by compensating predicates, i.e., predicates that
* need to be enforced over the view, are available at the view output.</li>
* <li> The grouping columns in the query are a subset of the grouping columns
* in the view.</li>
* <li> All columns required to perform further grouping are available in the
* view output.</li>
* <li> All columns required to compute output expressions are available in the
* view output.</li>
* </ol>
*
* <p>The rule contains multiple extensions compared to the original paper. One of
* them is the possibility of creating rewritings using Union operators, e.g., if
* the result of a query is partially contained in the materialized view.
*/
protected void perform(RelOptRuleCall call, Project topProject, RelNode node) {
final RexBuilder rexBuilder = node.getCluster().getRexBuilder();
final RelMetadataQuery mq = RelMetadataQuery.instance();
final RelOptPlanner planner = call.getPlanner();
final RexExecutor executor = Util.first(planner.getExecutor(), RexUtil.EXECUTOR);
final RelOptPredicateList predicates = RelOptPredicateList.EMPTY;
final RexSimplify simplify = new RexSimplify(rexBuilder, predicates, true, executor);
final List<RelOptMaterialization> materializations = (planner instanceof VolcanoPlanner) ? ((VolcanoPlanner) planner).getMaterializations() : ImmutableList.<RelOptMaterialization>of();
if (!materializations.isEmpty()) {
// 1. Check whether the preconditions to try to generate a rewriting are met
if (!isValidPlan(topProject, node, mq)) {
return;
}
// Obtain applicable (filtered) materializations
// TODO: Filtering of relevant materializations needs to be
// improved so we gather only materializations that might
// actually generate a valid rewriting.
final List<RelOptMaterialization> applicableMaterializations = RelOptMaterializations.getApplicableMaterializations(node, materializations);
if (!applicableMaterializations.isEmpty()) {
// 2. Initialize all query related auxiliary data structures
// that will be used throughout query rewriting process
// Generate query table references
final Set<RelTableRef> queryTableRefs = mq.getTableReferences(node);
if (queryTableRefs == null) {
// Bail out
return;
}
// Extract query predicates
final RelOptPredicateList queryPredicateList = mq.getAllPredicates(node);
if (queryPredicateList == null) {
// Bail out
return;
}
final RexNode pred = simplify.simplify(RexUtil.composeConjunction(rexBuilder, queryPredicateList.pulledUpPredicates, false));
final Triple<RexNode, RexNode, RexNode> queryPreds = splitPredicates(rexBuilder, pred);
// Extract query equivalence classes. An equivalence class is a set
// of columns in the query output that are known to be equal.
final EquivalenceClasses qEC = new EquivalenceClasses();
for (RexNode conj : RelOptUtil.conjunctions(queryPreds.getLeft())) {
assert conj.isA(SqlKind.EQUALS);
RexCall equiCond = (RexCall) conj;
qEC.addEquivalenceClass((RexTableInputRef) equiCond.getOperands().get(0), (RexTableInputRef) equiCond.getOperands().get(1));
}
// 3. Iterate through all applicable materializations, trying to rewrite the given query
for (RelOptMaterialization materialization : applicableMaterializations) {
RelNode view = materialization.tableRel;
Project topViewProject;
RelNode viewNode;
if (materialization.queryRel instanceof Project) {
topViewProject = (Project) materialization.queryRel;
viewNode = topViewProject.getInput();
} else {
topViewProject = null;
viewNode = materialization.queryRel;
}
// 3.1. View checks before proceeding
if (!isValidPlan(topViewProject, viewNode, mq)) {
// Skip it
continue;
}
// 3.2. Initialize all query related auxiliary data structures
// that will be used throughout query rewriting process
// Extract view predicates
final RelOptPredicateList viewPredicateList = mq.getAllPredicates(viewNode);
if (viewPredicateList == null) {
// Skip it
continue;
}
final RexNode viewPred = simplify.simplify(RexUtil.composeConjunction(rexBuilder, viewPredicateList.pulledUpPredicates, false));
final Triple<RexNode, RexNode, RexNode> viewPreds = splitPredicates(rexBuilder, viewPred);
// Extract view table references
final Set<RelTableRef> viewTableRefs = mq.getTableReferences(viewNode);
if (viewTableRefs == null) {
// Bail out
return;
}
// Extract view tables
MatchModality matchModality;
Multimap<RexTableInputRef, RexTableInputRef> compensationEquiColumns = ArrayListMultimap.create();
if (!queryTableRefs.equals(viewTableRefs)) {
// subset of query tables (add additional tables through joins if possible)
if (viewTableRefs.containsAll(queryTableRefs)) {
matchModality = MatchModality.QUERY_PARTIAL;
final EquivalenceClasses vEC = new EquivalenceClasses();
for (RexNode conj : RelOptUtil.conjunctions(viewPreds.getLeft())) {
assert conj.isA(SqlKind.EQUALS);
RexCall equiCond = (RexCall) conj;
vEC.addEquivalenceClass((RexTableInputRef) equiCond.getOperands().get(0), (RexTableInputRef) equiCond.getOperands().get(1));
}
if (!compensatePartial(viewTableRefs, vEC, queryTableRefs, compensationEquiColumns)) {
// Cannot rewrite, skip it
continue;
}
} else if (queryTableRefs.containsAll(viewTableRefs)) {
matchModality = MatchModality.VIEW_PARTIAL;
ViewPartialRewriting partialRewritingResult = compensateViewPartial(call.builder(), rexBuilder, mq, view, topProject, node, queryTableRefs, qEC, topViewProject, viewNode, viewTableRefs);
if (partialRewritingResult == null) {
// Cannot rewrite, skip it
continue;
}
// Rewrite succeeded
view = partialRewritingResult.newView;
topViewProject = partialRewritingResult.newTopViewProject;
viewNode = partialRewritingResult.newViewNode;
} else {
// Skip it
continue;
}
} else {
matchModality = MatchModality.COMPLETE;
}
// 4. We map every table in the query to a table with the same qualified
// name (all query tables are contained in the view, thus this is equivalent
// to mapping every table in the query to a view table).
final Multimap<RelTableRef, RelTableRef> multiMapTables = ArrayListMultimap.create();
for (RelTableRef queryTableRef1 : queryTableRefs) {
for (RelTableRef queryTableRef2 : queryTableRefs) {
if (queryTableRef1.getQualifiedName().equals(queryTableRef2.getQualifiedName())) {
multiMapTables.put(queryTableRef1, queryTableRef2);
}
}
}
// If a table is used multiple times, we will create multiple mappings,
// and we will try to rewrite the query using each of the mappings.
// Then, we will try to map every source table (query) to a target
// table (view), and if we are successful, we will try to create
// compensation predicates to filter the view results further
// (if needed).
final List<BiMap<RelTableRef, RelTableRef>> flatListMappings = generateTableMappings(multiMapTables);
for (BiMap<RelTableRef, RelTableRef> queryToViewTableMapping : flatListMappings) {
// TableMapping : mapping query tables -> view tables
// 4.0. If compensation equivalence classes exist, we need to add
// the mapping to the query mapping
final EquivalenceClasses currQEC = EquivalenceClasses.copy(qEC);
if (matchModality == MatchModality.QUERY_PARTIAL) {
for (Entry<RexTableInputRef, RexTableInputRef> e : compensationEquiColumns.entries()) {
// Copy origin
RelTableRef queryTableRef = queryToViewTableMapping.inverse().get(e.getKey().getTableRef());
RexTableInputRef queryColumnRef = RexTableInputRef.of(queryTableRef, e.getKey().getIndex(), e.getKey().getType());
// Add to query equivalence classes and table mapping
currQEC.addEquivalenceClass(queryColumnRef, e.getValue());
queryToViewTableMapping.put(e.getValue().getTableRef(), // identity
e.getValue().getTableRef());
}
}
// 4.1. Compute compensation predicates, i.e., predicates that need to be
// enforced over the view to retain query semantics. The resulting predicates
// are expressed using {@link RexTableInputRef} over the query.
// First, to establish relationship, we swap column references of the view
// predicates to point to query tables and compute equivalence classes.
final RexNode viewColumnsEquiPred = RexUtil.swapTableReferences(rexBuilder, viewPreds.getLeft(), queryToViewTableMapping.inverse());
final EquivalenceClasses queryBasedVEC = new EquivalenceClasses();
for (RexNode conj : RelOptUtil.conjunctions(viewColumnsEquiPred)) {
assert conj.isA(SqlKind.EQUALS);
RexCall equiCond = (RexCall) conj;
queryBasedVEC.addEquivalenceClass((RexTableInputRef) equiCond.getOperands().get(0), (RexTableInputRef) equiCond.getOperands().get(1));
}
Triple<RexNode, RexNode, RexNode> compensationPreds = computeCompensationPredicates(rexBuilder, simplify, currQEC, queryPreds, queryBasedVEC, viewPreds, queryToViewTableMapping);
if (compensationPreds == null && generateUnionRewriting) {
// Attempt partial rewriting using union operator. This rewriting
// will read some data from the view and the rest of the data from
// the query computation. The resulting predicates are expressed
// using {@link RexTableInputRef} over the view.
compensationPreds = computeCompensationPredicates(rexBuilder, simplify, queryBasedVEC, viewPreds, currQEC, queryPreds, queryToViewTableMapping.inverse());
if (compensationPreds == null) {
// This was our last chance to use the view, skip it
continue;
}
RexNode compensationColumnsEquiPred = compensationPreds.getLeft();
RexNode otherCompensationPred = RexUtil.composeConjunction(rexBuilder, ImmutableList.of(compensationPreds.getMiddle(), compensationPreds.getRight()), false);
assert !compensationColumnsEquiPred.isAlwaysTrue() || !otherCompensationPred.isAlwaysTrue();
// b. Generate union branch (query).
final RelNode unionInputQuery = rewriteQuery(call.builder(), rexBuilder, simplify, mq, compensationColumnsEquiPred, otherCompensationPred, topProject, node, queryToViewTableMapping, queryBasedVEC, currQEC);
if (unionInputQuery == null) {
// Skip it
continue;
}
// c. Generate union branch (view).
// We trigger the unifying method. This method will either create a Project
// or an Aggregate operator on top of the view. It will also compute the
// output expressions for the query.
final RelNode unionInputView = rewriteView(call.builder(), rexBuilder, simplify, mq, matchModality, true, view, topProject, node, topViewProject, viewNode, queryToViewTableMapping, currQEC);
if (unionInputView == null) {
// Skip it
continue;
}
// d. Generate final rewriting (union).
final RelNode result = createUnion(call.builder(), rexBuilder, topProject, unionInputQuery, unionInputView);
if (result == null) {
// Skip it
continue;
}
call.transformTo(result);
} else if (compensationPreds != null) {
RexNode compensationColumnsEquiPred = compensationPreds.getLeft();
RexNode otherCompensationPred = RexUtil.composeConjunction(rexBuilder, ImmutableList.of(compensationPreds.getMiddle(), compensationPreds.getRight()), false);
// a. Compute final compensation predicate.
if (!compensationColumnsEquiPred.isAlwaysTrue() || !otherCompensationPred.isAlwaysTrue()) {
// All columns required by compensating predicates must be contained
// in the view output (condition 2).
List<RexNode> viewExprs = topViewProject == null ? extractReferences(rexBuilder, view) : topViewProject.getChildExps();
// For compensationColumnsEquiPred, we use the view-based equivalence classes, since we want to enforce the rest
if (!compensationColumnsEquiPred.isAlwaysTrue()) {
compensationColumnsEquiPred = rewriteExpression(rexBuilder, mq, view, viewNode, viewExprs, queryToViewTableMapping.inverse(), queryBasedVEC, false, compensationColumnsEquiPred);
if (compensationColumnsEquiPred == null) {
// Skip it
continue;
}
}
// For the rest, we use the query equivalence classes
if (!otherCompensationPred.isAlwaysTrue()) {
otherCompensationPred = rewriteExpression(rexBuilder, mq, view, viewNode, viewExprs, queryToViewTableMapping.inverse(), currQEC, true, otherCompensationPred);
if (otherCompensationPred == null) {
// Skip it
continue;
}
}
}
final RexNode viewCompensationPred = RexUtil.composeConjunction(rexBuilder, ImmutableList.of(compensationColumnsEquiPred, otherCompensationPred), false);
// b. Generate final rewriting if possible.
// First, we add the compensation predicate (if any) on top of the view.
// Then, we trigger the unifying method. This method will either create a
// Project or an Aggregate operator on top of the view. It will also compute
// the output expressions for the query.
RelBuilder builder = call.builder();
RelNode viewWithFilter;
if (!viewCompensationPred.isAlwaysTrue()) {
RexNode newPred = simplify.simplify(viewCompensationPred);
viewWithFilter = builder.push(view).filter(newPred).build();
// We add (and push) the filter to the view plan before triggering the rewriting.
// This is useful in case some of the columns can be folded to same value after
// filter is added.
Pair<RelNode, RelNode> pushedNodes = pushFilterToOriginalViewPlan(builder, topViewProject, viewNode, newPred);
topViewProject = (Project) pushedNodes.left;
viewNode = pushedNodes.right;
} else {
viewWithFilter = builder.push(view).build();
}
final RelNode result = rewriteView(builder, rexBuilder, simplify, mq, matchModality, false, viewWithFilter, topProject, node, topViewProject, viewNode, queryToViewTableMapping, currQEC);
if (result == null) {
// Skip it
continue;
}
call.transformTo(result);
}
// end else
}
}
}
}
}
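The splitPredicates and EquivalenceClasses helpers used by perform are not shown on this page. As a rough sketch only (the name splitEquiConjunctsSketch, the Pair-shaped return value, and the omission of range-predicate handling are illustrative assumptions, not the actual Calcite helper), separating the column-equality conjuncts that feed the equivalence classes from the remaining conjuncts that may need compensation could look like this, assuming the same imports as the rule class above:
// Sketch only: split a conjunction into "column = column" conjuncts (which
// drive the equivalence classes built in perform) and all other conjuncts
// (candidates for compensation predicates over the view). The real
// splitPredicates returns a Triple and also separates range predicates.
private static Pair<RexNode, RexNode> splitEquiConjunctsSketch(RexBuilder rexBuilder, RexNode pred) {
  final List<RexNode> equiConjuncts = new ArrayList<>();
  final List<RexNode> otherConjuncts = new ArrayList<>();
  for (RexNode conj : RelOptUtil.conjunctions(pred)) {
    if (conj.isA(SqlKind.EQUALS)
        && ((RexCall) conj).getOperands().get(0) instanceof RexTableInputRef
        && ((RexCall) conj).getOperands().get(1) instanceof RexTableInputRef) {
      // Equality between two table column references
      equiConjuncts.add(conj);
    } else {
      // Anything else must be enforced over the view if it is not implied by it
      otherConjuncts.add(conj);
    }
  }
  return Pair.of(
      RexUtil.composeConjunction(rexBuilder, equiConjuncts, false),
      RexUtil.composeConjunction(rexBuilder, otherConjuncts, false));
}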
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project calcite by apache.
the class AggregateExpandDistinctAggregatesRule method doRewrite.
/**
* Converts all distinct aggregate calls to a given set of arguments.
*
* <p>This method is called several times, one for each set of arguments.
* Each time it is called, it generates a JOIN to a new SELECT DISTINCT
* relational expression, and modifies the set of top-level calls.
*
* @param aggregate Original aggregate
* @param n Ordinal of this in a join. {@code relBuilder} contains the
* input relational expression (either the original aggregate or the
* output from the previous call to this method); {@code n} is 0 if
* we're converting the first distinct aggregate in a query with no
* non-distinct aggregates
* @param argList Arguments to the distinct aggregate function
* @param filterArg Argument that filters input to aggregate function, or -1
* @param refs List of expressions which will be projected by the
* result of this rule; those relating to this argument list will
* be modified
*/
private void doRewrite(RelBuilder relBuilder, Aggregate aggregate, int n, List<Integer> argList, int filterArg, List<RexInputRef> refs) {
final RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
final List<RelDataTypeField> leftFields;
if (n == 0) {
leftFields = null;
} else {
leftFields = relBuilder.peek().getRowType().getFieldList();
}
// Aggregate(
// child,
// {COUNT(DISTINCT 1), SUM(DISTINCT 1), SUM(2)})
//
// becomes
//
// Aggregate(
// Join(
// child,
// Aggregate(child, < all columns > {}),
// INNER,
// <f2 = f5>))
//
// E.g.
// SELECT deptno, SUM(DISTINCT sal), COUNT(DISTINCT gender), MAX(age)
// FROM Emps
// GROUP BY deptno
//
// becomes
//
// SELECT e.deptno, adsal.sum_sal, adgender.count_gender, e.max_age
// FROM (
// SELECT deptno, MAX(age) as max_age
// FROM Emps GROUP BY deptno) AS e
// JOIN (
// SELECT deptno, COUNT(gender) AS count_gender FROM (
// SELECT DISTINCT deptno, gender FROM Emps) AS dgender
// GROUP BY deptno) AS adgender
// ON e.deptno = adgender.deptno
// JOIN (
// SELECT deptno, SUM(sal) AS sum_sal FROM (
// SELECT DISTINCT deptno, sal FROM Emps) AS dsal
// GROUP BY deptno) AS adsal
// ON e.deptno = adsal.deptno
// GROUP BY e.deptno
//
// Note that if a query contains no non-distinct aggregates, then the
// very first join/group by is omitted. In the example above, if
// MAX(age) is removed, then the sub-select of "e" is not needed, and
// instead the two other group by's are joined to one another.
// Project the columns of the GROUP BY plus the arguments
// to the agg function.
final Map<Integer, Integer> sourceOf = new HashMap<>();
createSelectDistinct(relBuilder, aggregate, argList, filterArg, sourceOf);
// Now compute the aggregate functions on top of the distinct dataset.
// Each distinct agg becomes a non-distinct call to the corresponding
// field from the right; for example,
// "COUNT(DISTINCT e.sal)"
// becomes
// "COUNT(distinct_e.sal)".
final List<AggregateCall> aggCallList = new ArrayList<>();
final List<AggregateCall> aggCalls = aggregate.getAggCallList();
final int groupAndIndicatorCount = aggregate.getGroupCount() + aggregate.getIndicatorCount();
int i = groupAndIndicatorCount - 1;
for (AggregateCall aggCall : aggCalls) {
++i;
// COUNT(DISTINCT gender) or SUM(sal).
if (!aggCall.isDistinct()) {
continue;
}
if (!aggCall.getArgList().equals(argList)) {
continue;
}
// Re-map arguments.
final int argCount = aggCall.getArgList().size();
final List<Integer> newArgs = new ArrayList<>(argCount);
for (int j = 0; j < argCount; j++) {
final Integer arg = aggCall.getArgList().get(j);
newArgs.add(sourceOf.get(arg));
}
final int newFilterArg = aggCall.filterArg >= 0 ? sourceOf.get(aggCall.filterArg) : -1;
final AggregateCall newAggCall = AggregateCall.create(aggCall.getAggregation(), false, aggCall.isApproximate(), newArgs, newFilterArg, aggCall.getType(), aggCall.getName());
assert refs.get(i) == null;
if (n == 0) {
refs.set(i, new RexInputRef(groupAndIndicatorCount + aggCallList.size(), newAggCall.getType()));
} else {
refs.set(i, new RexInputRef(leftFields.size() + groupAndIndicatorCount + aggCallList.size(), newAggCall.getType()));
}
aggCallList.add(newAggCall);
}
final Map<Integer, Integer> map = new HashMap<>();
for (Integer key : aggregate.getGroupSet()) {
map.put(key, map.size());
}
final ImmutableBitSet newGroupSet = aggregate.getGroupSet().permute(map);
assert newGroupSet.equals(ImmutableBitSet.range(aggregate.getGroupSet().cardinality()));
ImmutableList<ImmutableBitSet> newGroupingSets = null;
if (aggregate.indicator) {
newGroupingSets = ImmutableBitSet.ORDERING.immutableSortedCopy(ImmutableBitSet.permute(aggregate.getGroupSets(), map));
}
relBuilder.push(aggregate.copy(aggregate.getTraitSet(), relBuilder.build(), aggregate.indicator, newGroupSet, newGroupingSets, aggCallList));
// If there's no left child yet, no need to create the join
if (n == 0) {
return;
}
// Create the join condition. It is of the form
// 'left.f0 = right.f0 and left.f1 = right.f1 and ...'
// where {f0, f1, ...} are the GROUP BY fields.
final List<RelDataTypeField> distinctFields = relBuilder.peek().getRowType().getFieldList();
final List<RexNode> conditions = Lists.newArrayList();
for (i = 0; i < groupAndIndicatorCount; ++i) {
// null values form its own group
// use "is not distinct from" so that the join condition
// allows null values to match.
conditions.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, RexInputRef.of(i, leftFields), new RexInputRef(leftFields.size() + i, distinctFields.get(i).getType())));
}
// Join in the new 'select distinct' relation.
relBuilder.join(JoinRelType.INNER, conditions);
}
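The createSelectDistinct helper invoked above is not reproduced on this page. A hedged sketch of its contract follows (the name createSelectDistinctSketch and the simplified handling are assumptions; in particular, the real helper also takes filterArg into account), assuming the same imports as the rule class:
// Sketch only: project the GROUP BY columns plus the distinct call's arguments,
// record in sourceOf where each original ordinal lands, and remove duplicates,
// which yields the "SELECT DISTINCT deptno, sal FROM Emps" sub-query of the example.
private static void createSelectDistinctSketch(RelBuilder relBuilder, Aggregate aggregate, List<Integer> argList, Map<Integer, Integer> sourceOf) {
  relBuilder.push(aggregate.getInput());
  final List<RexNode> exprs = new ArrayList<>();
  for (int i : aggregate.getGroupSet()) {
    sourceOf.put(i, exprs.size());
    exprs.add(relBuilder.field(i));
  }
  for (int arg : argList) {
    if (!sourceOf.containsKey(arg)) {
      sourceOf.put(arg, exprs.size());
      exprs.add(relBuilder.field(arg));
    }
  }
  // Grouping on every projected column with no aggregate calls is SELECT DISTINCT.
  relBuilder.project(exprs).distinct();
}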
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project calcite by apache.
the class AggregateJoinTransposeRule method onMatch.
public void onMatch(RelOptRuleCall call) {
final Aggregate aggregate = call.rel(0);
final Join join = call.rel(1);
final RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
final RelBuilder relBuilder = call.builder();
// If any aggregate function does not support splitting, or any aggregate call has a filter, bail out
for (AggregateCall aggregateCall : aggregate.getAggCallList()) {
if (aggregateCall.getAggregation().unwrap(SqlSplittableAggFunction.class) == null) {
return;
}
if (aggregateCall.filterArg >= 0) {
return;
}
}
// If it is not an inner join, we do not push down the aggregate operator
if (join.getJoinType() != JoinRelType.INNER) {
return;
}
if (!allowFunctions && !aggregate.getAggCallList().isEmpty()) {
return;
}
// Do the columns used by the join appear in the output of the aggregate?
final ImmutableBitSet aggregateColumns = aggregate.getGroupSet();
final RelMetadataQuery mq = call.getMetadataQuery();
final ImmutableBitSet keyColumns = keyColumns(aggregateColumns, mq.getPulledUpPredicates(join).pulledUpPredicates);
final ImmutableBitSet joinColumns = RelOptUtil.InputFinder.bits(join.getCondition());
final boolean allColumnsInAggregate = keyColumns.contains(joinColumns);
final ImmutableBitSet belowAggregateColumns = aggregateColumns.union(joinColumns);
// Split join condition
final List<Integer> leftKeys = Lists.newArrayList();
final List<Integer> rightKeys = Lists.newArrayList();
final List<Boolean> filterNulls = Lists.newArrayList();
RexNode nonEquiConj = RelOptUtil.splitJoinCondition(join.getLeft(), join.getRight(), join.getCondition(), leftKeys, rightKeys, filterNulls);
// If it contains non-equi join conditions, we bail out
if (!nonEquiConj.isAlwaysTrue()) {
return;
}
// Push each aggregate function down to each side that contains all of its
// arguments. Note that COUNT(*), because it has no arguments, can go to
// both sides.
final Map<Integer, Integer> map = new HashMap<>();
final List<Side> sides = new ArrayList<>();
int uniqueCount = 0;
int offset = 0;
int belowOffset = 0;
for (int s = 0; s < 2; s++) {
final Side side = new Side();
final RelNode joinInput = join.getInput(s);
int fieldCount = joinInput.getRowType().getFieldCount();
final ImmutableBitSet fieldSet = ImmutableBitSet.range(offset, offset + fieldCount);
final ImmutableBitSet belowAggregateKeyNotShifted = belowAggregateColumns.intersect(fieldSet);
for (Ord<Integer> c : Ord.zip(belowAggregateKeyNotShifted)) {
map.put(c.e, belowOffset + c.i);
}
final Mappings.TargetMapping mapping = s == 0 ? Mappings.createIdentity(fieldCount) : Mappings.createShiftMapping(fieldCount + offset, 0, offset, fieldCount);
final ImmutableBitSet belowAggregateKey = belowAggregateKeyNotShifted.shift(-offset);
final boolean unique;
if (!allowFunctions) {
assert aggregate.getAggCallList().isEmpty();
// If there are no functions, it doesn't matter as much whether we
// aggregate the inputs before the join, because there will not be
// any functions experiencing a cartesian product effect.
//
// But finding out whether the input is already unique requires a call
// to areColumnsUnique that currently (until [CALCITE-1048] "Make
// metadata more robust" is fixed) places a heavy load on
// the metadata system.
//
// So we choose to imagine that the input is already unique, which is
// untrue but harmless.
//
Util.discard(Bug.CALCITE_1048_FIXED);
unique = true;
} else {
final Boolean unique0 = mq.areColumnsUnique(joinInput, belowAggregateKey);
unique = unique0 != null && unique0;
}
if (unique) {
++uniqueCount;
side.aggregate = false;
relBuilder.push(joinInput);
final List<RexNode> projects = new ArrayList<>();
for (Integer i : belowAggregateKey) {
projects.add(relBuilder.field(i));
}
for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
final SqlAggFunction aggregation = aggCall.e.getAggregation();
final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
if (!aggCall.e.getArgList().isEmpty() && fieldSet.contains(ImmutableBitSet.of(aggCall.e.getArgList()))) {
final RexNode singleton = splitter.singleton(rexBuilder, joinInput.getRowType(), aggCall.e.transform(mapping));
if (singleton instanceof RexInputRef) {
side.split.put(aggCall.i, ((RexInputRef) singleton).getIndex());
} else {
projects.add(singleton);
side.split.put(aggCall.i, projects.size() - 1);
}
}
}
relBuilder.project(projects);
side.newInput = relBuilder.build();
} else {
side.aggregate = true;
List<AggregateCall> belowAggCalls = new ArrayList<>();
final SqlSplittableAggFunction.Registry<AggregateCall> belowAggCallRegistry = registry(belowAggCalls);
final int oldGroupKeyCount = aggregate.getGroupCount();
final int newGroupKeyCount = belowAggregateKey.cardinality();
for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
final SqlAggFunction aggregation = aggCall.e.getAggregation();
final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
final AggregateCall call1;
if (fieldSet.contains(ImmutableBitSet.of(aggCall.e.getArgList()))) {
final AggregateCall splitCall = splitter.split(aggCall.e, mapping);
call1 = splitCall.adaptTo(joinInput, splitCall.getArgList(), splitCall.filterArg, oldGroupKeyCount, newGroupKeyCount);
} else {
call1 = splitter.other(rexBuilder.getTypeFactory(), aggCall.e);
}
if (call1 != null) {
side.split.put(aggCall.i, belowAggregateKey.cardinality() + belowAggCallRegistry.register(call1));
}
}
side.newInput = relBuilder.push(joinInput).aggregate(relBuilder.groupKey(belowAggregateKey, null), belowAggCalls).build();
}
offset += fieldCount;
belowOffset += side.newInput.getRowType().getFieldCount();
sides.add(side);
}
if (uniqueCount == 2) {
// Both inputs to the join are unique. There is nothing to be gained by
// this rule. In fact, this aggregate+join may be the result of a previous
// invocation of this rule; if we continue we might loop forever.
return;
}
// Update condition
final Mapping mapping = (Mapping) Mappings.target(new Function<Integer, Integer>() {
public Integer apply(Integer a0) {
return map.get(a0);
}
}, join.getRowType().getFieldCount(), belowOffset);
final RexNode newCondition = RexUtil.apply(mapping, join.getCondition());
// Create new join
relBuilder.push(sides.get(0).newInput).push(sides.get(1).newInput).join(join.getJoinType(), newCondition);
// Aggregate above to sum up the sub-totals
final List<AggregateCall> newAggCalls = new ArrayList<>();
final int groupIndicatorCount = aggregate.getGroupCount() + aggregate.getIndicatorCount();
final int newLeftWidth = sides.get(0).newInput.getRowType().getFieldCount();
final List<RexNode> projects = new ArrayList<>(rexBuilder.identityProjects(relBuilder.peek().getRowType()));
for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
final SqlAggFunction aggregation = aggCall.e.getAggregation();
final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
final Integer leftSubTotal = sides.get(0).split.get(aggCall.i);
final Integer rightSubTotal = sides.get(1).split.get(aggCall.i);
newAggCalls.add(splitter.topSplit(rexBuilder, registry(projects), groupIndicatorCount, relBuilder.peek().getRowType(), aggCall.e, leftSubTotal == null ? -1 : leftSubTotal, rightSubTotal == null ? -1 : rightSubTotal + newLeftWidth));
}
relBuilder.project(projects);
boolean aggConvertedToProjects = false;
if (allColumnsInAggregate) {
// let's see if we can convert aggregate into projects
List<RexNode> projects2 = new ArrayList<>();
for (int key : Mappings.apply(mapping, aggregate.getGroupSet())) {
projects2.add(relBuilder.field(key));
}
for (AggregateCall newAggCall : newAggCalls) {
final SqlSplittableAggFunction splitter = newAggCall.getAggregation().unwrap(SqlSplittableAggFunction.class);
if (splitter != null) {
final RelDataType rowType = relBuilder.peek().getRowType();
projects2.add(splitter.singleton(rexBuilder, rowType, newAggCall));
}
}
if (projects2.size() == aggregate.getGroupSet().cardinality() + newAggCalls.size()) {
// We successfully converted agg calls into projects.
relBuilder.project(projects2);
aggConvertedToProjects = true;
}
}
if (!aggConvertedToProjects) {
relBuilder.aggregate(relBuilder.groupKey(Mappings.apply(mapping, aggregate.getGroupSet()), Mappings.apply2(mapping, aggregate.getGroupSets())), newAggCalls);
}
call.transformTo(relBuilder.build());
}
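The registry(...) helper used for both join inputs is not shown here. A minimal sketch, assuming the list-backed behavior implied by how the side.split indices are consumed above (the name registrySketch is illustrative):
// Sketch only: a registry backed by a growing list; registering an element
// returns its position, reusing the position of an equal element if present.
private static <E> SqlSplittableAggFunction.Registry<E> registrySketch(final List<E> list) {
  return new SqlSplittableAggFunction.Registry<E>() {
    public int register(E e) {
      int i = list.indexOf(e);
      if (i < 0) {
        i = list.size();
        list.add(e);
      }
      return i;
    }
  };
}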
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project calcite by apache.
the class AggregateReduceFunctionsRule method reduceSum.
private RexNode reduceSum(Aggregate oldAggRel, AggregateCall oldCall, List<AggregateCall> newCalls, Map<AggregateCall, RexNode> aggCallMapping) {
final int nGroups = oldAggRel.getGroupCount();
RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
int arg = oldCall.getArgList().get(0);
RelDataType argType = getFieldType(oldAggRel.getInput(), arg);
final AggregateCall sumZeroCall = AggregateCall.create(SqlStdOperatorTable.SUM0, oldCall.isDistinct(), oldCall.isApproximate(), oldCall.getArgList(), oldCall.filterArg, oldAggRel.getGroupCount(), oldAggRel.getInput(), null, oldCall.name);
final AggregateCall countCall = AggregateCall.create(SqlStdOperatorTable.COUNT, oldCall.isDistinct(), oldCall.isApproximate(), oldCall.getArgList(), oldCall.filterArg, oldAggRel.getGroupCount(), oldAggRel, null, null);
// NOTE: these references are with respect to the output
// of newAggRel
RexNode sumZeroRef = rexBuilder.addAggCall(sumZeroCall, nGroups, oldAggRel.indicator, newCalls, aggCallMapping, ImmutableList.of(argType));
if (!oldCall.getType().isNullable()) {
// If SUM(x) is not nullable, the validator must have determined that
// nulls are impossible (because the group is never empty and x is never
// null). Therefore we translate to SUM0(x).
return sumZeroRef;
}
RexNode countRef = rexBuilder.addAggCall(countCall, nGroups, oldAggRel.indicator, newCalls, aggCallMapping, ImmutableList.of(argType));
return rexBuilder.makeCall(SqlStdOperatorTable.CASE, rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, countRef, rexBuilder.makeExactLiteral(BigDecimal.ZERO)), rexBuilder.makeCast(sumZeroRef.getType(), rexBuilder.constantNull()), sumZeroRef);
}
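The reason reduceSum wraps SUM0 in a CASE only for nullable results can be seen with plain Java; the following standalone sketch (hypothetical method names, not Calcite API) contrasts the two aggregate semantics:
// Sketch only: SQL SUM yields NULL for an empty or all-NULL input, while
// SUM0 yields 0; hence CASE WHEN COUNT(x) = 0 THEN NULL ELSE SUM0(x) END
// is emitted above whenever the original SUM type is nullable.
static Integer sqlSumSketch(java.util.List<Integer> xs) {
  Integer acc = null;
  for (Integer x : xs) {
    if (x != null) {
      acc = (acc == null) ? x : acc + x;
    }
  }
  return acc;                      // null when there are no non-null rows
}

static long sqlSum0Sketch(java.util.List<Integer> xs) {
  long acc = 0;
  for (Integer x : xs) {
    if (x != null) {
      acc += x;
    }
  }
  return acc;                      // 0 when there are no non-null rows
}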
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project calcite by apache.
the class AggregateReduceFunctionsRule method reduceStddev.
private RexNode reduceStddev(Aggregate oldAggRel, AggregateCall oldCall, boolean biased, boolean sqrt, List<AggregateCall> newCalls, Map<AggregateCall, RexNode> aggCallMapping, List<RexNode> inputExprs) {
// stddev_pop(x) ==>
// power(
// (sum(x * x) - sum(x) * sum(x) / count(x))
// / count(x),
// .5)
//
// stddev_samp(x) ==>
// power(
// (sum(x * x) - sum(x) * sum(x) / count(x))
// / nullif(count(x) - 1, 0),
// .5)
final int nGroups = oldAggRel.getGroupCount();
final RelOptCluster cluster = oldAggRel.getCluster();
final RexBuilder rexBuilder = cluster.getRexBuilder();
final RelDataTypeFactory typeFactory = cluster.getTypeFactory();
assert oldCall.getArgList().size() == 1 : oldCall.getArgList();
final int argOrdinal = oldCall.getArgList().get(0);
final RelDataType argOrdinalType = getFieldType(oldAggRel.getInput(), argOrdinal);
final RelDataType oldCallType = typeFactory.createTypeWithNullability(oldCall.getType(), argOrdinalType.isNullable());
final RexNode argRef = rexBuilder.ensureType(oldCallType, inputExprs.get(argOrdinal), true);
final int argRefOrdinal = lookupOrAdd(inputExprs, argRef);
final RexNode argSquared = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, argRef, argRef);
final int argSquaredOrdinal = lookupOrAdd(inputExprs, argSquared);
final AggregateCall sumArgSquaredAggCall = createAggregateCallWithBinding(typeFactory, SqlStdOperatorTable.SUM, argSquared.getType(), oldAggRel, oldCall, argSquaredOrdinal);
final RexNode sumArgSquared = rexBuilder.addAggCall(sumArgSquaredAggCall, nGroups, oldAggRel.indicator, newCalls, aggCallMapping, ImmutableList.of(sumArgSquaredAggCall.getType()));
final AggregateCall sumArgAggCall = AggregateCall.create(SqlStdOperatorTable.SUM, oldCall.isDistinct(), oldCall.isApproximate(), ImmutableIntList.of(argOrdinal), oldCall.filterArg, oldAggRel.getGroupCount(), oldAggRel.getInput(), null, null);
final RexNode sumArg = rexBuilder.addAggCall(sumArgAggCall, nGroups, oldAggRel.indicator, newCalls, aggCallMapping, ImmutableList.of(sumArgAggCall.getType()));
final RexNode sumArgCast = rexBuilder.ensureType(oldCallType, sumArg, true);
final RexNode sumSquaredArg = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, sumArgCast, sumArgCast);
final AggregateCall countArgAggCall = AggregateCall.create(SqlStdOperatorTable.COUNT, oldCall.isDistinct(), oldCall.isApproximate(), oldCall.getArgList(), oldCall.filterArg, oldAggRel.getGroupCount(), oldAggRel, null, null);
final RexNode countArg = rexBuilder.addAggCall(countArgAggCall, nGroups, oldAggRel.indicator, newCalls, aggCallMapping, ImmutableList.of(argOrdinalType));
final RexNode avgSumSquaredArg = rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE, sumSquaredArg, countArg);
final RexNode diff = rexBuilder.makeCall(SqlStdOperatorTable.MINUS, sumArgSquared, avgSumSquaredArg);
final RexNode denominator;
if (biased) {
denominator = countArg;
} else {
final RexLiteral one = rexBuilder.makeExactLiteral(BigDecimal.ONE);
final RexNode nul = rexBuilder.makeCast(countArg.getType(), rexBuilder.constantNull());
final RexNode countMinusOne = rexBuilder.makeCall(SqlStdOperatorTable.MINUS, countArg, one);
final RexNode countEqOne = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, countArg, one);
denominator = rexBuilder.makeCall(SqlStdOperatorTable.CASE, countEqOne, nul, countMinusOne);
}
final RexNode div = rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE, diff, denominator);
RexNode result = div;
if (sqrt) {
final RexNode half = rexBuilder.makeExactLiteral(new BigDecimal("0.5"));
result = rexBuilder.makeCall(SqlStdOperatorTable.POWER, div, half);
}
return rexBuilder.makeCast(oldCall.getType(), result);
}
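The algebraic identity stated in the comment at the top of reduceStddev can be verified with a short standalone snippet (the sample values are made up for illustration):
// Sketch only: checks that (sum(x*x) - sum(x)*sum(x)/count(x)) / count(x)
// equals the textbook population variance, and that taking the square root
// (POWER(v, .5) above) gives stddev_pop.
public static void main(String[] args) {
  double[] xs = {3.0, 5.0, 7.0, 10.0};
  double n = xs.length, sum = 0, sumSq = 0;
  for (double x : xs) {
    sum += x;
    sumSq += x * x;
  }
  double mean = sum / n, varDirect = 0;
  for (double x : xs) {
    varDirect += (x - mean) * (x - mean);
  }
  varDirect /= n;                                    // textbook population variance
  double varRewrite = (sumSq - sum * sum / n) / n;   // form used by the rewrite
  System.out.println(Math.abs(varDirect - varRewrite) < 1e-9);  // prints true
  System.out.println(Math.sqrt(varRewrite));         // stddev_pop of the sample
}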