Example use of org.apache.calcite.util.mapping.Mapping in the Apache Hive project:
class HiveRelFieldTrimmer, method trimFields.
/**
 * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for {@link HiveTableFunctionScan}.
 * Copied from {@link org.apache.calcite.sql2rel.RelFieldTrimmer#trimFields(
 * org.apache.calcite.rel.logical.LogicalTableFunctionScan, ImmutableBitSet, Set)}
 * with the operand type replaced by {@link HiveTableFunctionScan}.
 * The proper fix would be to implement this in Calcite itself.
 */
public TrimResult trimFields(HiveTableFunctionScan tabFun, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
  final int fieldCount = tabFun.getRowType().getFieldCount();
  final List<RelNode> trimmedInputs = new ArrayList<>();
  for (RelNode child : tabFun.getInputs()) {
    // A table function may reference any column of its inputs, so request all of them.
    final ImmutableBitSet allChildFields = ImmutableBitSet.range(child.getRowType().getFieldCount());
    final TrimResult childResult = trimChildRestore(tabFun, child, allChildFields, Collections.emptySet());
    assert childResult.right.isIdentity();
    trimmedInputs.add(childResult.left);
  }
  TableFunctionScan newTabFun = tabFun;
  if (!tabFun.getInputs().equals(trimmedInputs)) {
    newTabFun = tabFun.copy(tabFun.getTraitSet(), trimmedInputs, tabFun.getCall(), tabFun.getElementType(), tabFun.getRowType(), tabFun.getColumnMappings());
  }
  assert newTabFun.getClass() == tabFun.getClass();
  // Always project all fields, hence an identity mapping.
  return result(newTabFun, Mappings.createIdentity(fieldCount));
}
Example use of org.apache.calcite.util.mapping.Mapping in the Apache Hive project:
class HiveRelFieldTrimmer, method trimFields.
/**
 * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for {@link HiveSortExchange}.
 */
public TrimResult trimFields(HiveSortExchange exchange, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
  final int fieldCount = exchange.getRowType().getFieldCount();
  final RelCollation collation = exchange.getCollation();
  final RelDistribution distribution = exchange.getDistribution();
  final RelNode child = exchange.getInput();
  // The input must keep the fields the consumer needs, plus every field
  // referenced by the collation or used as a distribution key.
  final ImmutableBitSet.Builder neededFields = fieldsUsed.rebuild();
  for (RelFieldCollation fieldCollation : collation.getFieldCollations()) {
    neededFields.set(fieldCollation.getFieldIndex());
  }
  for (int key : distribution.getKeys()) {
    neededFields.set(key);
  }
  // Create input with trimmed columns.
  final TrimResult childResult = trimChild(exchange, child, neededFields.build(), Collections.emptySet());
  final RelNode newChild = childResult.left;
  final Mapping childMapping = childResult.right;
  // Nothing changed; reuse the existing exchange.
  if (newChild == child && childMapping.isIdentity() && fieldsUsed.cardinality() == fieldCount) {
    return result(exchange, Mappings.createIdentity(fieldCount));
  }
  // Rebuild the exchange on top of the trimmed input with rewritten keys.
  final RelBuilder relBuilder = REL_BUILDER.get();
  relBuilder.push(newChild);
  final RelCollation newCollation = RexUtil.apply(childMapping, collation);
  final RelDistribution newDistribution = distribution.apply(childMapping);
  relBuilder.sortExchange(newDistribution, newCollation);
  return result(relBuilder.build(), childMapping);
}
Example use of org.apache.calcite.util.mapping.Mapping in the Apache Calcite project:
class RelMdPredicates, method getPredicates.
/**
 * Infers predicates for a {@link Project}.
 *
 * <p>Builds a partial mapping from input positions to projection positions,
 * covering only projection expressions that are direct input references. An
 * input predicate is pulled up only if every column it references has such a
 * mapping; for example, with
 *
 * <blockquote><pre>
 * inputPullUpExprs: {a > 7, b + c < 10, a + e = 9}
 * projectionExprs: {a, b, c, e / 2}
 * </pre></blockquote>
 *
 * the predicate 'a + e = 9' is dropped because 'e' itself is not in the
 * projection list, giving projectionPullupExprs {a > 7, b + c < 10}.
 *
 * <p>Constant projections additionally generate IS NULL or equality
 * predicates on the projected columns.
 */
public RelOptPredicateList getPredicates(Project project, RelMetadataQuery mq) {
  final RelNode input = project.getInput();
  final RexBuilder rexBuilder = project.getCluster().getRexBuilder();
  final RelOptPredicateList inputInfo = mq.getPulledUpPredicates(input);
  final List<RexNode> pulledUp = new ArrayList<>();
  final ImmutableBitSet.Builder mappedColumns = ImmutableBitSet.builder();
  // Partial function: input ordinal -> projection ordinal, for direct refs only.
  final Mapping mapping = Mappings.create(MappingType.PARTIAL_FUNCTION, input.getRowType().getFieldCount(), project.getRowType().getFieldCount());
  final List<RexNode> exprs = project.getProjects();
  for (int i = 0; i < exprs.size(); i++) {
    final RexNode expr = exprs.get(i);
    if (expr instanceof RexInputRef) {
      final int source = ((RexInputRef) expr).getIndex();
      mapping.set(source, i);
      mappedColumns.set(source);
    }
  }
  // Walk the child's pulled-up predicates; keep those expressible purely in
  // mapped columns, rewritten into the projection's coordinate space.
  final ImmutableBitSet columnsMapped = mappedColumns.build();
  for (RexNode pred : inputInfo.pulledUpPredicates) {
    final RexNode reduced = projectPredicate(rexBuilder, input, pred, columnsMapped);
    if (!reduced.isAlwaysTrue()) {
      pulledUp.add(reduced.accept(new RexPermuteInputsShuttle(mapping, input)));
    }
  }
  // Project can also generate constants. We need to include them.
  for (int i = 0; i < exprs.size(); i++) {
    final RexNode expr = exprs.get(i);
    if (RexLiteral.isNullLiteral(expr)) {
      pulledUp.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, rexBuilder.makeInputRef(project, i)));
    } else if (RexUtil.isConstant(expr)) {
      final List<RexNode> args = ImmutableList.of(rexBuilder.makeInputRef(project, i), expr);
      // IS NOT DISTINCT FROM is null-safe; plain equality suffices otherwise.
      final SqlOperator op = args.get(0).getType().isNullable() || args.get(1).getType().isNullable() ? SqlStdOperatorTable.IS_NOT_DISTINCT_FROM : SqlStdOperatorTable.EQUALS;
      pulledUp.add(rexBuilder.makeCall(op, args));
    }
  }
  return RelOptPredicateList.of(rexBuilder, pulledUp);
}
Example use of org.apache.calcite.util.mapping.Mapping in the Apache Calcite project:
class AggregateExtractProjectRule, method onMatch.
/**
 * Inserts a {@link org.apache.calcite.rel.core.Project} under the matched
 * {@link Aggregate} that narrows the input to only the fields the aggregate
 * actually reads, then rebuilds the aggregate on the narrowed input.
 */
public void onMatch(RelOptRuleCall call) {
  final Aggregate aggregate = call.rel(0);
  final RelNode input = call.rel(1);
  // Fields the aggregate reads: the group keys plus each agg call's
  // arguments and (if present) its filter column.
  final ImmutableBitSet.Builder usedBuilder = aggregate.getGroupSet().rebuild();
  for (AggregateCall aggCall : aggregate.getAggCallList()) {
    aggCall.getArgList().forEach(usedBuilder::set);
    if (aggCall.filterArg >= 0) {
      usedBuilder.set(aggCall.filterArg);
    }
  }
  final ImmutableBitSet usedFields = usedBuilder.build();
  final RelBuilder relBuilder = call.builder().push(input);
  // Project only the used fields and record where each old ordinal lands.
  final List<RexNode> projects = new ArrayList<>();
  final Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, aggregate.getInput().getRowType().getFieldCount(), usedFields.cardinality());
  int target = 0;
  for (int source : usedFields) {
    projects.add(relBuilder.field(source));
    mapping.set(source, target++);
  }
  relBuilder.project(projects);
  // Rewrite group sets and agg calls into the narrowed coordinate space.
  final ImmutableBitSet newGroupSet = Mappings.apply(mapping, aggregate.getGroupSet());
  final ImmutableList<ImmutableBitSet> newGroupSets = ImmutableList.copyOf(Iterables.transform(aggregate.getGroupSets(), bitSet -> Mappings.apply(mapping, bitSet)));
  final List<RelBuilder.AggCall> newAggCallList = new ArrayList<>();
  for (AggregateCall aggCall : aggregate.getAggCallList()) {
    final ImmutableList<RexNode> newArgs = relBuilder.fields(Mappings.apply2(mapping, aggCall.getArgList()));
    final RexNode newFilter = aggCall.filterArg < 0 ? null : relBuilder.field(Mappings.apply(mapping, aggCall.filterArg));
    newAggCallList.add(relBuilder.aggregateCall(aggCall.getAggregation(), aggCall.isDistinct(), aggCall.isApproximate(), newFilter, aggCall.name, newArgs));
  }
  final RelBuilder.GroupKey groupKey = relBuilder.groupKey(newGroupSet, newGroupSets);
  relBuilder.aggregate(groupKey, newAggCallList);
  call.transformTo(relBuilder.build());
}
Example use of org.apache.calcite.util.mapping.Mapping in the Apache Flink project:
class FlinkAggregateJoinTransposeRule, method onMatch.
/**
 * Attempts to push the matched {@link Aggregate} below the matched inner
 * {@link Join}: each aggregate call is split to the join side(s) that contain
 * all of its arguments, a per-side aggregate (or plain project, when the keys
 * are already unique) is built, and a top aggregate/project combines the
 * sub-totals above the new join. Bails out (no transformation) whenever a
 * precondition fails.
 */
public void onMatch(RelOptRuleCall call) {
  final Aggregate origAgg = call.rel(0);
  final Join join = call.rel(1);
  final RexBuilder rexBuilder = origAgg.getCluster().getRexBuilder();
  final RelBuilder relBuilder = call.builder();
  // Converts an aggregate with AUXILIARY_GROUP to a regular aggregate.
  // If the converted aggregate can be pushed down, AggregateReduceGroupingRule
  // will later try to reduce the grouping of the new aggregates created here.
  final Pair<Aggregate, List<RexNode>> newAggAndProject = toRegularAggregate(origAgg);
  final Aggregate aggregate = newAggAndProject.left;
  // Optional projection to restore the original row shape; applied at the end.
  final List<RexNode> projectAfterAgg = newAggAndProject.right;
  // If any aggregate call is non-splittable, has a filter, or is distinct, bail out.
  for (AggregateCall aggregateCall : aggregate.getAggCallList()) {
    if (aggregateCall.getAggregation().unwrap(SqlSplittableAggFunction.class) == null) {
      return;
    }
    if (aggregateCall.filterArg >= 0 || aggregateCall.isDistinct()) {
      return;
    }
  }
  // Only inner joins are supported.
  if (join.getJoinType() != JoinRelType.INNER) {
    return;
  }
  if (!allowFunctions && !aggregate.getAggCallList().isEmpty()) {
    return;
  }
  // Do the columns used by the join appear in the output of the aggregate?
  final ImmutableBitSet aggregateColumns = aggregate.getGroupSet();
  final RelMetadataQuery mq = call.getMetadataQuery();
  final ImmutableBitSet keyColumns = keyColumns(aggregateColumns, mq.getPulledUpPredicates(join).pulledUpPredicates);
  final ImmutableBitSet joinColumns = RelOptUtil.InputFinder.bits(join.getCondition());
  final boolean allColumnsInAggregate = keyColumns.contains(joinColumns);
  // Columns that must survive below the join: group keys plus join keys.
  final ImmutableBitSet belowAggregateColumns = aggregateColumns.union(joinColumns);
  // Split join condition
  final List<Integer> leftKeys = com.google.common.collect.Lists.newArrayList();
  final List<Integer> rightKeys = com.google.common.collect.Lists.newArrayList();
  final List<Boolean> filterNulls = com.google.common.collect.Lists.newArrayList();
  RexNode nonEquiConj = RelOptUtil.splitJoinCondition(join.getLeft(), join.getRight(), join.getCondition(), leftKeys, rightKeys, filterNulls);
  // If it contains non-equi join conditions, we bail out
  if (!nonEquiConj.isAlwaysTrue()) {
    return;
  }
  // Push each aggregate function down to each side that contains all of its
  // arguments. Note that COUNT(*), because it has no arguments, can go to
  // both sides.
  // 'map' records, per original join-output ordinal, its ordinal in the new
  // (below-aggregated) join output; used to rewrite the join condition later.
  final Map<Integer, Integer> map = new HashMap<>();
  final List<Side> sides = new ArrayList<>();
  int uniqueCount = 0;
  int offset = 0;
  int belowOffset = 0;
  // s == 0 is the left join input, s == 1 the right.
  for (int s = 0; s < 2; s++) {
    final Side side = new Side();
    final RelNode joinInput = join.getInput(s);
    int fieldCount = joinInput.getRowType().getFieldCount();
    final ImmutableBitSet fieldSet = ImmutableBitSet.range(offset, offset + fieldCount);
    final ImmutableBitSet belowAggregateKeyNotShifted = belowAggregateColumns.intersect(fieldSet);
    for (Ord<Integer> c : Ord.zip(belowAggregateKeyNotShifted)) {
      map.put(c.e, belowOffset + c.i);
    }
    // Left side keeps its ordinals; right side's are shifted back to 0-based.
    final Mappings.TargetMapping mapping = s == 0 ? Mappings.createIdentity(fieldCount) : Mappings.createShiftMapping(fieldCount + offset, 0, offset, fieldCount);
    final ImmutableBitSet belowAggregateKey = belowAggregateKeyNotShifted.shift(-offset);
    final boolean unique;
    if (!allowFunctions) {
      assert aggregate.getAggCallList().isEmpty();
      // If there are no functions, it doesn't matter as much whether we
      // aggregate the inputs before the join, because there will not be
      // any functions experiencing a cartesian product effect.
      //
      // But finding out whether the input is already unique requires a call
      // to areColumnsUnique that currently (until [CALCITE-1048] "Make
      // metadata more robust" is fixed) places a heavy load on
      // the metadata system.
      //
      // So we choose to imagine the input is already unique, which is
      // untrue but harmless.
      //
      Util.discard(Bug.CALCITE_1048_FIXED);
      unique = true;
    } else {
      final Boolean unique0 = mq.areColumnsUnique(joinInput, belowAggregateKey);
      unique = unique0 != null && unique0;
    }
    if (unique) {
      // Keys are already unique on this side: a project suffices, no aggregate.
      ++uniqueCount;
      side.aggregate = false;
      relBuilder.push(joinInput);
      final Map<Integer, Integer> belowAggregateKeyToNewProjectMap = new HashMap<>();
      final List<RexNode> projects = new ArrayList<>();
      for (Integer i : belowAggregateKey) {
        belowAggregateKeyToNewProjectMap.put(i, projects.size());
        projects.add(relBuilder.field(i));
      }
      for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
        final SqlAggFunction aggregation = aggCall.e.getAggregation();
        final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
        // Only handle calls whose arguments all come from this side.
        if (!aggCall.e.getArgList().isEmpty() && fieldSet.contains(ImmutableBitSet.of(aggCall.e.getArgList()))) {
          // On unique input the aggregate of a single row is the row itself.
          final RexNode singleton = splitter.singleton(rexBuilder, joinInput.getRowType(), aggCall.e.transform(mapping));
          final RexNode targetSingleton = rexBuilder.ensureType(aggCall.e.type, singleton, false);
          if (targetSingleton instanceof RexInputRef) {
            final int index = ((RexInputRef) targetSingleton).getIndex();
            if (!belowAggregateKey.get(index)) {
              projects.add(targetSingleton);
              side.split.put(aggCall.i, projects.size() - 1);
            } else {
              // Reuse the column already projected for the key; avoid duplication.
              side.split.put(aggCall.i, belowAggregateKeyToNewProjectMap.get(index));
            }
          } else {
            projects.add(targetSingleton);
            side.split.put(aggCall.i, projects.size() - 1);
          }
        }
      }
      relBuilder.project(projects);
      side.newInput = relBuilder.build();
    } else {
      // Keys not unique: build a partial aggregate below the join.
      side.aggregate = true;
      List<AggregateCall> belowAggCalls = new ArrayList<>();
      final SqlSplittableAggFunction.Registry<AggregateCall> belowAggCallRegistry = registry(belowAggCalls);
      final int oldGroupKeyCount = aggregate.getGroupCount();
      final int newGroupKeyCount = belowAggregateKey.cardinality();
      for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
        final SqlAggFunction aggregation = aggCall.e.getAggregation();
        final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
        final AggregateCall call1;
        if (fieldSet.contains(ImmutableBitSet.of(aggCall.e.getArgList()))) {
          // All arguments on this side: push the (split) call down.
          final AggregateCall splitCall = splitter.split(aggCall.e, mapping);
          call1 = splitCall.adaptTo(joinInput, splitCall.getArgList(), splitCall.filterArg, oldGroupKeyCount, newGroupKeyCount);
        } else {
          // Arguments elsewhere: contribute this side's counterpart (e.g. COUNT).
          call1 = splitter.other(rexBuilder.getTypeFactory(), aggCall.e);
        }
        if (call1 != null) {
          side.split.put(aggCall.i, belowAggregateKey.cardinality() + belowAggCallRegistry.register(call1));
        }
      }
      side.newInput = relBuilder.push(joinInput).aggregate(relBuilder.groupKey(belowAggregateKey, null), belowAggCalls).build();
    }
    offset += fieldCount;
    belowOffset += side.newInput.getRowType().getFieldCount();
    sides.add(side);
  }
  if (uniqueCount == 2) {
    // Both join inputs are already unique, so there is nothing to gain here;
    // this aggregate/join may itself be the result of a previous
    // invocation of this rule; if we continue we might loop forever.
    return;
  }
  // Update condition
  final Mapping mapping = (Mapping) Mappings.target(map::get, join.getRowType().getFieldCount(), belowOffset);
  final RexNode newCondition = RexUtil.apply(mapping, join.getCondition());
  // Create new join
  relBuilder.push(sides.get(0).newInput).push(sides.get(1).newInput).join(join.getJoinType(), newCondition);
  // Aggregate above to sum up the sub-totals
  final List<AggregateCall> newAggCalls = new ArrayList<>();
  final int groupIndicatorCount = aggregate.getGroupCount() + aggregate.getIndicatorCount();
  final int newLeftWidth = sides.get(0).newInput.getRowType().getFieldCount();
  final List<RexNode> projects = new ArrayList<>(rexBuilder.identityProjects(relBuilder.peek().getRowType()));
  for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
    final SqlAggFunction aggregation = aggCall.e.getAggregation();
    final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
    // -1 signals "no sub-total on this side"; right sub-totals are offset by
    // the left input's width in the joined row.
    final Integer leftSubTotal = sides.get(0).split.get(aggCall.i);
    final Integer rightSubTotal = sides.get(1).split.get(aggCall.i);
    newAggCalls.add(splitter.topSplit(rexBuilder, registry(projects), groupIndicatorCount, relBuilder.peek().getRowType(), aggCall.e, leftSubTotal == null ? -1 : leftSubTotal, rightSubTotal == null ? -1 : rightSubTotal + newLeftWidth));
  }
  relBuilder.project(projects);
  boolean aggConvertedToProjects = false;
  if (allColumnsInAggregate) {
    // Let's see if we can convert the top aggregate into projects instead.
    List<RexNode> projects2 = new ArrayList<>();
    for (int key : Mappings.apply(mapping, aggregate.getGroupSet())) {
      projects2.add(relBuilder.field(key));
    }
    int aggCallIdx = projects2.size();
    for (AggregateCall newAggCall : newAggCalls) {
      final SqlSplittableAggFunction splitter = newAggCall.getAggregation().unwrap(SqlSplittableAggFunction.class);
      if (splitter != null) {
        final RelDataType rowType = relBuilder.peek().getRowType();
        final RexNode singleton = splitter.singleton(rexBuilder, rowType, newAggCall);
        // Keep the type the original aggregate exposed to its callers.
        final RelDataType originalAggCallType = aggregate.getRowType().getFieldList().get(aggCallIdx).getType();
        final RexNode targetSingleton = rexBuilder.ensureType(originalAggCallType, singleton, false);
        projects2.add(targetSingleton);
      }
      aggCallIdx += 1;
    }
    if (projects2.size() == aggregate.getGroupSet().cardinality() + newAggCalls.size()) {
      // We successfully converted agg calls into projects.
      relBuilder.project(projects2);
      aggConvertedToProjects = true;
    }
  }
  if (!aggConvertedToProjects) {
    relBuilder.aggregate(relBuilder.groupKey(Mappings.apply(mapping, aggregate.getGroupSet()), Mappings.apply2(mapping, aggregate.getGroupSets())), newAggCalls);
  }
  // Restore the original row shape if toRegularAggregate introduced a projection.
  if (projectAfterAgg != null) {
    relBuilder.project(projectAfterAgg, origAgg.getRowType().getFieldNames());
  }
  call.transformTo(relBuilder.build());
}
Aggregations