Use of org.apache.calcite.rel.RelCollation in project hive by apache.
The class HiveSortJoinReduceRule, method onMatch.
  @Override
  public void onMatch(RelOptRuleCall call) {
    final HiveSortLimit sortLimit = call.rel(0);
    final HiveJoin join = call.rel(1);
    RelNode inputLeft = join.getLeft();
    RelNode inputRight = join.getRight();
    // We create a new sort operator on the corresponding input
    if (join.getJoinType() == JoinRelType.LEFT) {
      inputLeft = sortLimit.copy(sortLimit.getTraitSet(), inputLeft,
          sortLimit.getCollation(), sortLimit.offset, sortLimit.fetch);
      ((HiveSortLimit) inputLeft).setRuleCreated(true);
    } else {
      // Adjust right collation
      final RelCollation rightCollation =
          RelCollationTraitDef.INSTANCE.canonize(
              RelCollations.shift(sortLimit.getCollation(),
                  -join.getLeft().getRowType().getFieldCount()));
      inputRight = sortLimit.copy(sortLimit.getTraitSet().replace(rightCollation),
          inputRight, rightCollation, sortLimit.offset, sortLimit.fetch);
      ((HiveSortLimit) inputRight).setRuleCreated(true);
    }
    // We copy the join and the top sort operator
    RelNode result = join.copy(join.getTraitSet(), join.getCondition(), inputLeft,
        inputRight, join.getJoinType(), join.isSemiJoinDone());
    result = sortLimit.copy(sortLimit.getTraitSet(), result, sortLimit.getCollation(),
        sortLimit.offset, sortLimit.fetch);
    call.transformTo(result);
  }
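The RelCollation work in this rule is the RelCollations.shift call: when the sort is pushed to the right join input, the collation's field indexes (which refer to the join's combined row type) are rebased by subtracting the left input's field count, and the result is canonized before being used as a trait. Below is a minimal, self-contained sketch of that rebasing using Calcite's public API; the class name, field counts, and indexes are illustrative, not taken from the rule.

  import org.apache.calcite.rel.RelCollation;
  import org.apache.calcite.rel.RelCollationTraitDef;
  import org.apache.calcite.rel.RelCollations;
  import org.apache.calcite.rel.RelFieldCollation;

  public class ShiftCollationExample {
    public static void main(String[] args) {
      // Suppose the join output is sorted on field 5 and the left input has 4 fields.
      RelCollation joinCollation = RelCollations.of(new RelFieldCollation(5));
      int leftFieldCount = 4;
      // Rebase onto the right input: join field 5 is field 1 of the right side.
      RelCollation rightCollation = RelCollationTraitDef.INSTANCE.canonize(
          RelCollations.shift(joinCollation, -leftFieldCount));
      System.out.println(rightCollation);  // expected: [1]
    }
  }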
Use of org.apache.calcite.rel.RelCollation in project hive by apache.
The class HiveProjectSortTransposeRule, method onMatch.
  //~ Methods ----------------------------------------------------------------
  // implement RelOptRule
  public void onMatch(RelOptRuleCall call) {
    final HiveProject project = call.rel(0);
    final HiveSortLimit sort = call.rel(1);
    // Determine mapping between project input and output fields. If sort
    // relies on non-trivial expressions, we can't push.
    final Mappings.TargetMapping map =
        RelOptUtil.permutation(project.getProjects(),
            project.getInput().getRowType()).inverse();
    for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) {
      if (map.getTarget(fc.getFieldIndex()) < 0) {
        return;
      }
    }
    // Create new collation
    final RelCollation newCollation =
        RelCollationTraitDef.INSTANCE.canonize(RexUtil.apply(map, sort.getCollation()));
    // New operators
    final RelNode newProject = project.copy(sort.getInput().getTraitSet(),
        ImmutableList.<RelNode>of(sort.getInput()));
    final HiveSortLimit newSort = sort.copy(newProject.getTraitSet(), newProject,
        newCollation, sort.offset, sort.fetch);
    call.transformTo(newSort);
  }
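Here the collation is rewritten through the projection's field mapping with RexUtil.apply(map, sort.getCollation()), after RelOptUtil.permutation(...).inverse() has produced a permutation and any collation field without a target has caused the rule to bail out. The standalone sketch below shows the same remapping; the hand-built Permutation stands in for the mapping the rule derives from the project, and the concrete indexes are made up for illustration.

  import org.apache.calcite.rel.RelCollation;
  import org.apache.calcite.rel.RelCollations;
  import org.apache.calcite.rel.RelFieldCollation;
  import org.apache.calcite.rex.RexUtil;
  import org.apache.calcite.util.Permutation;

  public class RemapCollationExample {
    public static void main(String[] args) {
      // A project over three fields that swaps the first and third field
      // (Permutation.set keeps the mapping a bijection, so 0 -> 2 implies 2 -> 0).
      Permutation map = new Permutation(3);
      map.set(0, 2);
      // A sort on field 2 of one side corresponds to field 0 of the other.
      RelCollation collation = RelCollations.of(new RelFieldCollation(2));
      RelCollation remapped = RexUtil.apply(map, collation);
      System.out.println(remapped);  // expected: [0]
    }
  }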
Use of org.apache.calcite.rel.RelCollation in project hive by apache.
The class HiveSortLimit, method copy.
  @Override
  public HiveSortLimit copy(RelTraitSet traitSet, RelNode newInput,
      RelCollation newCollation, RexNode offset, RexNode fetch) {
    // TODO: can we blindly copy sort trait? What if inputs changed and we
    // are now sorting by different cols
    RelCollation canonizedCollation = traitSet.canonize(newCollation);
    HiveSortLimit sortLimit = new HiveSortLimit(getCluster(), traitSet, newInput,
        canonizedCollation, offset, fetch);
    sortLimit.setRuleCreated(ruleCreated);
    return sortLimit;
  }
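The only RelCollation-specific step in this copy method is traitSet.canonize(newCollation), which resolves the collation against its trait def so that equal collations share one interned instance inside the planner. The snippet below is a small sketch of that effect, assuming Calcite's standard trait-interning behavior and using RelCollationTraitDef.INSTANCE directly; the class name is illustrative.

  import org.apache.calcite.rel.RelCollation;
  import org.apache.calcite.rel.RelCollationTraitDef;
  import org.apache.calcite.rel.RelCollations;
  import org.apache.calcite.rel.RelFieldCollation;

  public class CanonizeCollationExample {
    public static void main(String[] args) {
      RelCollation a = RelCollations.of(new RelFieldCollation(0));
      RelCollation b = RelCollations.of(new RelFieldCollation(0));
      // Structurally equal ...
      System.out.println(a.equals(b));  // true
      // ... and canonization is expected to intern them to one shared instance.
      RelCollation ca = RelCollationTraitDef.INSTANCE.canonize(a);
      RelCollation cb = RelCollationTraitDef.INSTANCE.canonize(b);
      System.out.println(ca == cb);     // true, assuming the trait def interns equal traits
    }
  }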
Use of org.apache.calcite.rel.RelCollation in project drill by apache.
The class StreamAggPrule, method onMatch.
  @Override
  public void onMatch(RelOptRuleCall call) {
    final DrillAggregateRel aggregate = (DrillAggregateRel) call.rel(0);
    RelNode input = aggregate.getInput();
    final RelCollation collation = getCollation(aggregate);
    RelTraitSet traits = null;
    if (aggregate.containsDistinctCall()) {
      // currently, don't use StreamingAggregate if any of the logical aggrs contains DISTINCT
      return;
    }
    try {
      if (aggregate.getGroupSet().isEmpty()) {
        DrillDistributionTrait singleDist = DrillDistributionTrait.SINGLETON;
        final RelTraitSet singleDistTrait =
            call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL).plus(singleDist);
        if (create2PhasePlan(call, aggregate)) {
          traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL);
          RelNode convertedInput = convert(input, traits);
          new SubsetTransformer<DrillAggregateRel, InvalidRelException>(call) {
            @Override
            public RelNode convertChild(final DrillAggregateRel join, final RelNode rel)
                throws InvalidRelException {
              DrillDistributionTrait toDist =
                  rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
              RelTraitSet traits = newTraitSet(Prel.DRILL_PHYSICAL, toDist);
              RelNode newInput = convert(rel, traits);
              StreamAggPrel phase1Agg = new StreamAggPrel(aggregate.getCluster(), traits,
                  newInput, aggregate.indicator, aggregate.getGroupSet(),
                  aggregate.getGroupSets(), aggregate.getAggCallList(),
                  OperatorPhase.PHASE_1of2);
              UnionExchangePrel exch =
                  new UnionExchangePrel(phase1Agg.getCluster(), singleDistTrait, phase1Agg);
              return new StreamAggPrel(aggregate.getCluster(), singleDistTrait, exch,
                  aggregate.indicator, aggregate.getGroupSet(), aggregate.getGroupSets(),
                  phase1Agg.getPhase2AggCalls(), OperatorPhase.PHASE_2of2);
            }
          }.go(aggregate, convertedInput);
        } else {
          createTransformRequest(call, aggregate, input, singleDistTrait);
        }
      } else {
        // hash distribute on all grouping keys
        final DrillDistributionTrait distOnAllKeys = new DrillDistributionTrait(
            DrillDistributionTrait.DistributionType.HASH_DISTRIBUTED,
            ImmutableList.copyOf(getDistributionField(aggregate, true)));
        traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL)
            .plus(collation).plus(distOnAllKeys);
        createTransformRequest(call, aggregate, input, traits);
        // hash distribute on one grouping key
        DrillDistributionTrait distOnOneKey = new DrillDistributionTrait(
            DrillDistributionTrait.DistributionType.HASH_DISTRIBUTED,
            ImmutableList.copyOf(getDistributionField(aggregate, false)));
        traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL)
            .plus(collation).plus(distOnOneKey);
        if (create2PhasePlan(call, aggregate)) {
          traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL);
          RelNode convertedInput = convert(input, traits);
          new SubsetTransformer<DrillAggregateRel, InvalidRelException>(call) {
            @Override
            public RelNode convertChild(final DrillAggregateRel aggregate, final RelNode rel)
                throws InvalidRelException {
              DrillDistributionTrait toDist =
                  rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
              RelTraitSet traits = newTraitSet(Prel.DRILL_PHYSICAL, collation, toDist);
              RelNode newInput = convert(rel, traits);
              StreamAggPrel phase1Agg = new StreamAggPrel(aggregate.getCluster(), traits,
                  newInput, aggregate.indicator, aggregate.getGroupSet(),
                  aggregate.getGroupSets(), aggregate.getAggCallList(),
                  OperatorPhase.PHASE_1of2);
              int numEndPoints = PrelUtil.getSettings(phase1Agg.getCluster()).numEndPoints();
              HashToMergeExchangePrel exch = new HashToMergeExchangePrel(
                  phase1Agg.getCluster(),
                  phase1Agg.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(distOnAllKeys),
                  phase1Agg, ImmutableList.copyOf(getDistributionField(aggregate, true)),
                  collation, numEndPoints);
              return new StreamAggPrel(aggregate.getCluster(), exch.getTraitSet(), exch,
                  aggregate.indicator, aggregate.getGroupSet(), aggregate.getGroupSets(),
                  phase1Agg.getPhase2AggCalls(), OperatorPhase.PHASE_2of2);
            }
          }.go(aggregate, convertedInput);
        }
      }
    } catch (InvalidRelException e) {
      tracer.warning(e.toString());
    }
  }
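The collation plugged into the trait sets above comes from getCollation(aggregate), which orders the stream on the grouping keys so a streaming aggregate can consume sorted input. The helper below is an illustrative stand-in for that derivation rather than Drill's implementation: it builds one ascending RelFieldCollation per group key, in key order.

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.calcite.rel.RelCollation;
  import org.apache.calcite.rel.RelCollations;
  import org.apache.calcite.rel.RelFieldCollation;
  import org.apache.calcite.util.ImmutableBitSet;

  public class GroupKeyCollationExample {
    // Build a collation that orders on each grouping key of an aggregate.
    static RelCollation collationForGroupKeys(ImmutableBitSet groupSet) {
      List<RelFieldCollation> fields = new ArrayList<>();
      for (int key : groupSet) {
        fields.add(new RelFieldCollation(key));
      }
      return RelCollations.of(fields);
    }

    public static void main(String[] args) {
      // Group keys at ordinals 1 and 3 produce a two-field collation.
      System.out.println(collationForGroupKeys(ImmutableBitSet.of(1, 3)));  // expected: [1, 3]
    }
  }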
Use of org.apache.calcite.rel.RelCollation in project drill by apache.
The class WindowPrule, method onMatch.
  @Override
  public void onMatch(RelOptRuleCall call) {
    final DrillWindowRel window = call.rel(0);
    RelNode input = call.rel(1);
    // TODO: Order window based on existing partition by
    //input.getTraitSet().subsumes()
    boolean partitionby = false;
    boolean addMerge = false;
    // The start index of the constant fields of DrillWindowRel
    final int startConstantsIndex = window.getInput().getRowType().getFieldCount();
    int constantShiftIndex = 0;
    for (final Ord<Window.Group> w : Ord.zip(window.groups)) {
      Window.Group windowBase = w.getValue();
      RelTraitSet traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL);
      // For empty Over-Clause
      if (windowBase.keys.isEmpty() && windowBase.orderKeys.getFieldCollations().isEmpty()) {
        DrillDistributionTrait distEmptyKeys =
            new DrillDistributionTrait(DrillDistributionTrait.DistributionType.SINGLETON);
        traits = traits.plus(distEmptyKeys);
      } else if (windowBase.keys.size() > 0) {
        DrillDistributionTrait distOnAllKeys = new DrillDistributionTrait(
            DrillDistributionTrait.DistributionType.HASH_DISTRIBUTED,
            ImmutableList.copyOf(getDistributionFields(windowBase)));
        partitionby = true;
        traits = traits.plus(distOnAllKeys);
      } else if (windowBase.orderKeys.getFieldCollations().size() > 0) {
        // if only the order-by clause is specified, there is a single partition
        // consisting of all the rows, so we do a distributed sort followed by a
        // single merge as the input of the window operator
        DrillDistributionTrait distKeys = new DrillDistributionTrait(
            DrillDistributionTrait.DistributionType.HASH_DISTRIBUTED,
            ImmutableList.copyOf(getDistributionFieldsFromCollation(windowBase)));
        traits = traits.plus(distKeys);
        if (!isSingleMode(call)) {
          addMerge = true;
        }
      }
      // Add collation trait if either partition-by or order-by is specified.
      if (partitionby || windowBase.orderKeys.getFieldCollations().size() > 0) {
        RelCollation collation = getCollation(windowBase);
        traits = traits.plus(collation);
      }
      RelNode convertedInput = convert(input, traits);
      if (addMerge) {
        traits = traits.plus(DrillDistributionTrait.SINGLETON);
        convertedInput = new SingleMergeExchangePrel(window.getCluster(), traits,
            convertedInput, windowBase.collation());
      }
      List<RelDataTypeField> newRowFields = Lists.newArrayList();
      for (RelDataTypeField field : convertedInput.getRowType().getFieldList()) {
        newRowFields.add(field);
      }
      Iterable<RelDataTypeField> newWindowFields =
          Iterables.filter(window.getRowType().getFieldList(),
              new Predicate<RelDataTypeField>() {
                @Override
                public boolean apply(RelDataTypeField relDataTypeField) {
                  return relDataTypeField.getName().startsWith("w" + w.i + "$");
                }
              });
      for (RelDataTypeField newField : newWindowFields) {
        newRowFields.add(newField);
      }
      RelDataType rowType = new RelRecordType(newRowFields);
      List<Window.RexWinAggCall> newWinAggCalls = Lists.newArrayList();
      for (Ord<Window.RexWinAggCall> aggOrd : Ord.zip(windowBase.aggCalls)) {
        Window.RexWinAggCall aggCall = aggOrd.getValue();
        // If the argument points at the constant and
        // additional fields have been generated by the Window below,
        // the index of constants will be shifted
        final List<RexNode> newOperandsOfWindowFunction = Lists.newArrayList();
        for (RexNode operand : aggCall.getOperands()) {
          if (operand instanceof RexInputRef) {
            final RexInputRef rexInputRef = (RexInputRef) operand;
            final int refIndex = rexInputRef.getIndex();
            // Check if this RexInputRef points at the constants
            if (rexInputRef.getIndex() >= startConstantsIndex) {
              operand = new RexInputRef(refIndex + constantShiftIndex,
                  window.constants.get(refIndex - startConstantsIndex).getType());
            }
          }
          newOperandsOfWindowFunction.add(operand);
        }
        aggCall = new Window.RexWinAggCall((SqlAggFunction) aggCall.getOperator(),
            aggCall.getType(), newOperandsOfWindowFunction, aggCall.ordinal);
        newWinAggCalls.add(new Window.RexWinAggCall((SqlAggFunction) aggCall.getOperator(),
            aggCall.getType(), aggCall.getOperands(), aggOrd.i));
      }
      windowBase = new Window.Group(windowBase.keys, windowBase.isRows,
          windowBase.lowerBound, windowBase.upperBound, windowBase.orderKeys,
          newWinAggCalls);
      input = new WindowPrel(window.getCluster(), window.getTraitSet().merge(traits),
          convertedInput, window.getConstants(), rowType, windowBase);
      constantShiftIndex += windowBase.aggCalls.size();
    }
    call.transformTo(input);
  }
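For each window group, getCollation(windowBase) supplies the collation added to the traits: conceptually the partition-by keys followed by the declared order-by collations, so the input arrives grouped by partition and sorted within it. The sketch below shows that combination with plain Calcite types; windowCollation is a hypothetical helper for illustration, not Drill's method.

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.calcite.rel.RelCollation;
  import org.apache.calcite.rel.RelCollations;
  import org.apache.calcite.rel.RelFieldCollation;
  import org.apache.calcite.util.ImmutableBitSet;

  public class WindowCollationExample {
    // Partition keys first (default direction), then the ORDER BY field collations.
    static RelCollation windowCollation(ImmutableBitSet partitionKeys,
        RelCollation orderKeys) {
      List<RelFieldCollation> fields = new ArrayList<>();
      for (int key : partitionKeys) {
        fields.add(new RelFieldCollation(key));
      }
      fields.addAll(orderKeys.getFieldCollations());
      return RelCollations.of(fields);
    }

    public static void main(String[] args) {
      // PARTITION BY field 0 ORDER BY field 2 DESC.
      RelCollation orderBy = RelCollations.of(
          new RelFieldCollation(2, RelFieldCollation.Direction.DESCENDING));
      System.out.println(windowCollation(ImmutableBitSet.of(0), orderBy));  // e.g. [0, 2 DESC]
    }
  }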