Use of org.apache.calcite.rel.core.Project in project hive by apache.
The class HiveUnionSimpleSelectsToInlineTableRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
  RexBuilder rexBuilder = call.builder().getRexBuilder();
  final HiveUnion union = call.rel(0);
  if (!union.all) {
    return;
  }
  List<RelNode> inputs = new ArrayList<RelNode>();
  List<Project> projects = new ArrayList<>();
  List<HiveTableFunctionScan> inlineTables = new ArrayList<>();
  for (RelNode input : union.getInputs()) {
    input = HiveRelDecorrelator.stripHep(input);
    if (isPlainProject(input)) {
      projects.add((Project) input);
      continue;
    }
    if (isInlineTableOperand(input)) {
      inlineTables.add((HiveTableFunctionScan) input);
      continue;
    }
    inputs.add(input);
  }
  if (projects.size() + inlineTables.size() <= 1) {
    // nothing to do
    return;
  }
  RowStorage newRows = new RowStorage();
  for (HiveTableFunctionScan rel : inlineTables) {
    // inline(array(row1,row2,...))
    RexCall rex = (RexCall) ((RexCall) rel.getCall()).operands.get(0);
    for (RexNode row : rex.operands) {
      if (!(row.getType() instanceof RelRecordType)) {
        return;
      }
      newRows.addRow(row);
    }
  }
  for (Project proj : projects) {
    RexNode row = rexBuilder.makeCall(SqlStdOperatorTable.ROW, proj.getProjects());
    if (!(row.getType() instanceof RelRecordType)) {
      return;
    }
    newRows.addRow(row);
  }
  if (newRows.keySet().size() + inputs.size() == union.getInputs().size()) {
    // nothing to do
    return;
  }
  if (dummyTable == null) {
    LOG.warn("Unexpected; rule would match - but dummyTable is not available");
    return;
  }
  for (RelRecordType type : newRows.keySet()) {
    List<RexNode> rows = newRows.get(type);
    RelDataType arrayType = rexBuilder.getTypeFactory().createArrayType(type, -1);
    try {
      SqlOperator inlineFn = SqlFunctionConverter.getCalciteFn("inline", Collections.singletonList(arrayType), type, true, false);
      SqlOperator arrayFn = SqlFunctionConverter.getCalciteFn("array", Collections.nCopies(rows.size(), type), arrayType, true, false);
      RexNode expr = rexBuilder.makeCall(arrayFn, rows);
      expr = rexBuilder.makeCall(inlineFn, expr);
      RelNode newInlineTable = buildTableFunctionScan(expr, union.getCluster());
      inputs.add(newInlineTable);
    } catch (CalciteSemanticException e) {
      LOG.debug("Conversion failed with exception", e);
      return;
    }
  }
  if (inputs.size() > 1) {
    HiveUnion newUnion = (HiveUnion) union.copy(union.getTraitSet(), inputs, true);
    call.transformTo(newUnion);
  } else {
    call.transformTo(inputs.get(0));
  }
}
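Rules like the one above are fired by a Calcite planner rather than called directly. As a rough sketch (this is not Hive's actual optimizer wiring, and the class name RuleRunnerSketch is made up), a single rewrite rule can be exercised through Calcite's HepPlanner:

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;

public final class RuleRunnerSketch {

  private RuleRunnerSketch() {
  }

  /** Applies a single rewrite rule to a plan; the planner invokes onMatch whenever the rule's pattern matches. */
  public static RelNode apply(RelOptRule rule, RelNode plan) {
    HepProgram program = new HepProgramBuilder()
        .addRuleInstance(rule)
        .build();
    HepPlanner planner = new HepPlanner(program);
    planner.setRoot(plan);
    return planner.findBestExp();
  }
}

Hive drives these rules from its own planning code; the sketch is only meant to show how onMatch ends up being invoked.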
Use of org.apache.calcite.rel.core.Project in project hive by apache.
The class HiveWindowingLastValueRewrite, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
  Project project = call.rel(0);
  List<RexNode> newExprs = new ArrayList<>();
  LastValueRewriteRexShuttle lastValueRewrite = new LastValueRewriteRexShuttle(project.getCluster().getRexBuilder());
  boolean modified = false;
  for (RexNode expr : project.getProjects()) {
    RexNode newExpr = lastValueRewrite.apply(expr);
    newExprs.add(newExpr);
    modified |= (newExpr != expr);
  }
  if (modified) {
    RelNode newProject = project.copy(project.getTraitSet(), project.getInput(), newExprs, project.getRowType());
    call.transformTo(newProject);
  }
}
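LastValueRewriteRexShuttle itself is not shown in this snippet; it follows Calcite's RexShuttle pattern, returning either the original expression or a rewritten copy, which is why the identity check newExpr != expr above is enough to detect a change. A minimal sketch of that pattern (the UPPER-to-LOWER rewrite is a placeholder, not Hive's actual last_value handling):

import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexShuttle;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

/** Sketch of a RexShuttle: rewrites matching calls and leaves everything else untouched. */
class ExampleRexShuttle extends RexShuttle {

  private final RexBuilder rexBuilder;

  ExampleRexShuttle(RexBuilder rexBuilder) {
    this.rexBuilder = rexBuilder;
  }

  @Override
  public RexNode visitCall(RexCall call) {
    // Visit operands first so nested calls are rewritten as well.
    RexNode visited = super.visitCall(call);
    if (call.getOperator().getName().equalsIgnoreCase("UPPER")) {
      // Placeholder rewrite: UPPER(x) -> LOWER(x).
      return rexBuilder.makeCall(SqlStdOperatorTable.LOWER, ((RexCall) visited).getOperands());
    }
    // Returning the (possibly unchanged) node preserves the identity check done in onMatch.
    return visited;
  }
}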
Use of org.apache.calcite.rel.core.Project in project hive by apache.
The class HiveRelFieldTrimmer, method trimFields.
/**
* Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
* {@link org.apache.calcite.adapter.druid.DruidQuery}.
*/
public TrimResult trimFields(DruidQuery dq, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
  final int fieldCount = dq.getRowType().getFieldCount();
  if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount)) && extraFields.isEmpty()) {
    // Everything is used and there are no extra fields,
    // so there is no need to introduce another RelNode.
    return trimFields((RelNode) dq, fieldsUsed, extraFields);
  }
  final RelNode newTableAccessRel = project(dq, fieldsUsed, extraFields, REL_BUILDER.get());
  // Some parts of the system cannot handle rows with zero fields,
  // so pretend that one field is used.
  if (fieldsUsed.cardinality() == 0) {
    RelNode input = newTableAccessRel;
    if (input instanceof Project) {
      // The table has implemented the project in the obvious way - by
      // creating project with 0 fields. Strip it away, and create our own
      // project with one field.
      Project project = (Project) input;
      if (project.getRowType().getFieldCount() == 0) {
        input = project.getInput();
      }
    }
    return dummyProject(fieldCount, input);
  }
  final Mapping mapping = createMapping(fieldsUsed, fieldCount);
  return result(newTableAccessRel, mapping);
}
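createMapping, the helper defined in Calcite's RelFieldTrimmer, maps each surviving field ordinal to its position in the trimmed row type, and result uses that mapping to fix up references in the parent. A small sketch of what such a compacting mapping looks like (assuming the standard RelFieldTrimmer behavior; the helper name compactingMapping is made up):

import org.apache.calcite.util.ImmutableBitSet;
import org.apache.calcite.util.mapping.Mapping;
import org.apache.calcite.util.mapping.MappingType;
import org.apache.calcite.util.mapping.Mappings;

/** Sketch: map each used field ordinal to its position among the surviving fields. */
static Mapping compactingMapping(ImmutableBitSet fieldsUsed, int fieldCount) {
  Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, fieldsUsed.cardinality());
  int newIndex = 0;
  for (int field : fieldsUsed) {
    mapping.set(field, newIndex++);
  }
  return mapping;
}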
Use of org.apache.calcite.rel.core.Project in project hive by apache.
The class HiveJoinConstraintsRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
  final Project project = call.rel(0);
  final RexBuilder rexBuilder = project.getCluster().getRexBuilder();
  List<RexNode> topProjExprs = project.getProjects();
  Join join = call.rel(1);
  final JoinRelType joinType = join.getJoinType();
  final RelNode leftInput = join.getLeft();
  final RelNode rightInput = join.getRight();
  final RexNode cond = join.getCondition();
  // TODO: https://issues.apache.org/jira/browse/HIVE-23920
  if (joinType == JoinRelType.ANTI) {
    return;
  }
  // 1) If it is an inner join, check whether the project only uses columns from one side.
  // That side will need to be the FK side.
  // If it is a left outer join, the left side will be the FK side.
  // If it is a right outer join, the right side will be the FK side.
  final RelNode fkInput;
  final RelNode nonFkInput;
  final ImmutableBitSet topRefs = RelOptUtil.InputFinder.bits(topProjExprs, null);
  final ImmutableBitSet leftBits = ImmutableBitSet.range(leftInput.getRowType().getFieldCount());
  final ImmutableBitSet rightBits = ImmutableBitSet.range(leftInput.getRowType().getFieldCount(), join.getRowType().getFieldCount());
  // These booleans indicate whether the corresponding left/right input is a potential FK side
  boolean leftInputPotentialFK = topRefs.intersects(leftBits);
  boolean rightInputPotentialFK = topRefs.intersects(rightBits);
  if (leftInputPotentialFK && rightInputPotentialFK && (joinType == JoinRelType.INNER || joinType == JoinRelType.SEMI)) {
    // Both inputs are referenced. Before making a decision, try to swap
    // references in the join condition if it is an inner join, i.e. if a join
    // condition column is referenced above the join, then we can just
    // reference the column from the other side.
    // For example, given two relations R(a1,a2), S(b1):
    // SELECT a2, b1 FROM R, S ON R.a1=S.b1 =>
    // SELECT a2, a1 FROM R, S ON R.a1=S.b1
    int joinFieldCount = join.getRowType().getFieldCount();
    Mapping mappingLR = Mappings.create(MappingType.PARTIAL_FUNCTION, joinFieldCount, joinFieldCount);
    Mapping mappingRL = Mappings.create(MappingType.PARTIAL_FUNCTION, joinFieldCount, joinFieldCount);
    for (RexNode conj : RelOptUtil.conjunctions(cond)) {
      if (!conj.isA(SqlKind.EQUALS)) {
        continue;
      }
      RexCall eq = (RexCall) conj;
      RexNode op1 = eq.getOperands().get(0);
      RexNode op2 = eq.getOperands().get(1);
      if (op1 instanceof RexInputRef && op2 instanceof RexInputRef) {
        // Check references
        int ref1 = ((RexInputRef) op1).getIndex();
        int ref2 = ((RexInputRef) op2).getIndex();
        int leftRef = -1;
        int rightRef = -1;
        if (leftBits.get(ref1) && rightBits.get(ref2)) {
          leftRef = ref1;
          rightRef = ref2;
        } else if (rightBits.get(ref1) && leftBits.get(ref2)) {
          leftRef = ref2;
          rightRef = ref1;
        }
        if (leftRef != -1 && rightRef != -1) {
          // Only add a mapping for a column that is not mapped yet; a second mapping would be useless
          if (mappingLR.getTargetOpt(leftRef) == -1) {
            mappingLR.set(leftRef, rightRef);
          }
          if (mappingRL.getTargetOpt(rightRef) == -1) {
            mappingRL.set(rightRef, leftRef);
          }
        }
      }
    }
    if (mappingLR.size() != 0) {
      // First insert missing elements into the mapping as identity mappings
      for (int i = 0; i < joinFieldCount; i++) {
        if (mappingLR.getTargetOpt(i) == -1) {
          mappingLR.set(i, i);
        }
        if (mappingRL.getTargetOpt(i) == -1) {
          mappingRL.set(i, i);
        }
      }
      // Then, we start by trying to reference only the left side in top projections
      List<RexNode> swappedTopProjExprs = topProjExprs.stream().map(projExpr -> projExpr.accept(new RexPermuteInputsShuttle(mappingRL, call.rel(1)))).collect(Collectors.toList());
      rightInputPotentialFK = RelOptUtil.InputFinder.bits(swappedTopProjExprs, null).intersects(rightBits);
      if (!rightInputPotentialFK) {
        topProjExprs = swappedTopProjExprs;
      } else {
        // If that did not work, we try to reference only the right side in top projections
        swappedTopProjExprs = topProjExprs.stream().map(projExpr -> projExpr.accept(new RexPermuteInputsShuttle(mappingLR, call.rel(1)))).collect(Collectors.toList());
        leftInputPotentialFK = RelOptUtil.InputFinder.bits(swappedTopProjExprs, null).intersects(leftBits);
        if (!leftInputPotentialFK) {
          topProjExprs = swappedTopProjExprs;
        }
      }
    }
  } else if (!leftInputPotentialFK && !rightInputPotentialFK) {
    // TODO: There are no references in the project operator above.
    // In this case, we should probably do two passes, one for
    // left as FK and one for right as FK, although it may be expensive.
    // Currently we only assume left as FK
    leftInputPotentialFK = true;
  }
  final Mode mode;
  switch (joinType) {
    case SEMI:
    case INNER:
      // case ANTI: // TODO: https://issues.apache.org/jira/browse/HIVE-23920
      if (leftInputPotentialFK && rightInputPotentialFK) {
        // Both sides are still referenced and there is nothing to transform
        return;
      }
      fkInput = leftInputPotentialFK ? leftInput : rightInput;
      nonFkInput = leftInputPotentialFK ? rightInput : leftInput;
      mode = Mode.REMOVE;
      break;
    case LEFT:
      fkInput = leftInput;
      nonFkInput = rightInput;
      mode = leftInputPotentialFK && !rightInputPotentialFK ? Mode.REMOVE : Mode.TRANSFORM;
      break;
    case RIGHT:
      fkInput = rightInput;
      nonFkInput = leftInput;
      mode = !leftInputPotentialFK && rightInputPotentialFK ? Mode.REMOVE : Mode.TRANSFORM;
      break;
    default:
      // Other join type, bail out
      return;
  }
  // 2) Check whether this join can be rewritten or removed
  RewritablePKFKJoinInfo r = HiveRelOptUtil.isRewritablePKFKJoin(join, fkInput, nonFkInput, call.getMetadataQuery());
  // 3) If it is the only condition, we can trigger the rewriting
  if (r.rewritable) {
    rewrite(mode, fkInput, nonFkInput, join, topProjExprs, call, project, r.nullableNodes);
  } else {
    // Possibly this could be enhanced to take other join types into consideration.
    if (joinType != JoinRelType.INNER) {
      return;
    }
    // First swap the FK and non-FK inputs and see if we can rewrite them
    RewritablePKFKJoinInfo fkRemoval = HiveRelOptUtil.isRewritablePKFKJoin(join, nonFkInput, fkInput, call.getMetadataQuery());
    if (fkRemoval.rewritable) {
      // We have established that nonFkInput is the FK side and fkInput is the PK side,
      // and there is no row filtering on the FK side.
      // Check that the FK-side join columns are distinct (i.e. there is a group by)
      ImmutableBitSet fkSideBitSet;
      if (nonFkInput == leftInput) {
        fkSideBitSet = leftBits;
      } else {
        fkSideBitSet = rightBits;
      }
      ImmutableBitSet.Builder fkJoinColBuilder = ImmutableBitSet.builder();
      for (RexNode conj : RelOptUtil.conjunctions(cond)) {
        if (!conj.isA(SqlKind.EQUALS)) {
          return;
        }
        RexCall eq = (RexCall) conj;
        RexNode op1 = eq.getOperands().get(0);
        RexNode op2 = eq.getOperands().get(1);
        if (op1 instanceof RexInputRef && op2 instanceof RexInputRef) {
          // Check references
          int ref1 = ((RexInputRef) op1).getIndex();
          int ref2 = ((RexInputRef) op2).getIndex();
          int leftRef = -1;
          int rightRef = -1;
          if (fkSideBitSet.get(ref1)) {
            // Check that the join columns are not nullable
            if (op1.getType().isNullable()) {
              return;
            }
            fkJoinColBuilder.set(fkSideBitSet.indexOf(ref1));
          } else {
            if (op2.getType().isNullable()) {
              return;
            }
            fkJoinColBuilder.set(fkSideBitSet.indexOf(ref2));
          }
        }
      }
      if (!call.getMetadataQuery().areColumnsUnique(nonFkInput, fkJoinColBuilder.build())) {
        return;
      }
      // All conditions are met, therefore we can perform the rewrite to remove the FK side
      rewrite(mode, fkInput, nonFkInput, join, topProjExprs, call, project, fkRemoval.nullableNodes);
    }
  }
}
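The leftBits/rightBits ranges above split the join's output columns between the two inputs, and intersects is what decides which side the top project references. A toy illustration of that check (the field counts and referenced columns are made up for this example):

import org.apache.calcite.util.ImmutableBitSet;

/** Toy illustration of the FK-side check used above. */
static void fkSideCheckExample() {
  int leftFieldCount = 3;   // left input contributes columns 0-2 of the join row type
  int joinFieldCount = 5;   // right input contributes columns 3-4
  ImmutableBitSet leftBits = ImmutableBitSet.range(leftFieldCount);
  ImmutableBitSet rightBits = ImmutableBitSet.range(leftFieldCount, joinFieldCount);
  ImmutableBitSet topRefs = ImmutableBitSet.of(1, 4);             // columns referenced above the join
  boolean leftInputPotentialFK = topRefs.intersects(leftBits);    // true: column 1 comes from the left
  boolean rightInputPotentialFK = topRefs.intersects(rightBits);  // true: column 4 comes from the right
}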
Use of org.apache.calcite.rel.core.Project in project hive by apache.
The class HiveJoinToMultiJoinRule, method onMatch.
// ~ Methods ----------------------------------------------------------------
@Override
public void onMatch(RelOptRuleCall call) {
  final HiveJoin join = call.rel(0);
  final RelNode left = call.rel(1);
  final RelNode right = call.rel(2);
  // 1. We try to merge this join with the left child
  RelNode multiJoin = mergeJoin(join, left, right);
  if (multiJoin != null) {
    call.transformTo(multiJoin);
    return;
  }
  // 2. If we cannot, we swap the inputs so we can try
  // to merge it with its right child
  RelNode swapped = JoinCommuteRule.swap(join, true);
  assert swapped != null;
  // The result of the swapping operation is either
  // i) a Project or,
  // ii) if the project is trivial, a raw join
  final HiveJoin newJoin;
  Project topProject = null;
  if (swapped instanceof HiveJoin) {
    newJoin = (HiveJoin) swapped;
  } else {
    topProject = (Project) swapped;
    newJoin = (HiveJoin) swapped.getInput(0);
  }
  // 3. We try to merge the join with the right child
  multiJoin = mergeJoin(newJoin, right, left);
  if (multiJoin != null) {
    if (topProject != null) {
      multiJoin = projectFactory.createProject(multiJoin, Collections.emptyList(), topProject.getProjects(), topProject.getRowType().getFieldNames());
    }
    call.transformTo(multiJoin);
    return;
  }
}