
Example 11 with Mapping

use of org.apache.calcite.util.mapping.Mapping in project hive by apache.

the class RelFieldTrimmer method dummyProject.

/**
 * Creates a project with a dummy column, to protect the parts of the system
 * that cannot handle a relational expression with no columns.
 *
 * @param fieldCount Number of fields in the original relational expression
 * @param input Trimmed input
 * @return Dummy project, or null if no dummy is required
 */
protected TrimResult dummyProject(int fieldCount, RelNode input) {
    final RelOptCluster cluster = input.getCluster();
    final Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, 1);
    if (input.getRowType().getFieldCount() == 1) {
        // Input already has one field (and may in fact be a dummy project we
        // created for the child). We can't do better.
        return result(input, mapping);
    }
    final RexLiteral expr = cluster.getRexBuilder().makeExactLiteral(BigDecimal.ZERO);
    final RelBuilder relBuilder = REL_BUILDER.get();
    relBuilder.push(input);
    relBuilder.project(ImmutableList.<RexNode>of(expr), ImmutableList.of("DUMMY"));
    return result(relBuilder.build(), mapping);
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) RexLiteral(org.apache.calcite.rex.RexLiteral) RelBuilder(org.apache.calcite.tools.RelBuilder) Mapping(org.apache.calcite.util.mapping.Mapping)
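
The mapping returned by dummyProject is worth a closer look: it declares fieldCount source fields and a single target field but sets no pairs, so none of the original columns survive into the dummy project. A minimal sketch (not part of the Hive source; the field count of 3 is made up) of how such a Mapping behaves:

import org.apache.calcite.util.mapping.Mapping;
import org.apache.calcite.util.mapping.MappingType;
import org.apache.calcite.util.mapping.Mappings;

// Illustrative sketch, not from the Hive source: behaviour of the dummy-project mapping.
public class DummyProjectMappingSketch {
    public static void main(String[] args) {
        // hypothetical original field count
        int fieldCount = 3;
        // Same call as in dummyProject: fieldCount source fields, one target column, no pairs set.
        Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, 1);
        System.out.println(mapping.getSourceCount()); // 3
        System.out.println(mapping.getTargetCount()); // 1
        System.out.println(mapping.getTargetOpt(0));  // -1: original field 0 is not projected
    }
}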

Example 12 with Mapping

use of org.apache.calcite.util.mapping.Mapping in project hive by apache.

the class HiveRelFieldTrimmer method trimFields.

/**
 * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
 * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin}.
 */
public TrimResult trimFields(HiveMultiJoin join, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
    final int fieldCount = join.getRowType().getFieldCount();
    final RexNode conditionExpr = join.getCondition();
    final List<RexNode> joinFilters = join.getJoinFilters();
    // Add in fields used in the condition.
    final Set<RelDataTypeField> combinedInputExtraFields = new LinkedHashSet<RelDataTypeField>(extraFields);
    RelOptUtil.InputFinder inputFinder = new RelOptUtil.InputFinder(combinedInputExtraFields, fieldsUsed);
    conditionExpr.accept(inputFinder);
    final ImmutableBitSet fieldsUsedPlus = inputFinder.build();
    int inputStartPos = 0;
    int changeCount = 0;
    int newFieldCount = 0;
    List<RelNode> newInputs = new ArrayList<RelNode>();
    List<Mapping> inputMappings = new ArrayList<Mapping>();
    for (RelNode input : join.getInputs()) {
        final RelDataType inputRowType = input.getRowType();
        final int inputFieldCount = inputRowType.getFieldCount();
        // Compute required mapping.
        ImmutableBitSet.Builder inputFieldsUsed = ImmutableBitSet.builder();
        for (int bit : fieldsUsedPlus) {
            if (bit >= inputStartPos && bit < inputStartPos + inputFieldCount) {
                inputFieldsUsed.set(bit - inputStartPos);
            }
        }
        Set<RelDataTypeField> inputExtraFields = Collections.<RelDataTypeField>emptySet();
        TrimResult trimResult = trimChild(join, input, inputFieldsUsed.build(), inputExtraFields);
        newInputs.add(trimResult.left);
        if (trimResult.left != input) {
            ++changeCount;
        }
        final Mapping inputMapping = trimResult.right;
        inputMappings.add(inputMapping);
        // Move offset to point to start of next input.
        inputStartPos += inputFieldCount;
        newFieldCount += inputMapping.getTargetCount();
    }
    Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, newFieldCount);
    int offset = 0;
    int newOffset = 0;
    for (int i = 0; i < inputMappings.size(); i++) {
        Mapping inputMapping = inputMappings.get(i);
        for (IntPair pair : inputMapping) {
            mapping.set(pair.source + offset, pair.target + newOffset);
        }
        offset += inputMapping.getSourceCount();
        newOffset += inputMapping.getTargetCount();
    }
    if (changeCount == 0 && mapping.isIdentity()) {
        return new TrimResult(join, Mappings.createIdentity(fieldCount));
    }
    // Build new join.
    final RexVisitor<RexNode> shuttle = new RexPermuteInputsShuttle(mapping, newInputs.toArray(new RelNode[newInputs.size()]));
    RexNode newConditionExpr = conditionExpr.accept(shuttle);
    List<RexNode> newJoinFilters = Lists.newArrayList();
    for (RexNode joinFilter : joinFilters) {
        newJoinFilters.add(joinFilter.accept(shuttle));
    }
    final RelDataType newRowType = RelOptUtil.permute(join.getCluster().getTypeFactory(), join.getRowType(), mapping);
    final RelNode newJoin = new HiveMultiJoin(join.getCluster(), newInputs, newConditionExpr, newRowType, join.getJoinInputs(), join.getJoinTypes(), newJoinFilters);
    return new TrimResult(newJoin, mapping);
}
Also used : LinkedHashSet(java.util.LinkedHashSet) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) HiveMultiJoin(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin) RelOptUtil(org.apache.calcite.plan.RelOptUtil) ArrayList(java.util.ArrayList) Mapping(org.apache.calcite.util.mapping.Mapping) RelDataType(org.apache.calcite.rel.type.RelDataType) IntPair(org.apache.calcite.util.mapping.IntPair) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelNode(org.apache.calcite.rel.RelNode) RexPermuteInputsShuttle(org.apache.calcite.rex.RexPermuteInputsShuttle) RexNode(org.apache.calcite.rex.RexNode)
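
The interesting part of this variant is how the per-input trim mappings are stitched into one join-level Mapping: each pair is shifted by the input's original field count on the source side and by the trimmed field count on the target side. The following standalone sketch (illustrative only, with made-up field counts for two join inputs) reproduces that offset loop:

import org.apache.calcite.util.mapping.IntPair;
import org.apache.calcite.util.mapping.Mapping;
import org.apache.calcite.util.mapping.MappingType;
import org.apache.calcite.util.mapping.Mappings;

// Sketch only: composing per-input trim mappings into a join-level mapping,
// mirroring the offset loop in trimFields(HiveMultiJoin, ...). Field counts are made up.
public class MultiJoinMappingSketch {
    public static void main(String[] args) {
        // Input 0 originally has 3 fields and keeps fields {0, 2}.
        Mapping left = Mappings.create(MappingType.INVERSE_SURJECTION, 3, 2);
        left.set(0, 0);
        left.set(2, 1);
        // Input 1 originally has 2 fields and keeps field {1}.
        Mapping right = Mappings.create(MappingType.INVERSE_SURJECTION, 2, 1);
        right.set(1, 0);

        // The join row type had 3 + 2 = 5 fields; the trimmed join has 2 + 1 = 3.
        Mapping joinMapping = Mappings.create(MappingType.INVERSE_SURJECTION, 5, 3);
        int offset = 0;
        int newOffset = 0;
        for (Mapping inputMapping : new Mapping[] {left, right}) {
            for (IntPair pair : inputMapping) {
                joinMapping.set(pair.source + offset, pair.target + newOffset);
            }
            offset += inputMapping.getSourceCount();
            newOffset += inputMapping.getTargetCount();
        }
        // joinMapping now contains 0 -> 0, 2 -> 1, 4 -> 2.
        for (IntPair pair : joinMapping) {
            System.out.println(pair.source + " -> " + pair.target);
        }
    }
}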

Example 13 with Mapping

use of org.apache.calcite.util.mapping.Mapping in project hive by apache.

the class HiveRelFieldTrimmer method trimFields.

/**
 * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
 * {@link org.apache.calcite.adapter.druid.DruidQuery}.
 */
public TrimResult trimFields(DruidQuery dq, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
    final int fieldCount = dq.getRowType().getFieldCount();
    if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount)) && extraFields.isEmpty()) {
        // All fields are used and there are no extra fields, so there is
        // no need to introduce another RelNode
        return trimFields((RelNode) dq, fieldsUsed, extraFields);
    }
    final RelNode newTableAccessRel = project(dq, fieldsUsed, extraFields, REL_BUILDER.get());
    // Some parts of the system cannot handle relational expressions with zero fields, so
    // pretend that one field is used.
    if (fieldsUsed.cardinality() == 0) {
        RelNode input = newTableAccessRel;
        if (input instanceof Project) {
            // The table has implemented the project in the obvious way - by
            // creating a project with 0 fields. Strip it away, and create our own
            // project with one field.
            Project project = (Project) input;
            if (project.getRowType().getFieldCount() == 0) {
                input = project.getInput();
            }
        }
        return dummyProject(fieldCount, input);
    }
    final Mapping mapping = createMapping(fieldsUsed, fieldCount);
    return result(newTableAccessRel, mapping);
}
Also used : HiveProject(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject) Project(org.apache.calcite.rel.core.Project) RelNode(org.apache.calcite.rel.RelNode) Mapping(org.apache.calcite.util.mapping.Mapping)
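
createMapping(fieldsUsed, fieldCount), inherited from Calcite's RelFieldTrimmer, produces an INVERSE_SURJECTION mapping in which every used field index is renumbered densely on the target side. Here is a hedged sketch of equivalent logic (the mappingFor helper is illustrative, not the Calcite implementation itself):

import org.apache.calcite.util.ImmutableBitSet;
import org.apache.calcite.util.mapping.Mapping;
import org.apache.calcite.util.mapping.MappingType;
import org.apache.calcite.util.mapping.Mappings;

// Sketch of the kind of mapping createMapping(fieldsUsed, fieldCount) is expected to produce.
public class CreateMappingSketch {
    static Mapping mappingFor(ImmutableBitSet fieldsUsed, int fieldCount) {
        Mapping mapping =
            Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, fieldsUsed.cardinality());
        int target = 0;
        for (int source : fieldsUsed) {
            mapping.set(source, target++); // used fields are renumbered densely
        }
        return mapping;
    }

    public static void main(String[] args) {
        // A hypothetical query with 5 output fields of which only 1 and 3 are used.
        Mapping mapping = mappingFor(ImmutableBitSet.of(1, 3), 5);
        System.out.println(mapping); // field 1 -> 0, field 3 -> 1
    }
}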

Example 14 with Mapping

use of org.apache.calcite.util.mapping.Mapping in project hive by apache.

the class HiveRelFieldTrimmer method trimFields.

@Override
public TrimResult trimFields(Aggregate aggregate, ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
    // Fields:
    // 
    // | sys fields | group fields | indicator fields | agg functions |
    // 
    // Two kinds of trimming:
    // 
    // 1. If agg rel has system fields but none of these are used, create an
    // agg rel with no system fields.
    // 
    // 2. If aggregate functions are not used, remove them.
    // 
    // But group and indicator fields stay, even if they are not used.
    // Compute which input fields are used.
    // agg functions
    // agg functions are added first (before group sets) because rewriteGBConstantKeys
    // needs them
    final ImmutableBitSet.Builder aggCallFieldsUsedBuilder = ImmutableBitSet.builder();
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        for (int i : aggCall.getArgList()) {
            aggCallFieldsUsedBuilder.set(i);
        }
        if (aggCall.filterArg >= 0) {
            aggCallFieldsUsedBuilder.set(aggCall.filterArg);
        }
    }
    // transform if group by contain constant keys
    ImmutableBitSet aggCallFieldsUsed = aggCallFieldsUsedBuilder.build();
    aggregate = rewriteGBConstantKeys(aggregate, fieldsUsed, aggCallFieldsUsed);
    // add group fields
    final ImmutableBitSet.Builder inputFieldsUsed = aggregate.getGroupSet().rebuild();
    inputFieldsUsed.addAll(aggCallFieldsUsed);
    final RelDataType rowType = aggregate.getRowType();
    // Create input with trimmed columns.
    final RelNode input = aggregate.getInput();
    final Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
    final TrimResult trimResult = trimChild(aggregate, input, inputFieldsUsed.build(), inputExtraFields);
    final RelNode newInput = trimResult.left;
    final Mapping inputMapping = trimResult.right;
    ImmutableBitSet originalGroupSet = aggregate.getGroupSet();
    ImmutableBitSet updatedGroupSet = generateNewGroupset(aggregate, fieldsUsed);
    ImmutableBitSet gbKeysDeleted = originalGroupSet.except(updatedGroupSet);
    ImmutableBitSet updatedGroupFields = ImmutableBitSet.range(originalGroupSet.cardinality());
    final int updatedGroupCount = updatedGroupSet.cardinality();
    // we need to clear the bits corresponding to deleted gb keys
    int setIdx = 0;
    while (setIdx != -1) {
        setIdx = gbKeysDeleted.nextSetBit(setIdx);
        if (setIdx != -1) {
            updatedGroupFields = updatedGroupFields.clear(setIdx);
            setIdx++;
        }
    }
    fieldsUsed = fieldsUsed.union(updatedGroupFields);
    // If the input is unchanged and all fields are used,
    // there's nothing to do.
    if (input == newInput && fieldsUsed.equals(ImmutableBitSet.range(rowType.getFieldCount()))) {
        return result(aggregate, Mappings.createIdentity(rowType.getFieldCount()));
    }
    // update the group by keys based on inputMapping
    ImmutableBitSet newGroupSet = Mappings.apply(inputMapping, updatedGroupSet);
    // Which agg calls are used by our consumer?
    int originalGroupCount = aggregate.getGroupSet().cardinality();
    int j = originalGroupCount;
    int usedAggCallCount = 0;
    for (int i = 0; i < aggregate.getAggCallList().size(); i++) {
        if (fieldsUsed.get(j++)) {
            ++usedAggCallCount;
        }
    }
    // Offset due to the number of system fields having changed.
    Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, rowType.getFieldCount(), updatedGroupCount + usedAggCallCount);
    // if the group keys were reduced, it means there were no grouping sets, therefore
    // we don't need to transform the group sets
    ImmutableList<ImmutableBitSet> newGroupSets = null;
    if (!updatedGroupSet.equals(aggregate.getGroupSet())) {
        newGroupSets = ImmutableList.of(newGroupSet);
    } else {
        newGroupSets = ImmutableList.copyOf(Iterables.transform(aggregate.getGroupSets(), input1 -> Mappings.apply(inputMapping, input1)));
    }
    // Populate mapping of where to find the fields. System, group key and
    // indicator fields first.
    int gbKeyIdx = 0;
    for (j = 0; j < originalGroupCount; j++) {
        if (fieldsUsed.get(j)) {
            mapping.set(j, gbKeyIdx);
            gbKeyIdx++;
        }
    }
    // Now create new agg calls, and populate mapping for them.
    final RelBuilder relBuilder = REL_BUILDER.get();
    relBuilder.push(newInput);
    final List<RelBuilder.AggCall> newAggCallList = new ArrayList<>();
    // because lookup in fieldsUsed is done using original group count
    j = originalGroupCount;
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        if (fieldsUsed.get(j)) {
            final ImmutableList<RexNode> args = relBuilder.fields(Mappings.apply2(inputMapping, aggCall.getArgList()));
            final RexNode filterArg = aggCall.filterArg < 0 ? null : relBuilder.field(Mappings.apply(inputMapping, aggCall.filterArg));
            RelBuilder.AggCall newAggCall = relBuilder.aggregateCall(aggCall.getAggregation(), aggCall.isDistinct(), aggCall.isApproximate(), filterArg, aggCall.name, args);
            mapping.set(j, updatedGroupCount + newAggCallList.size());
            newAggCallList.add(newAggCall);
        }
        ++j;
    }
    final RelBuilder.GroupKey groupKey = relBuilder.groupKey(newGroupSet, newGroupSets);
    relBuilder.aggregate(groupKey, newAggCallList);
    return result(relBuilder.build(), mapping);
}
Also used : RelBuilder(org.apache.calcite.tools.RelBuilder) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) ArrayList(java.util.ArrayList) RelDataType(org.apache.calcite.rel.type.RelDataType) Mapping(org.apache.calcite.util.mapping.Mapping) AggregateCall(org.apache.calcite.rel.core.AggregateCall) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelNode(org.apache.calcite.rel.RelNode) RexNode(org.apache.calcite.rex.RexNode)
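
After the aggregate's input has been trimmed, every column index the aggregate refers to must be translated through inputMapping; the code above does this with Mappings.apply for the group set and Mappings.apply2 for each agg call's argument list. A small standalone sketch (the field indexes are made up) of those two translations:

import java.util.Arrays;
import java.util.List;
import org.apache.calcite.util.ImmutableBitSet;
import org.apache.calcite.util.mapping.Mapping;
import org.apache.calcite.util.mapping.MappingType;
import org.apache.calcite.util.mapping.Mappings;

// Sketch only: translating aggregate column references through an input trim mapping.
public class AggregateRemapSketch {
    public static void main(String[] args) {
        // The trimmed input kept fields 1, 3 and 4 of an original 6-field input.
        Mapping inputMapping = Mappings.create(MappingType.INVERSE_SURJECTION, 6, 3);
        inputMapping.set(1, 0);
        inputMapping.set(3, 1);
        inputMapping.set(4, 2);

        // Group key {1, 3} becomes {0, 1} in the trimmed input.
        ImmutableBitSet newGroupSet = Mappings.apply(inputMapping, ImmutableBitSet.of(1, 3));
        System.out.println(newGroupSet);

        // An agg call over arguments [4, 3] now reads columns [2, 1].
        List<Integer> newArgs = Mappings.apply2(inputMapping, Arrays.asList(4, 3));
        System.out.println(newArgs);
    }
}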

Example 15 with Mapping

use of org.apache.calcite.util.mapping.Mapping in project hive by apache.

the class HiveJoinConstraintsRule method onMatch.

@Override
public void onMatch(RelOptRuleCall call) {
    final Project project = call.rel(0);
    final RexBuilder rexBuilder = project.getCluster().getRexBuilder();
    List<RexNode> topProjExprs = project.getProjects();
    Join join = call.rel(1);
    final JoinRelType joinType = join.getJoinType();
    final RelNode leftInput = join.getLeft();
    final RelNode rightInput = join.getRight();
    final RexNode cond = join.getCondition();
    // TODO:https://issues.apache.org/jira/browse/HIVE-23920
    if (joinType == JoinRelType.ANTI) {
        return;
    }
    // 1) If it is an inner, check whether project only uses columns from one side.
    // That side will need to be the FK side.
    // If it is a left outer, left will be the FK side.
    // If it is a right outer, right will be the FK side.
    final RelNode fkInput;
    final RelNode nonFkInput;
    final ImmutableBitSet topRefs = RelOptUtil.InputFinder.bits(topProjExprs, null);
    final ImmutableBitSet leftBits = ImmutableBitSet.range(leftInput.getRowType().getFieldCount());
    final ImmutableBitSet rightBits = ImmutableBitSet.range(leftInput.getRowType().getFieldCount(), join.getRowType().getFieldCount());
    // These booleans indicate whether the left and right inputs, respectively, are potential FK sides
    boolean leftInputPotentialFK = topRefs.intersects(leftBits);
    boolean rightInputPotentialFK = topRefs.intersects(rightBits);
    if (leftInputPotentialFK && rightInputPotentialFK && (joinType == JoinRelType.INNER || joinType == JoinRelType.SEMI)) {
        // Both inputs are referenced. Before making a decision, try to swap
        // references in join condition if it is an inner join, i.e. if a join
        // condition column is referenced above the join, then we can just
        // reference the column from the other side.
        // For example, given two relations R(a1,a2), S(b1) :
        // SELECT a2, b1 FROM R, S ON R.a1=S.b1 =>
        // SELECT a2, a1 FROM R, S ON R.a1=S.b1
        int joinFieldCount = join.getRowType().getFieldCount();
        Mapping mappingLR = Mappings.create(MappingType.PARTIAL_FUNCTION, joinFieldCount, joinFieldCount);
        Mapping mappingRL = Mappings.create(MappingType.PARTIAL_FUNCTION, joinFieldCount, joinFieldCount);
        for (RexNode conj : RelOptUtil.conjunctions(cond)) {
            if (!conj.isA(SqlKind.EQUALS)) {
                continue;
            }
            RexCall eq = (RexCall) conj;
            RexNode op1 = eq.getOperands().get(0);
            RexNode op2 = eq.getOperands().get(1);
            if (op1 instanceof RexInputRef && op2 instanceof RexInputRef) {
                // Check references
                int ref1 = ((RexInputRef) op1).getIndex();
                int ref2 = ((RexInputRef) op2).getIndex();
                int leftRef = -1;
                int rightRef = -1;
                if (leftBits.get(ref1) && rightBits.get(ref2)) {
                    leftRef = ref1;
                    rightRef = ref2;
                } else if (rightBits.get(ref1) && leftBits.get(ref2)) {
                    leftRef = ref2;
                    rightRef = ref1;
                }
                if (leftRef != -1 && rightRef != -1) {
                    // only record the first equivalence found for each column,
                    // as adding more than one is useless
                    if (mappingLR.getTargetOpt(leftRef) == -1) {
                        mappingLR.set(leftRef, rightRef);
                    }
                    if (mappingRL.getTargetOpt(rightRef) == -1) {
                        mappingRL.set(rightRef, leftRef);
                    }
                }
            }
        }
        if (mappingLR.size() != 0) {
            // First insert missing elements into the mapping as identity mappings
            for (int i = 0; i < joinFieldCount; i++) {
                if (mappingLR.getTargetOpt(i) == -1) {
                    mappingLR.set(i, i);
                }
                if (mappingRL.getTargetOpt(i) == -1) {
                    mappingRL.set(i, i);
                }
            }
            // Then, we start by trying to reference only left side in top projections
            List<RexNode> swappedTopProjExprs = topProjExprs.stream().map(projExpr -> projExpr.accept(new RexPermuteInputsShuttle(mappingRL, call.rel(1)))).collect(Collectors.toList());
            rightInputPotentialFK = RelOptUtil.InputFinder.bits(swappedTopProjExprs, null).intersects(rightBits);
            if (!rightInputPotentialFK) {
                topProjExprs = swappedTopProjExprs;
            } else {
                // If it did not work, we try to reference only right side in top projections
                swappedTopProjExprs = topProjExprs.stream().map(projExpr -> projExpr.accept(new RexPermuteInputsShuttle(mappingLR, call.rel(1)))).collect(Collectors.toList());
                leftInputPotentialFK = RelOptUtil.InputFinder.bits(swappedTopProjExprs, null).intersects(leftBits);
                if (!leftInputPotentialFK) {
                    topProjExprs = swappedTopProjExprs;
                }
            }
        }
    } else if (!leftInputPotentialFK && !rightInputPotentialFK) {
        // TODO: There are no references in the project operator above.
        // In this case, we should probably do two passes, one for
        // left as FK and one for right as FK, although it may be expensive.
        // Currently we only assume left as FK
        leftInputPotentialFK = true;
    }
    final Mode mode;
    switch(joinType) {
        case SEMI:
        case INNER:
            // case ANTI: //TODO:https://issues.apache.org/jira/browse/HIVE-23920
            if (leftInputPotentialFK && rightInputPotentialFK) {
                // Both sides are still referenced by the project, so we cannot pick an FK side
                // and there is nothing to transform
                return;
            }
            fkInput = leftInputPotentialFK ? leftInput : rightInput;
            nonFkInput = leftInputPotentialFK ? rightInput : leftInput;
            mode = Mode.REMOVE;
            break;
        case LEFT:
            fkInput = leftInput;
            nonFkInput = rightInput;
            mode = leftInputPotentialFK && !rightInputPotentialFK ? Mode.REMOVE : Mode.TRANSFORM;
            break;
        case RIGHT:
            fkInput = rightInput;
            nonFkInput = leftInput;
            mode = !leftInputPotentialFK && rightInputPotentialFK ? Mode.REMOVE : Mode.TRANSFORM;
            break;
        default:
            // Other type, bail out
            return;
    }
    // 2) Check whether this join can be rewritten or removed
    RewritablePKFKJoinInfo r = HiveRelOptUtil.isRewritablePKFKJoin(join, fkInput, nonFkInput, call.getMetadataQuery());
    // 3) If it is the only condition, we can trigger the rewriting
    if (r.rewritable) {
        rewrite(mode, fkInput, nonFkInput, join, topProjExprs, call, project, r.nullableNodes);
    } else {
        // Possibly this could be enhanced to take other join types into consideration.
        if (joinType != JoinRelType.INNER) {
            return;
        }
        // first swap fk and non-fk input and see if we can rewrite them
        RewritablePKFKJoinInfo fkRemoval = HiveRelOptUtil.isRewritablePKFKJoin(join, nonFkInput, fkInput, call.getMetadataQuery());
        if (fkRemoval.rewritable) {
            // we have established that nonFkInput is FK, and fkInput is PK
            // and there is no row filtering on FK side
            // check that the FK side join columns are distinct (i.e. there is a group by on them)
            ImmutableBitSet fkSideBitSet;
            if (nonFkInput == leftInput) {
                fkSideBitSet = leftBits;
            } else {
                fkSideBitSet = rightBits;
            }
            ImmutableBitSet.Builder fkJoinColBuilder = ImmutableBitSet.builder();
            for (RexNode conj : RelOptUtil.conjunctions(cond)) {
                if (!conj.isA(SqlKind.EQUALS)) {
                    return;
                }
                RexCall eq = (RexCall) conj;
                RexNode op1 = eq.getOperands().get(0);
                RexNode op2 = eq.getOperands().get(1);
                if (op1 instanceof RexInputRef && op2 instanceof RexInputRef) {
                    // Check references
                    int ref1 = ((RexInputRef) op1).getIndex();
                    int ref2 = ((RexInputRef) op2).getIndex();
                    int leftRef = -1;
                    int rightRef = -1;
                    if (fkSideBitSet.get(ref1)) {
                        // check that join columns are not nullable
                        if (op1.getType().isNullable()) {
                            return;
                        }
                        fkJoinColBuilder.set(fkSideBitSet.indexOf(ref1));
                    } else {
                        if (op2.getType().isNullable()) {
                            return;
                        }
                        fkJoinColBuilder.set(fkSideBitSet.indexOf(ref2));
                    }
                }
            }
            if (!call.getMetadataQuery().areColumnsUnique(nonFkInput, fkJoinColBuilder.build())) {
                return;
            }
            // all conditions are met, therefore we can perform the rewrite to remove the FK side
            rewrite(mode, fkInput, nonFkInput, join, topProjExprs, call, project, fkRemoval.nullableNodes);
        }
    }
}
Also used : Project(org.apache.calcite.rel.core.Project) Mappings(org.apache.calcite.util.mapping.Mappings) MappingType(org.apache.calcite.util.mapping.MappingType) LoggerFactory(org.slf4j.LoggerFactory) RelOptUtil(org.apache.calcite.plan.RelOptUtil) Join(org.apache.calcite.rel.core.Join) ArrayList(java.util.ArrayList) RexUtil(org.apache.calcite.rex.RexUtil) RexNode(org.apache.calcite.rex.RexNode) Mapping(org.apache.calcite.util.mapping.Mapping) RelBuilderFactory(org.apache.calcite.tools.RelBuilderFactory) HiveRelFactories(org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories) RexPermuteInputsShuttle(org.apache.calcite.rex.RexPermuteInputsShuttle) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) SqlKind(org.apache.calcite.sql.SqlKind) Logger(org.slf4j.Logger) RexBuilder(org.apache.calcite.rex.RexBuilder) HiveRelOptUtil(org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil) RelNode(org.apache.calcite.rel.RelNode) Collectors(java.util.stream.Collectors) RelOptRuleCall(org.apache.calcite.plan.RelOptRuleCall) RexInputRef(org.apache.calcite.rex.RexInputRef) RewritablePKFKJoinInfo(org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil.RewritablePKFKJoinInfo) RelOptRule(org.apache.calcite.plan.RelOptRule) HiveCalciteUtil(org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil) List(java.util.List) SqlStdOperatorTable(org.apache.calcite.sql.fun.SqlStdOperatorTable) JoinRelType(org.apache.calcite.rel.core.JoinRelType) RexCall(org.apache.calcite.rex.RexCall)
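
The Mapping usage in this rule differs from the trimmer examples: two PARTIAL_FUNCTION mappings record which left column is equi-joined to which right column (and vice versa), missing entries are padded with identities, and the result is fed to RexPermuteInputsShuttle so the top projection can be rewritten to reference only one side. The sketch below (a hypothetical 4-column join with condition $0 = $2; the Rex rewriting itself is omitted because it needs a full RelOptCluster) shows just the mapping construction:

import org.apache.calcite.util.mapping.Mapping;
import org.apache.calcite.util.mapping.MappingType;
import org.apache.calcite.util.mapping.Mappings;

// Sketch only: the column-equivalence mappings built in onMatch, shown on a hypothetical
// 4-column join (left = columns 0-1, right = columns 2-3) whose condition is $0 = $2.
public class JoinColumnSwapSketch {
    public static void main(String[] args) {
        int joinFieldCount = 4;
        Mapping mappingLR = Mappings.create(MappingType.PARTIAL_FUNCTION, joinFieldCount, joinFieldCount);
        Mapping mappingRL = Mappings.create(MappingType.PARTIAL_FUNCTION, joinFieldCount, joinFieldCount);

        // From the equi-condition $0 = $2: left column 0 can be replaced by right column 2,
        // and right column 2 by left column 0.
        mappingLR.set(0, 2);
        mappingRL.set(2, 0);

        // Pad the remaining columns with identity entries so the mapping is total,
        // exactly like the loop in onMatch before RexPermuteInputsShuttle is applied.
        for (int i = 0; i < joinFieldCount; i++) {
            if (mappingLR.getTargetOpt(i) == -1) {
                mappingLR.set(i, i);
            }
            if (mappingRL.getTargetOpt(i) == -1) {
                mappingRL.set(i, i);
            }
        }
        // mappingRL now sends $2 -> $0 and leaves every other column alone; feeding it to
        // new RexPermuteInputsShuttle(mappingRL, joinRel) would rewrite project expressions
        // so that they only reference the left input.
        System.out.println(mappingLR);
        System.out.println(mappingRL);
    }
}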

Aggregations

Mapping (org.apache.calcite.util.mapping.Mapping) 44
RelNode (org.apache.calcite.rel.RelNode) 36
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet) 29
RelDataType (org.apache.calcite.rel.type.RelDataType) 26
RexNode (org.apache.calcite.rex.RexNode) 25
ArrayList (java.util.ArrayList) 22
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField) 20
RelBuilder (org.apache.calcite.tools.RelBuilder) 15
RexPermuteInputsShuttle (org.apache.calcite.rex.RexPermuteInputsShuttle) 13
RexBuilder (org.apache.calcite.rex.RexBuilder) 11
RelOptUtil (org.apache.calcite.plan.RelOptUtil) 9
LinkedHashSet (java.util.LinkedHashSet) 8
AggregateCall (org.apache.calcite.rel.core.AggregateCall) 8
Aggregate (org.apache.calcite.rel.core.Aggregate) 6
Join (org.apache.calcite.rel.core.Join) 6
RexInputRef (org.apache.calcite.rex.RexInputRef) 6
RexLiteral (org.apache.calcite.rex.RexLiteral) 6
Mappings (org.apache.calcite.util.mapping.Mappings) 6
ImmutableList (com.google.common.collect.ImmutableList) 5
HashMap (java.util.HashMap) 5