
Example 41 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class HiveExceptRewriteRule, method makeFilterExprForExceptDistinct:

private RexNode makeFilterExprForExceptDistinct(HiveRelNode input, int columnSize, RelOptCluster cluster, RexBuilder rexBuilder) throws CalciteSemanticException {
    List<RexNode> childRexNodeLst = new ArrayList<RexNode>();
    RexInputRef a = rexBuilder.makeInputRef(input, columnSize - 2);
    RexLiteral zero = rexBuilder.makeBigintLiteral(new BigDecimal(0));
    childRexNodeLst.add(a);
    childRexNodeLst.add(zero);
    ImmutableList.Builder<RelDataType> calciteArgTypesBldr = new ImmutableList.Builder<RelDataType>();
    calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()));
    calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()));
    // a>0
    RexNode aMorethanZero = rexBuilder.makeCall(SqlFunctionConverter.getCalciteFn(">", calciteArgTypesBldr.build(), TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()), false), childRexNodeLst);
    childRexNodeLst = new ArrayList<RexNode>();
    RexLiteral two = rexBuilder.makeBigintLiteral(new BigDecimal(2));
    childRexNodeLst.add(a);
    childRexNodeLst.add(two);
    // 2*a
    RexNode twoa = rexBuilder.makeCall(SqlFunctionConverter.getCalciteFn("*", calciteArgTypesBldr.build(), TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()), false), childRexNodeLst);
    childRexNodeLst = new ArrayList<RexNode>();
    RexInputRef b = rexBuilder.makeInputRef(input, columnSize - 1);
    childRexNodeLst.add(twoa);
    childRexNodeLst.add(b);
    // 2a=b
    RexNode twoaEqualTob = rexBuilder.makeCall(SqlFunctionConverter.getCalciteFn("=", calciteArgTypesBldr.build(), TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()), false), childRexNodeLst);
    childRexNodeLst = new ArrayList<RexNode>();
    childRexNodeLst.add(aMorethanZero);
    childRexNodeLst.add(twoaEqualTob);
    // a>0 && 2a=b
    return rexBuilder.makeCall(SqlFunctionConverter.getCalciteFn("and", calciteArgTypesBldr.build(), TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()), false), childRexNodeLst);
}
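For comparison, the same predicate (a > 0 AND 2*a = b) could be assembled against Calcite's standard operator table instead of going through SqlFunctionConverter.getCalciteFn. The sketch below is only an illustrative equivalent, not code from the rule; it reuses the rexBuilder, input and columnSize variables from the method above and additionally assumes an import of org.apache.calcite.sql.fun.SqlStdOperatorTable.

// Illustrative sketch only: the same EXCEPT DISTINCT filter built with SqlStdOperatorTable.
RexInputRef a = rexBuilder.makeInputRef(input, columnSize - 2);
RexInputRef b = rexBuilder.makeInputRef(input, columnSize - 1);
RexLiteral zero = rexBuilder.makeBigintLiteral(BigDecimal.ZERO);
RexLiteral two = rexBuilder.makeBigintLiteral(BigDecimal.valueOf(2));
// a > 0
RexNode aMoreThanZero = rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN, a, zero);
// 2 * a = b
RexNode twoAEqualsB = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
        rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, two, a), b);
// a > 0 AND 2 * a = b
RexNode filterExpr = rexBuilder.makeCall(SqlStdOperatorTable.AND, aMoreThanZero, twoAEqualsB);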
Also used: RexLiteral(org.apache.calcite.rex.RexLiteral) ImmutableList(com.google.common.collect.ImmutableList) RelBuilder(org.apache.calcite.tools.RelBuilder) RexBuilder(org.apache.calcite.rex.RexBuilder) Builder(com.google.common.collect.ImmutableList.Builder) ArrayList(java.util.ArrayList) RexInputRef(org.apache.calcite.rex.RexInputRef) RelDataType(org.apache.calcite.rel.type.RelDataType) BigDecimal(java.math.BigDecimal) RexNode(org.apache.calcite.rex.RexNode)

Example 42 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class HiveIntersectRewriteRule, method onMatch:

// ~ Methods ----------------------------------------------------------------
public void onMatch(RelOptRuleCall call) {
    final HiveIntersect hiveIntersect = call.rel(0);
    final RelOptCluster cluster = hiveIntersect.getCluster();
    final RexBuilder rexBuilder = cluster.getRexBuilder();
    int numOfBranch = hiveIntersect.getInputs().size();
    Builder<RelNode> bldr = new ImmutableList.Builder<RelNode>();
    // 1st level GB: create a GB (col0, col1, count(1) as c) for each branch
    for (int index = 0; index < numOfBranch; index++) {
        RelNode input = hiveIntersect.getInputs().get(index);
        final List<RexNode> gbChildProjLst = Lists.newArrayList();
        final List<Integer> groupSetPositions = Lists.newArrayList();
        for (int cInd = 0; cInd < input.getRowType().getFieldList().size(); cInd++) {
            gbChildProjLst.add(rexBuilder.makeInputRef(input, cInd));
            groupSetPositions.add(cInd);
        }
        gbChildProjLst.add(rexBuilder.makeBigintLiteral(new BigDecimal(1)));
        // create the project before GB because we need a new project with extra column '1'.
        RelNode gbInputRel = null;
        try {
            gbInputRel = HiveProject.create(input, gbChildProjLst, null);
        } catch (CalciteSemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
        // groupSetPosition includes all the positions
        final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
        List<AggregateCall> aggregateCalls = Lists.newArrayList();
        RelDataType aggFnRetType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory());
        // count(1), 1's position is input.getRowType().getFieldList().size()
        AggregateCall aggregateCall = HiveCalciteUtil.createSingleArgAggCall("count", cluster, TypeInfoFactory.longTypeInfo, input.getRowType().getFieldList().size(), aggFnRetType);
        aggregateCalls.add(aggregateCall);
        HiveRelNode aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, false, groupSet, null, aggregateCalls);
        bldr.add(aggregateRel);
    }
    // create a union above all the branches
    HiveRelNode union = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build());
    // 2nd level GB: create a GB (col0, col1, count(c)) for each branch
    final List<Integer> groupSetPositions = Lists.newArrayList();
    // the index of c
    int cInd = union.getRowType().getFieldList().size() - 1;
    for (int index = 0; index < union.getRowType().getFieldList().size(); index++) {
        if (index != cInd) {
            groupSetPositions.add(index);
        }
    }
    List<AggregateCall> aggregateCalls = Lists.newArrayList();
    RelDataType aggFnRetType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory());
    AggregateCall aggregateCall = HiveCalciteUtil.createSingleArgAggCall("count", cluster, TypeInfoFactory.longTypeInfo, cInd, aggFnRetType);
    aggregateCalls.add(aggregateCall);
    if (hiveIntersect.all) {
        aggregateCall = HiveCalciteUtil.createSingleArgAggCall("min", cluster, TypeInfoFactory.longTypeInfo, cInd, aggFnRetType);
        aggregateCalls.add(aggregateCall);
    }
    final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
    HiveRelNode aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), union, false, groupSet, null, aggregateCalls);
    // add a filter count(c) = #branches
    int countInd = cInd;
    List<RexNode> childRexNodeLst = new ArrayList<RexNode>();
    RexInputRef ref = rexBuilder.makeInputRef(aggregateRel, countInd);
    RexLiteral literal = rexBuilder.makeBigintLiteral(new BigDecimal(numOfBranch));
    childRexNodeLst.add(ref);
    childRexNodeLst.add(literal);
    ImmutableList.Builder<RelDataType> calciteArgTypesBldr = new ImmutableList.Builder<RelDataType>();
    calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()));
    calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()));
    RexNode factoredFilterExpr = null;
    try {
        factoredFilterExpr = rexBuilder.makeCall(SqlFunctionConverter.getCalciteFn("=", calciteArgTypesBldr.build(), TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()), true), childRexNodeLst);
    } catch (CalciteSemanticException e) {
        LOG.debug(e.toString());
        throw new RuntimeException(e);
    }
    RelNode filterRel = new HiveFilter(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), aggregateRel, factoredFilterExpr);
    if (!hiveIntersect.all) {
        // the schema for intersect distinct is like this
        // R3 on all attributes + count(c) as cnt
        // finally add a project to project out the last column
        Set<Integer> projectOutColumnPositions = new HashSet<>();
        projectOutColumnPositions.add(filterRel.getRowType().getFieldList().size() - 1);
        try {
            call.transformTo(HiveCalciteUtil.createProjectWithoutColumn(filterRel, projectOutColumnPositions));
        } catch (CalciteSemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
    } else {
        // the schema for intersect all is like this
        // R3 + count(c) as cnt + min(c) as m
        // we create a input project for udtf whose schema is like this
        // min(c) as m + R3
        List<RexNode> originalInputRefs = Lists.transform(filterRel.getRowType().getFieldList(), new Function<RelDataTypeField, RexNode>() {

            @Override
            public RexNode apply(RelDataTypeField input) {
                return new RexInputRef(input.getIndex(), input.getType());
            }
        });
        List<RexNode> copyInputRefs = new ArrayList<>();
        copyInputRefs.add(originalInputRefs.get(originalInputRefs.size() - 1));
        for (int i = 0; i < originalInputRefs.size() - 2; i++) {
            copyInputRefs.add(originalInputRefs.get(i));
        }
        RelNode srcRel = null;
        try {
            srcRel = HiveProject.create(filterRel, copyInputRefs, null);
            HiveTableFunctionScan udtf = HiveCalciteUtil.createUDTFForSetOp(cluster, srcRel);
            // finally add a project to project out the 1st column
            Set<Integer> projectOutColumnPositions = new HashSet<>();
            projectOutColumnPositions.add(0);
            call.transformTo(HiveCalciteUtil.createProjectWithoutColumn(udtf, projectOutColumnPositions));
        } catch (SemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
    }
}
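As a side note, the first-level aggregate the loop above builds for each branch can also be expressed with Calcite's RelBuilder. The snippet below is a hypothetical sketch for a single branch, not the rule's own code; it assumes call.builder() is available in this context and that count(*) is an acceptable stand-in for the count(1) column named c.

// Hypothetical RelBuilder sketch of the per-branch "GB (col0, col1, count(1) as c)" step.
final RelBuilder relBuilder = call.builder();
RelNode branch = hiveIntersect.getInputs().get(0);
int fieldCount = branch.getRowType().getFieldCount();
RelNode firstLevelGb = relBuilder
        .push(branch)
        // group by every original column of the branch
        .aggregate(relBuilder.groupKey(ImmutableBitSet.range(fieldCount)),
                // count(*) as c plays the role of the count(1) column appended above
                relBuilder.count(false, "c"))
        .build();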
Also used: RelOptCluster(org.apache.calcite.plan.RelOptCluster) RexLiteral(org.apache.calcite.rex.RexLiteral) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) ImmutableList(com.google.common.collect.ImmutableList) RelBuilder(org.apache.calcite.tools.RelBuilder) RexBuilder(org.apache.calcite.rex.RexBuilder) Builder(com.google.common.collect.ImmutableList.Builder) HiveTableFunctionScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) ArrayList(java.util.ArrayList) RelDataType(org.apache.calcite.rel.type.RelDataType) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) HashSet(java.util.HashSet) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HiveIntersect(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect) HiveUnion(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion) BigDecimal(java.math.BigDecimal) HiveFilter(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter) AggregateCall(org.apache.calcite.rel.core.AggregateCall) HiveAggregate(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelNode(org.apache.calcite.rel.RelNode) RexInputRef(org.apache.calcite.rex.RexInputRef) RexNode(org.apache.calcite.rex.RexNode)

Example 43 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class HiveJoinAddNotNullRule, method getNotNullConditions:

private static List<RexNode> getNotNullConditions(RelOptCluster cluster, RexBuilder rexBuilder, RelNode input, Set<Integer> inputKeyPositions, Set<String> pushedPredicates) {
    final List<RexNode> newConditions = Lists.newArrayList();
    for (int pos : inputKeyPositions) {
        RelDataType keyType = input.getRowType().getFieldList().get(pos).getType();
        // Nothing to do if key cannot be null
        if (!keyType.isNullable()) {
            continue;
        }
        RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, rexBuilder.makeInputRef(input, pos));
        String digest = cond.toString();
        if (pushedPredicates.add(digest)) {
            newConditions.add(cond);
        }
    }
    return newConditions;
}
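A usage sketch of the digest-based deduplication: because every generated condition is recorded in pushedPredicates, asking for the same key position again yields nothing new. The call below is hypothetical; cluster, rexBuilder and input are assumed to be in scope inside the rule, and ImmutableSet comes from Guava.

// Hypothetical usage: the second call yields an empty list because IS NOT NULL($0)
// was already recorded in pushedPredicates by the first call.
Set<String> pushedPredicates = new HashSet<>();
List<RexNode> first = getNotNullConditions(cluster, rexBuilder, input,
        ImmutableSet.of(0, 2), pushedPredicates);   // e.g. IS NOT NULL($0), IS NOT NULL($2)
List<RexNode> second = getNotNullConditions(cluster, rexBuilder, input,
        ImmutableSet.of(0), pushedPredicates);      // empty list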
Also used: RelDataType(org.apache.calcite.rel.type.RelDataType) RexNode(org.apache.calcite.rex.RexNode)

Example 44 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class HiveJoinAddNotNullRule, method onMatch:

//~ Methods ----------------------------------------------------------------
@Override
public void onMatch(RelOptRuleCall call) {
    final Join join = call.rel(0);
    RelNode lChild = join.getLeft();
    RelNode rChild = join.getRight();
    HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class);
    assert registry != null;
    if (join.getJoinType() != JoinRelType.INNER) {
        return;
    }
    if (join.getCondition().isAlwaysTrue()) {
        return;
    }
    JoinPredicateInfo joinPredInfo;
    try {
        joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
    } catch (CalciteSemanticException e) {
        return;
    }
    Set<Integer> joinLeftKeyPositions = new HashSet<Integer>();
    Set<Integer> joinRightKeyPositions = new HashSet<Integer>();
    for (int i = 0; i < joinPredInfo.getEquiJoinPredicateElements().size(); i++) {
        JoinLeafPredicateInfo joinLeafPredInfo = joinPredInfo.getEquiJoinPredicateElements().get(i);
        joinLeftKeyPositions.addAll(joinLeafPredInfo.getProjsFromLeftPartOfJoinKeysInChildSchema());
        joinRightKeyPositions.addAll(joinLeafPredInfo.getProjsFromRightPartOfJoinKeysInChildSchema());
    }
    // Build not null conditions
    final RelOptCluster cluster = join.getCluster();
    final RexBuilder rexBuilder = join.getCluster().getRexBuilder();
    Set<String> leftPushedPredicates = Sets.newHashSet(registry.getPushedPredicates(join, 0));
    final List<RexNode> newLeftConditions = getNotNullConditions(cluster, rexBuilder, join.getLeft(), joinLeftKeyPositions, leftPushedPredicates);
    Set<String> rightPushedPredicates = Sets.newHashSet(registry.getPushedPredicates(join, 1));
    final List<RexNode> newRightConditions = getNotNullConditions(cluster, rexBuilder, join.getRight(), joinRightKeyPositions, rightPushedPredicates);
    // Nothing will be added to the expression
    RexNode newLeftPredicate = RexUtil.composeConjunction(rexBuilder, newLeftConditions, false);
    RexNode newRightPredicate = RexUtil.composeConjunction(rexBuilder, newRightConditions, false);
    if (newLeftPredicate.isAlwaysTrue() && newRightPredicate.isAlwaysTrue()) {
        return;
    }
    if (!newLeftPredicate.isAlwaysTrue()) {
        RelNode curr = lChild;
        lChild = filterFactory.createFilter(lChild, newLeftPredicate);
        call.getPlanner().onCopy(curr, lChild);
    }
    if (!newRightPredicate.isAlwaysTrue()) {
        RelNode curr = rChild;
        rChild = filterFactory.createFilter(rChild, newRightPredicate);
        call.getPlanner().onCopy(curr, rChild);
    }
    Join newJoin = join.copy(join.getTraitSet(), join.getCondition(), lChild, rChild, join.getJoinType(), join.isSemiJoinDone());
    call.getPlanner().onCopy(join, newJoin);
    // Register information about created predicates
    registry.getPushedPredicates(newJoin, 0).addAll(leftPushedPredicates);
    registry.getPushedPredicates(newJoin, 1).addAll(rightPushedPredicates);
    call.transformTo(newJoin);
}
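One detail worth spelling out: RexUtil.composeConjunction with its last argument set to false collapses an empty condition list to the literal TRUE, which is what makes the isAlwaysTrue() checks above a safe "nothing to push" test. A minimal illustration, assuming the same rexBuilder as above:

// With nullOnEmpty = false an empty list composes to the TRUE literal.
List<RexNode> noConditions = new ArrayList<>();
RexNode composed = RexUtil.composeConjunction(rexBuilder, noConditions, false);
assert composed.isAlwaysTrue();   // no filter is created in this case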
Also used: RelOptCluster(org.apache.calcite.plan.RelOptCluster) HiveSemiJoin(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin) Join(org.apache.calcite.rel.core.Join) HiveJoin(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin) JoinLeafPredicateInfo(org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo) RelNode(org.apache.calcite.rel.RelNode) JoinPredicateInfo(org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo) RexBuilder(org.apache.calcite.rex.RexBuilder) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) HashSet(java.util.HashSet) RexNode(org.apache.calcite.rex.RexNode)

Example 45 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class HiveJoinPushTransitivePredicatesRule, method getValidPreds:

private ImmutableList<RexNode> getValidPreds(RelOptCluster cluster, RelNode child, Set<String> predicatesToExclude, List<RexNode> rexs, RelDataType rType) {
    InputRefValidator validator = new InputRefValidator(rType.getFieldList());
    List<RexNode> valids = new ArrayList<RexNode>(rexs.size());
    for (RexNode rex : rexs) {
        try {
            rex.accept(validator);
            valids.add(rex);
        } catch (Util.FoundOne e) {
            Util.swallow(e, null);
        }
    }
    // We need to filter i) those that have been pushed already as stored in the join,
    // and ii) those that were already in the subtree rooted at child
    ImmutableList<RexNode> toPush = HiveCalciteUtil.getPredsNotPushedAlready(predicatesToExclude, child, valids);
    return toPush;
}
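The try/catch above relies on Calcite's Util.FoundOne control-flow idiom: the validator throws as soon as it finds an offending node, and the caller simply skips that predicate. The class below is a hypothetical visitor in the same spirit, not Hive's actual InputRefValidator; it assumes imports of RexVisitorImpl, RexInputRef and Util, and the forbiddenPositions set is made up for illustration.

// Hypothetical sketch of the Util.FoundOne visitor idiom (not Hive's InputRefValidator).
class ForbiddenInputRefFinder extends RexVisitorImpl<Void> {
    private final Set<Integer> forbiddenPositions;   // illustrative: positions that must not appear

    ForbiddenInputRefFinder(Set<Integer> forbiddenPositions) {
        super(true);   // visit nested expressions too
        this.forbiddenPositions = forbiddenPositions;
    }

    @Override
    public Void visitInputRef(RexInputRef inputRef) {
        if (forbiddenPositions.contains(inputRef.getIndex())) {
            // aborts the traversal; the caller catches Util.FoundOne and drops the predicate
            throw new Util.FoundOne(inputRef);
        }
        return super.visitInputRef(inputRef);
    }
}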
Also used: ArrayList(java.util.ArrayList) RexUtil(org.apache.calcite.rex.RexUtil) HiveCalciteUtil(org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil) Util(org.apache.calcite.util.Util) RexNode(org.apache.calcite.rex.RexNode)

Aggregations

RexNode (org.apache.calcite.rex.RexNode): 204 usages
RelNode (org.apache.calcite.rel.RelNode): 75 usages
ArrayList (java.util.ArrayList): 68 usages
RexBuilder (org.apache.calcite.rex.RexBuilder): 52 usages
RelDataType (org.apache.calcite.rel.type.RelDataType): 48 usages
RexInputRef (org.apache.calcite.rex.RexInputRef): 43 usages
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 41 usages
RexCall (org.apache.calcite.rex.RexCall): 32 usages
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet): 28 usages
Pair (org.apache.calcite.util.Pair): 22 usages
HashMap (java.util.HashMap): 21 usages
AggregateCall (org.apache.calcite.rel.core.AggregateCall): 21 usages
RexLiteral (org.apache.calcite.rex.RexLiteral): 19 usages
ImmutableList (com.google.common.collect.ImmutableList): 18 usages
Project (org.apache.calcite.rel.core.Project): 17 usages
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory): 14 usages
Filter (org.apache.calcite.rel.core.Filter): 12 usages
HashSet (java.util.HashSet): 11 usages
CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException): 11 usages
BigDecimal (java.math.BigDecimal): 10 usages