
Example 6 with HiveUnion

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion in the Apache Hive project.

From the matches method of the class HiveSortUnionReduceRule:

// ~ Methods ----------------------------------------------------------------
@Override
public boolean matches(RelOptRuleCall call) {
    final HiveSortLimit sort = call.rel(0);
    final HiveUnion union = call.rel(1);
    // The rule applies only to UNION ALL, and only when Sort.fetch is present and greater than 0.
    return union.all && sort.fetch != null && // Calcite bug CALCITE-987
        RexLiteral.intValue(sort.fetch) > 0;
}
Also used : HiveSortLimit(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit) HiveUnion(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion)
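
Why the union.all check matters: pushing a sort-limit into the branches of a union (which is what this rule ultimately enables) is only safe when duplicates are preserved; under UNION DISTINCT, duplicates inside one branch can crowd out values that would have survived deduplication. Below is a minimal, self-contained sketch of that counterexample using plain Java collections in place of the relational operators; all class and method names here are hypothetical, not Hive code.

import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;

// Illustrative sketch only, not Hive code: why a sort-limit may be pushed below
// UNION ALL but not below UNION DISTINCT.
public class SortUnionPushdownSketch {

    // top-k of an already-sorted list
    static List<Integer> topK(List<Integer> sorted, int k) {
        return sorted.subList(0, Math.min(k, sorted.size()));
    }

    public static void main(String[] args) {
        List<Integer> branch1 = List.of(1, 1, 3); // duplicates crowd out the 3
        List<Integer> branch2 = List.of();
        int k = 2;

        // Correct plan: UNION DISTINCT first, then LIMIT k  ->  [1, 3]
        TreeSet<Integer> distinct = new TreeSet<>(branch1);
        distinct.addAll(branch2);
        List<Integer> correct = topK(new ArrayList<>(distinct), k);

        // Pushed plan: LIMIT k per branch, then UNION DISTINCT, then LIMIT k  ->  [1]
        TreeSet<Integer> pushedDistinct = new TreeSet<>(topK(branch1, k));
        pushedDistinct.addAll(topK(branch2, k));
        List<Integer> pushed = topK(new ArrayList<>(pushedDistinct), k);

        System.out.println("correct = " + correct + ", pushed = " + pushed);
    }
}

With UNION ALL the per-branch limits can never discard a row that the final top-k needs, which is the invariant such a rewrite relies on; the matches guard above encodes that precondition, plus the fetch check that works around CALCITE-987.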

Example 7 with HiveUnion

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion in the Apache Hive project.

From the onMatch method of the class JDBCUnionPushDownRule:

@Override
public void onMatch(RelOptRuleCall call) {
    LOG.debug("JDBCUnionPushDown has been called");
    final HiveUnion union = call.rel(0);
    final HiveJdbcConverter converter1 = call.rel(1);
    final HiveJdbcConverter converter2 = call.rel(2);
    List<RelNode> unionInput = Arrays.asList(converter1.getInput(), converter2.getInput());
    JdbcUnion jdbcUnion = new JdbcUnion(union.getCluster(), union.getTraitSet().replace(converter1.getJdbcConvention()), unionInput, union.all);
    call.transformTo(converter1.copy(converter1.getTraitSet(), jdbcUnion));
}
Also used : RelNode(org.apache.calcite.rel.RelNode) JdbcUnion(org.apache.calcite.adapter.jdbc.JdbcRules.JdbcUnion) HiveJdbcConverter(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter) HiveUnion(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion)
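
The net effect of the transform is that the Union(converter1, converter2) pattern becomes a single HiveJdbcConverter over a JdbcUnion, so the whole union crosses the Hive/JDBC boundary once and can be rendered as one SQL statement for the external database. The sketch below shows roughly what the remote side then evaluates, using plain java.sql against a hypothetical connection and table names (the SQL that Calcite's JDBC adapter actually generates may differ, and UNION ALL is used here only for concreteness since the rule forwards union.all unchanged):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Illustrative only: after the pushdown there is a single JDBC subtree, so the
// external database receives one statement containing the union, instead of
// Hive fetching both inputs and unioning them itself.
public class JdbcUnionPushdownSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection details and table names; substitute your own data source.
        try (Connection conn = DriverManager.getConnection("jdbc:postgresql://db.example.com/sales", "user", "pass");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "SELECT id, amount FROM orders_2023 UNION ALL SELECT id, amount FROM orders_2024")) {
            while (rs.next()) {
                System.out.println(rs.getInt("id") + "\t" + rs.getDouble("amount"));
            }
        }
    }
}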

Example 8 with HiveUnion

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion in the Apache Hive project.

From the onMatch method of the class HiveExceptRewriteRule:

// ~ Methods ----------------------------------------------------------------
public void onMatch(RelOptRuleCall call) {
    final HiveExcept hiveExcept = call.rel(0);
    final RelOptCluster cluster = hiveExcept.getCluster();
    final RexBuilder rexBuilder = cluster.getRexBuilder();
    Builder<RelNode> bldr = new ImmutableList.Builder<RelNode>();
    // 1st level GB: for each branch, produce all keys + VCol + c (built in createFirstGB)
    try {
        bldr.add(createFirstGB(hiveExcept.getInputs().get(0), true, cluster, rexBuilder));
        bldr.add(createFirstGB(hiveExcept.getInputs().get(1), false, cluster, rexBuilder));
    } catch (CalciteSemanticException e) {
        LOG.debug(e.toString());
        throw new RuntimeException(e);
    }
    // create a union above all the branches
    // the schema of union looks like this
    // all keys + VCol + c
    HiveRelNode union = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build());
    // 2nd level GB: create a GB (all keys + sum(c) as a + sum(VCol*c) as b) for
    // each branch
    final List<RexNode> gbChildProjLst = Lists.newArrayList();
    final List<Integer> groupSetPositions = Lists.newArrayList();
    int unionColumnSize = union.getRowType().getFieldList().size();
    for (int cInd = 0; cInd < unionColumnSize; cInd++) {
        gbChildProjLst.add(rexBuilder.makeInputRef(union, cInd));
        // the last 2 columns are VCol and c
        if (cInd < unionColumnSize - 2) {
            groupSetPositions.add(cInd);
        }
    }
    try {
        gbChildProjLst.add(multiply(rexBuilder.makeInputRef(union, unionColumnSize - 2), rexBuilder.makeInputRef(union, unionColumnSize - 1), cluster, rexBuilder));
    } catch (CalciteSemanticException e) {
        LOG.debug(e.toString());
        throw new RuntimeException(e);
    }
    RelNode gbInputRel = null;
    try {
        // Here we create a project for two reasons:
        // (1) GroupBy only accepts its argument as a position in the input, but we need to sum on VCol*c.
        // (2) It lets us reuse the function createSingleArgAggCall.
        gbInputRel = HiveProject.create(union, gbChildProjLst, null);
    } catch (CalciteSemanticException e) {
        LOG.debug(e.toString());
        throw new RuntimeException(e);
    }
    // gbInputRel's schema is like this
    // all keys + VCol + c + VCol*c
    List<AggregateCall> aggregateCalls = Lists.newArrayList();
    RelDataType aggFnRetType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory());
    // sum(c)
    AggregateCall aggregateCall = HiveCalciteUtil.createSingleArgAggCall("sum", cluster, TypeInfoFactory.longTypeInfo, unionColumnSize - 1, aggFnRetType);
    aggregateCalls.add(aggregateCall);
    // sum(VCol*c)
    aggregateCall = HiveCalciteUtil.createSingleArgAggCall("sum", cluster, TypeInfoFactory.longTypeInfo, unionColumnSize, aggFnRetType);
    aggregateCalls.add(aggregateCall);
    final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
    HiveRelNode aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, groupSet, null, aggregateCalls);
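    // aggregateRel schema: all keys + a (sum(c)) + b (sum(VCol * c))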
    if (!hiveExcept.all) {
        RelNode filterRel = null;
        try {
            filterRel = new HiveFilter(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), aggregateRel, makeFilterExprForExceptDistinct(aggregateRel, unionColumnSize, cluster, rexBuilder));
        } catch (CalciteSemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
        // finally add a project to project out the last 2 columns
        Set<Integer> projectOutColumnPositions = new HashSet<>();
        projectOutColumnPositions.add(filterRel.getRowType().getFieldList().size() - 2);
        projectOutColumnPositions.add(filterRel.getRowType().getFieldList().size() - 1);
        try {
            call.transformTo(HiveCalciteUtil.createProjectWithoutColumn(filterRel, projectOutColumnPositions));
        } catch (CalciteSemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
    } else {
        List<RexNode> originalInputRefs = Lists.transform(aggregateRel.getRowType().getFieldList(), new Function<RelDataTypeField, RexNode>() {

            @Override
            public RexNode apply(RelDataTypeField input) {
                return new RexInputRef(input.getIndex(), input.getType());
            }
        });
        List<RexNode> copyInputRefs = new ArrayList<>();
        try {
            copyInputRefs.add(makeExprForExceptAll(aggregateRel, unionColumnSize, cluster, rexBuilder));
        } catch (CalciteSemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
        for (int i = 0; i < originalInputRefs.size() - 2; i++) {
            copyInputRefs.add(originalInputRefs.get(i));
        }
        RelNode srcRel = null;
        try {
            srcRel = HiveProject.create(aggregateRel, copyInputRefs, null);
            HiveTableFunctionScan udtf = HiveCalciteUtil.createUDTFForSetOp(cluster, srcRel);
            // finally add a project to project out the 1st column
            Set<Integer> projectOutColumnPositions = new HashSet<>();
            projectOutColumnPositions.add(0);
            call.transformTo(HiveCalciteUtil.createProjectWithoutColumn(udtf, projectOutColumnPositions));
        } catch (SemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
    }
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) RexBuilder(org.apache.calcite.rex.RexBuilder) Builder(com.google.common.collect.ImmutableList.Builder) HiveTableFunctionScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) ArrayList(java.util.ArrayList) RelDataType(org.apache.calcite.rel.type.RelDataType) RexBuilder(org.apache.calcite.rex.RexBuilder) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) HashSet(java.util.HashSet) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) HiveExcept(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExcept) HiveUnion(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion) HiveFilter(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter) AggregateCall(org.apache.calcite.rel.core.AggregateCall) HiveAggregate(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) RelNode(org.apache.calcite.rel.RelNode) RexInputRef(org.apache.calcite.rex.RexInputRef) RexNode(org.apache.calcite.rex.RexNode)
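
The rewrite encodes EXCEPT as arithmetic over a per-branch tag (VCol) and a per-branch count (c). If a given key appears m times in the first input and n times in the second, and the first branch's rows carry VCol = 2 while the second branch's carry VCol = 1 (the actual constants live in createFirstGB, which is not shown above, so treat them as an assumption of this sketch), then after the second-level group-by a = sum(c) = m + n and b = sum(VCol * c) = 2m + n, hence m = b - a and n = 2a - b. EXCEPT DISTINCT keeps a key exactly when m > 0 and n = 0, while EXCEPT ALL emits it max(m - n, 0) times, which is what the filter and the UDTF built above have to express. A small plain-Java sketch of that bookkeeping:

import java.util.HashMap;
import java.util.Map;

// Illustrative only: the counting trick behind the EXCEPT rewrite.
// Assumes VCol = 2 for the first branch and VCol = 1 for the second
// (createFirstGB holds the real constants, which are not shown in the snippet).
public class ExceptRewriteSketch {

    public static void main(String[] args) {
        String[] left = {"a", "a", "b", "c"};
        String[] right = {"a", "c", "c"};

        // 1st level GB per branch: key -> c (count), tagged with VCol.
        // 2nd level GB over the union: key -> (a = sum(c), b = sum(VCol * c)).
        Map<String, long[]> ab = new HashMap<>(); // value = {a, b}
        for (String k : left) {
            long[] v = ab.computeIfAbsent(k, x -> new long[2]);
            v[0] += 1;      // a += c
            v[1] += 2;      // b += VCol * c, VCol = 2 for the first branch
        }
        for (String k : right) {
            long[] v = ab.computeIfAbsent(k, x -> new long[2]);
            v[0] += 1;      // a += c
            v[1] += 1;      // b += VCol * c, VCol = 1 for the second branch
        }

        for (Map.Entry<String, long[]> e : ab.entrySet()) {
            long a = e.getValue()[0], b = e.getValue()[1];
            long m = b - a;      // occurrences in the first input
            long n = 2 * a - b;  // occurrences in the second input
            boolean inExceptDistinct = m > 0 && n == 0;
            long exceptAllCopies = Math.max(0, m - n);
            System.out.printf("%s: EXCEPT DISTINCT=%s, EXCEPT ALL copies=%d%n",
                    e.getKey(), inExceptDistinct, exceptAllCopies);
        }
        // Expected: a -> distinct=false, 1 copy; b -> distinct=true, 1 copy; c -> distinct=false, 0 copies.
    }
}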

Aggregations

HiveUnion (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion) 8
RelNode (org.apache.calcite.rel.RelNode) 6
ArrayList (java.util.ArrayList) 5
RexNode (org.apache.calcite.rex.RexNode) 4
RelDataType (org.apache.calcite.rel.type.RelDataType) 3
RexBuilder (org.apache.calcite.rex.RexBuilder) 3
CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) 3
HiveTableFunctionScan (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan) 3
Builder (com.google.common.collect.ImmutableList.Builder) 2
HashSet (java.util.HashSet) 2
RelOptCluster (org.apache.calcite.plan.RelOptCluster) 2
AggregateCall (org.apache.calcite.rel.core.AggregateCall) 2
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField) 2
RexInputRef (org.apache.calcite.rex.RexInputRef) 2
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet) 2
HiveAggregate (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate) 2
HiveFilter (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter) 2
HiveRelNode (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) 2
HiveSortLimit (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit) 2
HiveJdbcConverter (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter) 2