
Example 46 with RelOptCluster

use of org.apache.calcite.plan.RelOptCluster in project hive by apache.

the class HiveRelMdRuntimeRowCount method getRuntimeRowCount.

public Optional<Long> getRuntimeRowCount(RelNode rel) {
    RelOptCluster cluster = rel.getCluster();
    Context context = cluster.getPlanner().getContext();
    if (context instanceof HivePlannerContext) {
        StatsSource ss = ((HivePlannerContext) context).unwrap(StatsSource.class);
        if (ss.canProvideStatsFor(rel.getClass())) {
            Optional<OperatorStats> os = ss.lookup(RelTreeSignature.of(rel));
            if (os.isPresent()) {
                long outputRecords = os.get().getOutputRecords();
                return Optional.of(outputRecords);
            }
        }
    }
    return Optional.empty();
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) Context(org.apache.calcite.plan.Context) HivePlannerContext(org.apache.hadoop.hive.ql.optimizer.calcite.HivePlannerContext) OperatorStats(org.apache.hadoop.hive.ql.stats.OperatorStats) StatsSource(org.apache.hadoop.hive.ql.plan.mapper.StatsSource)
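A minimal sketch of how such a runtime lookup might be combined with the planner's regular estimate; the RowCountHelper class and its rowCount method are hypothetical names, and the Optional is passed in directly so nothing is assumed about how the metadata handler is instantiated.

import java.util.Optional;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.metadata.RelMetadataQuery;

final class RowCountHelper {
    private RowCountHelper() {
    }

    /** Prefer an observed runtime count; otherwise fall back to the cost-based estimate. */
    static double rowCount(RelNode rel, Optional<Long> runtimeRowCount) {
        if (runtimeRowCount.isPresent()) {
            // Exact cardinality observed during a previous execution.
            return runtimeRowCount.get();
        }
        RelMetadataQuery mq = rel.getCluster().getMetadataQuery();
        Double estimate = mq.getRowCount(rel);
        // Defensive default when no estimate is available.
        return estimate != null ? estimate : 1d;
    }
}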

Example 47 with RelOptCluster

use of org.apache.calcite.plan.RelOptCluster in project hive by apache.

the class HiveRelDecorrelator method decorrelateQuery.

// ~ Methods ----------------------------------------------------------------
/**
 * Decorrelates a query.
 *
 * <p>This is the main entry point to {@code RelDecorrelator}.
 *
 * @param rootRel Root node of the query
 *
 * @return Equivalent query with all
 * {@link org.apache.calcite.rel.logical.LogicalCorrelate} instances removed
 */
public static RelNode decorrelateQuery(RelNode rootRel) {
    final CorelMap corelMap = new CorelMapBuilder().build(rootRel);
    if (!corelMap.hasCorrelation()) {
        return rootRel;
    }
    final RelOptCluster cluster = rootRel.getCluster();
    final HiveRelDecorrelator decorrelator = new HiveRelDecorrelator(cluster, corelMap, cluster.getPlanner().getContext());
    RelNode newRootRel = decorrelator.removeCorrelationViaRule(rootRel);
    if (!decorrelator.cm.mapCorToCorRel.isEmpty()) {
        newRootRel = decorrelator.decorrelate(newRootRel);
    }
    return newRootRel;
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) RelNode(org.apache.calcite.rel.RelNode)
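The decorrelator is invoked as a standalone pre-processing step on the converted plan. A minimal sketch of such a call site, assuming HiveRelDecorrelator lives in org.apache.hadoop.hive.ql.optimizer.calcite.rules (the wrapper class here is made up for illustration):

import org.apache.calcite.rel.RelNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRelDecorrelator;

final class DecorrelateStep {
    private DecorrelateStep() {
    }

    /** Removes LogicalCorrelate nodes before the plan is handed to the planner. */
    static RelNode apply(RelNode rootRel) {
        // Returns rootRel unchanged when the plan contains no correlated variables.
        return HiveRelDecorrelator.decorrelateQuery(rootRel);
    }
}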

Example 48 with RelOptCluster

use of org.apache.calcite.plan.RelOptCluster in project hive by apache.

the class HiveRelDecorrelator method createValueGenerator.

/**
 * Create RelNode tree that produces a list of correlated variables.
 */
private RelNode createValueGenerator(Iterable<CorRef> correlations, int valueGenFieldOffset, SortedMap<CorDef, Integer> corDefOutputs) {
    final Map<RelNode, List<Integer>> mapNewInputToOutputs = new HashMap<>();
    final Map<RelNode, Integer> mapNewInputToNewOffset = new HashMap<>();
    // Add to map all the referenced positions (relative to each input rel).
    for (CorRef corVar : correlations) {
        final int oldCorVarOffset = corVar.field;
        final RelNode oldInput = getCorRel(corVar);
        assert oldInput != null;
        final Frame frame = map.get(oldInput);
        assert frame != null;
        final RelNode newInput = frame.r;
        final List<Integer> newLocalOutputs;
        if (!mapNewInputToOutputs.containsKey(newInput)) {
            newLocalOutputs = new ArrayList<>();
        } else {
            newLocalOutputs = mapNewInputToOutputs.get(newInput);
        }
        final int newCorVarOffset = frame.oldToNewOutputs.get(oldCorVarOffset);
        // Add all unique positions referenced.
        if (!newLocalOutputs.contains(newCorVarOffset)) {
            newLocalOutputs.add(newCorVarOffset);
        }
        mapNewInputToOutputs.put(newInput, newLocalOutputs);
    }
    int offset = 0;
    // Project only the correlated fields out of each inputRel
    // and join the projectRel together.
    // To make sure the plan does not change in terms of join order,
    // join these rels based on their occurrence in cor var list which
    // is sorted.
    final Set<RelNode> joinedInputs = new HashSet<>();
    RelNode r = null;
    for (CorRef corVar : correlations) {
        final RelNode oldInput = getCorRel(corVar);
        assert oldInput != null;
        final RelNode newInput = map.get(oldInput).r;
        assert newInput != null;
        if (!joinedInputs.contains(newInput)) {
            RelNode project = RelOptUtil.createProject(HiveRelFactories.HIVE_PROJECT_FACTORY, newInput, mapNewInputToOutputs.get(newInput));
            RelNode distinct = relBuilder.push(project).distinct().build();
            RelOptCluster cluster = distinct.getCluster();
            joinedInputs.add(newInput);
            mapNewInputToNewOffset.put(newInput, offset);
            offset += distinct.getRowType().getFieldCount();
            if (r == null) {
                r = distinct;
            } else {
                r = relBuilder.push(r).push(distinct).join(JoinRelType.INNER, cluster.getRexBuilder().makeLiteral(true)).build();
            }
        }
    }
    // Translate the position of each referenced correlated variable so it is
    // relative to the value generator's output: its index in the referenced
    // position list, plus that input's offset, plus valueGenFieldOffset.
    for (CorRef corRef : correlations) {
        // The first input of a Correlator is always the rel defining
        // the correlated variables.
        final RelNode oldInput = getCorRel(corRef);
        assert oldInput != null;
        final Frame frame = map.get(oldInput);
        final RelNode newInput = frame.r;
        assert newInput != null;
        final List<Integer> newLocalOutputs = mapNewInputToOutputs.get(newInput);
        final int newLocalOutput = frame.oldToNewOutputs.get(corRef.field);
        // newOutput is the index of the cor var in the referenced
        // position list plus the offset of referenced position list of
        // each newInput.
        final int newOutput = newLocalOutputs.indexOf(newLocalOutput) + mapNewInputToNewOffset.get(newInput) + valueGenFieldOffset;
        corDefOutputs.put(corRef.def(), newOutput);
    }
    return r;
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) HashMap(java.util.HashMap) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) RelNode(org.apache.calcite.rel.RelNode) ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) HashSet(java.util.HashSet)
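Taken on its own, the value-generator shape (project only the referenced columns, deduplicate, then inner-join the pieces on a literal TRUE condition) can be sketched with a plain Calcite RelBuilder. The table and column names below are illustrative only:

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.JoinRelType;
import org.apache.calcite.tools.RelBuilder;

final class ValueGeneratorShape {
    private ValueGeneratorShape() {
    }

    /** Projects the referenced column of each input, deduplicates, and joins on TRUE. */
    static RelNode build(RelBuilder b) {
        RelNode left = b.scan("EMP").project(b.field("DEPTNO")).distinct().build();
        RelNode right = b.scan("DEPT").project(b.field("DEPTNO")).distinct().build();
        return b.push(left)
                .push(right)
                .join(JoinRelType.INNER, b.literal(true))
                .build();
    }
}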

Example 49 with RelOptCluster

use of org.apache.calcite.plan.RelOptCluster in project hive by apache.

the class RelFieldTrimmer method dummyProject.

/**
 * Creates a project with a dummy column, to protect the parts of the system
 * that cannot handle a relational expression with no columns.
 *
 * @param fieldCount Number of fields in the original relational expression
 * @param input Trimmed input
 * @return Dummy project, or null if no dummy is required
 */
protected TrimResult dummyProject(int fieldCount, RelNode input) {
    final RelOptCluster cluster = input.getCluster();
    final Mapping mapping = Mappings.create(MappingType.INVERSE_SURJECTION, fieldCount, 1);
    if (input.getRowType().getFieldCount() == 1) {
        // Input already has one field (and may in fact be a dummy project we
        // created for the child). We can't do better.
        return result(input, mapping);
    }
    final RexLiteral expr = cluster.getRexBuilder().makeExactLiteral(BigDecimal.ZERO);
    final RelBuilder relBuilder = REL_BUILDER.get();
    relBuilder.push(input);
    relBuilder.project(ImmutableList.<RexNode>of(expr), ImmutableList.of("DUMMY"));
    return result(relBuilder.build(), mapping);
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) RexLiteral(org.apache.calcite.rex.RexLiteral) RelBuilder(org.apache.calcite.tools.RelBuilder) Mapping(org.apache.calcite.util.mapping.Mapping)
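The same single-column placeholder can also be built directly with a RelBuilder; the constant value and the DUMMY field name follow the method above, while the helper class name is made up for illustration:

import java.math.BigDecimal;

import com.google.common.collect.ImmutableList;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.tools.RelBuilder;

final class DummyProjectSketch {
    private DummyProjectSketch() {
    }

    /** Wraps the input in a project that emits a single constant column named DUMMY. */
    static RelNode wrap(RelBuilder relBuilder, RelNode input) {
        RexNode zero = relBuilder.getRexBuilder().makeExactLiteral(BigDecimal.ZERO);
        return relBuilder.push(input)
                .project(ImmutableList.of(zero), ImmutableList.of("DUMMY"))
                .build();
    }
}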

Example 50 with RelOptCluster

use of org.apache.calcite.plan.RelOptCluster in project hive by apache.

the class HiveIntersectRewriteRule method onMatch.

// ~ Methods ----------------------------------------------------------------
public void onMatch(RelOptRuleCall call) {
    final HiveIntersect hiveIntersect = call.rel(0);
    final RelOptCluster cluster = hiveIntersect.getCluster();
    final RexBuilder rexBuilder = cluster.getRexBuilder();
    int numOfBranch = hiveIntersect.getInputs().size();
    Builder<RelNode> bldr = new ImmutableList.Builder<RelNode>();
    // 1st level GB: create a GB (col0, col1, count(1) as c) for each branch
    for (int index = 0; index < numOfBranch; index++) {
        RelNode input = hiveIntersect.getInputs().get(index);
        final List<RexNode> gbChildProjLst = Lists.newArrayList();
        final List<Integer> groupSetPositions = Lists.newArrayList();
        for (int cInd = 0; cInd < input.getRowType().getFieldList().size(); cInd++) {
            gbChildProjLst.add(rexBuilder.makeInputRef(input, cInd));
            groupSetPositions.add(cInd);
        }
        gbChildProjLst.add(rexBuilder.makeBigintLiteral(new BigDecimal(1)));
        // create the project before GB because we need a new project with extra column '1'.
        RelNode gbInputRel = null;
        try {
            gbInputRel = HiveProject.create(input, gbChildProjLst, null);
        } catch (CalciteSemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
        // groupSetPosition includes all the positions
        final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
        List<AggregateCall> aggregateCalls = Lists.newArrayList();
        RelDataType aggFnRetType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory());
        // count(1), 1's position is input.getRowType().getFieldList().size()
        AggregateCall aggregateCall = HiveCalciteUtil.createSingleArgAggCall("count", cluster, TypeInfoFactory.longTypeInfo, input.getRowType().getFieldList().size(), aggFnRetType);
        aggregateCalls.add(aggregateCall);
        HiveRelNode aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, groupSet, null, aggregateCalls);
        bldr.add(aggregateRel);
    }
    // create a union above all the branches
    HiveRelNode union = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster), bldr.build());
    // 2nd level GB: create a GB (col0, col1, count(c)) for each branch
    final List<Integer> groupSetPositions = Lists.newArrayList();
    // the index of c
    int cInd = union.getRowType().getFieldList().size() - 1;
    for (int index = 0; index < union.getRowType().getFieldList().size(); index++) {
        if (index != cInd) {
            groupSetPositions.add(index);
        }
    }
    List<AggregateCall> aggregateCalls = Lists.newArrayList();
    RelDataType aggFnRetType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory());
    AggregateCall aggregateCall = HiveCalciteUtil.createSingleArgAggCall("count", cluster, TypeInfoFactory.longTypeInfo, cInd, aggFnRetType);
    aggregateCalls.add(aggregateCall);
    if (hiveIntersect.all) {
        aggregateCall = HiveCalciteUtil.createSingleArgAggCall("min", cluster, TypeInfoFactory.longTypeInfo, cInd, aggFnRetType);
        aggregateCalls.add(aggregateCall);
    }
    final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
    HiveRelNode aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), union, groupSet, null, aggregateCalls);
    // add a filter count(c) = #branches
    int countInd = cInd;
    List<RexNode> childRexNodeLst = new ArrayList<RexNode>();
    RexInputRef ref = rexBuilder.makeInputRef(aggregateRel, countInd);
    RexLiteral literal = rexBuilder.makeBigintLiteral(new BigDecimal(numOfBranch));
    childRexNodeLst.add(ref);
    childRexNodeLst.add(literal);
    ImmutableList.Builder<RelDataType> calciteArgTypesBldr = new ImmutableList.Builder<RelDataType>();
    calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()));
    calciteArgTypesBldr.add(TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()));
    RexNode factoredFilterExpr = null;
    try {
        factoredFilterExpr = rexBuilder.makeCall(SqlFunctionConverter.getCalciteFn("=", calciteArgTypesBldr.build(), TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory()), true, false), childRexNodeLst);
    } catch (CalciteSemanticException e) {
        LOG.debug(e.toString());
        throw new RuntimeException(e);
    }
    RelNode filterRel = new HiveFilter(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), aggregateRel, factoredFilterExpr);
    if (!hiveIntersect.all) {
        // the schema for intersect distinct is like this
        // R3 on all attributes + count(c) as cnt
        // finally add a project to project out the last column
        Set<Integer> projectOutColumnPositions = new HashSet<>();
        projectOutColumnPositions.add(filterRel.getRowType().getFieldList().size() - 1);
        try {
            call.transformTo(HiveCalciteUtil.createProjectWithoutColumn(filterRel, projectOutColumnPositions));
        } catch (CalciteSemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
    } else {
        // the schema for intersect all is like this
        // R3 + count(c) as cnt + min(c) as m
        // we create a input project for udtf whose schema is like this
        // min(c) as m + R3
        List<RexNode> originalInputRefs = Lists.transform(filterRel.getRowType().getFieldList(), new Function<RelDataTypeField, RexNode>() {

            @Override
            public RexNode apply(RelDataTypeField input) {
                return new RexInputRef(input.getIndex(), input.getType());
            }
        });
        List<RexNode> copyInputRefs = new ArrayList<>();
        copyInputRefs.add(originalInputRefs.get(originalInputRefs.size() - 1));
        for (int i = 0; i < originalInputRefs.size() - 2; i++) {
            copyInputRefs.add(originalInputRefs.get(i));
        }
        RelNode srcRel = null;
        try {
            srcRel = HiveProject.create(filterRel, copyInputRefs, null);
            HiveTableFunctionScan udtf = HiveCalciteUtil.createUDTFForSetOp(cluster, srcRel);
            // finally add a project to project out the 1st column
            Set<Integer> projectOutColumnPositions = new HashSet<>();
            projectOutColumnPositions.add(0);
            call.transformTo(HiveCalciteUtil.createProjectWithoutColumn(udtf, projectOutColumnPositions));
        } catch (SemanticException e) {
            LOG.debug(e.toString());
            throw new RuntimeException(e);
        }
    }
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) RexLiteral(org.apache.calcite.rex.RexLiteral) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) ImmutableList(com.google.common.collect.ImmutableList) RexBuilder(org.apache.calcite.rex.RexBuilder) Builder(com.google.common.collect.ImmutableList.Builder) HiveTableFunctionScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) ArrayList(java.util.ArrayList) RelDataType(org.apache.calcite.rel.type.RelDataType) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) HashSet(java.util.HashSet) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HiveIntersect(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect) HiveUnion(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion) BigDecimal(java.math.BigDecimal) HiveFilter(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter) AggregateCall(org.apache.calcite.rel.core.AggregateCall) HiveAggregate(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelNode(org.apache.calcite.rel.RelNode) RexInputRef(org.apache.calcite.rex.RexInputRef) RexNode(org.apache.calcite.rex.RexNode)
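A rule like this is normally run as part of a heuristic planning pass. The sketch below shows the generic Calcite HepPlanner registration pattern; it assumes HiveIntersectRewriteRule exposes a singleton INSTANCE field and lives in org.apache.hadoop.hive.ql.optimizer.calcite.rules, so adjust both to the actual class if they differ:

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveIntersectRewriteRule;

final class IntersectRewritePass {
    private IntersectRewritePass() {
    }

    /** Applies the intersect-to-aggregate rewrite over a plan with a heuristic planner. */
    static RelNode apply(RelNode root) {
        HepProgram program = new HepProgramBuilder()
                // INSTANCE is assumed here; construct the rule however it is actually exposed.
                .addRuleInstance(HiveIntersectRewriteRule.INSTANCE)
                .build();
        HepPlanner planner = new HepPlanner(program);
        planner.setRoot(root);
        return planner.findBestExp();
    }
}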

Aggregations

RelOptCluster (org.apache.calcite.plan.RelOptCluster): 117
RelNode (org.apache.calcite.rel.RelNode): 63
RelTraitSet (org.apache.calcite.plan.RelTraitSet): 36
RexBuilder (org.apache.calcite.rex.RexBuilder): 35
RexNode (org.apache.calcite.rex.RexNode): 31
ArrayList (java.util.ArrayList): 26
RelDataType (org.apache.calcite.rel.type.RelDataType): 23
Test (org.junit.Test): 21
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet): 15
List (java.util.List): 13
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 13
RelBuilder (org.apache.calcite.tools.RelBuilder): 13
RelCollation (org.apache.calcite.rel.RelCollation): 12
RelMetadataQuery (org.apache.calcite.rel.metadata.RelMetadataQuery): 11
RelOptTable (org.apache.calcite.plan.RelOptTable): 10
ImmutableList (com.google.common.collect.ImmutableList): 9
HashMap (java.util.HashMap): 9
RelOptPlanner (org.apache.calcite.plan.RelOptPlanner): 9
Join (org.apache.calcite.rel.core.Join): 9
LogicalJoin (org.apache.calcite.rel.logical.LogicalJoin): 9