Example 56 with Aggregate

use of org.apache.calcite.rel.core.Aggregate in project drill by apache.

the class DrillReduceAggregatesRule method reduceAggs.

/*
  private boolean isMatch(AggregateCall call) {
    if (call.getAggregation() instanceof SqlAvgAggFunction) {
      final SqlAvgAggFunction.Subtype subtype =
          ((SqlAvgAggFunction) call.getAggregation()).getSubtype();
      return (subtype == SqlAvgAggFunction.Subtype.AVG);
    }
    return false;
  }
 */
/**
 * Reduces all calls to AVG, STDDEV_POP, STDDEV_SAMP, VAR_POP and VAR_SAMP in
 * the aggregates list to simpler forms, typically combinations of SUM and
 * COUNT.
 *
 * <p>It handles newly generated common subexpressions, since common
 * subexpression elimination was done at the sql2rel stage.
 */
private void reduceAggs(RelOptRuleCall ruleCall, Aggregate oldAggRel) {
    RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
    List<AggregateCall> oldCalls = oldAggRel.getAggCallList();
    final int nGroups = oldAggRel.getGroupCount();
    List<AggregateCall> newCalls = new ArrayList<>();
    Map<AggregateCall, RexNode> aggCallMapping = new HashMap<>();
    List<RexNode> projList = new ArrayList<>();
    // pass through group key
    for (int i = 0; i < nGroups; ++i) {
        projList.add(rexBuilder.makeInputRef(getFieldType(oldAggRel, i), i));
    }
    // List of input expressions. If a particular aggregate needs more, it
    // will add an expression to the end, and we will create an extra
    // project.
    RelNode input = oldAggRel.getInput();
    List<RexNode> inputExprs = new ArrayList<>();
    for (RelDataTypeField field : input.getRowType().getFieldList()) {
        inputExprs.add(rexBuilder.makeInputRef(field.getType(), inputExprs.size()));
    }
    // create new agg function calls and rest of project list together
    for (AggregateCall oldCall : oldCalls) {
        projList.add(reduceAgg(oldAggRel, oldCall, newCalls, aggCallMapping, inputExprs));
    }
    final int extraArgCount = inputExprs.size() - input.getRowType().getFieldCount();
    if (extraArgCount > 0) {
        input = relBuilderFactory.create(input.getCluster(), null)
                .push(input)
                .projectNamed(
                        inputExprs,
                        CompositeList.of(
                                input.getRowType().getFieldNames(),
                                Collections.nCopies(extraArgCount, null)),
                        true)
                .build();
    }
    Aggregate newAggRel = newAggregateRel(oldAggRel, input, newCalls);
    RelNode projectRel = relBuilderFactory.create(newAggRel.getCluster(), null)
            .push(newAggRel)
            .projectNamed(projList, oldAggRel.getRowType().getFieldNames(), true)
            .build();
    ruleCall.transformTo(projectRel);
}
Also used: AggregateCall (org.apache.calcite.rel.core.AggregateCall), RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField), RelNode (org.apache.calcite.rel.RelNode), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), RexBuilder (org.apache.calcite.rex.RexBuilder), Aggregate (org.apache.calcite.rel.core.Aggregate), LogicalAggregate (org.apache.calcite.rel.logical.LogicalAggregate), RexNode (org.apache.calcite.rex.RexNode)
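
The rewrite this method drives is purely algebraic: AVG(x) becomes SUM(x) / COUNT(x), VAR_POP(x) becomes (SUM(x * x) - SUM(x) * SUM(x) / COUNT(x)) / COUNT(x), and the STDDEV variants are square roots of the corresponding variances. Below is a minimal sketch of applying such a rule with Calcite's HepPlanner; the import path and the INSTANCE accessor are assumptions for illustration, not taken from the snippet above.

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
// Assumed location of the rule; check the Drill sources for the actual package.
import org.apache.drill.exec.planner.logical.DrillReduceAggregatesRule;

public final class ReduceAggsDriver {
    // Runs the reduce-aggregates rewrite over a logical plan and returns the
    // transformed plan (assumes the rule exposes a singleton INSTANCE field).
    public static RelNode reduceAggregates(RelNode root) {
        HepProgram program = new HepProgramBuilder()
                .addRuleInstance(DrillReduceAggregatesRule.INSTANCE)
                .build();
        HepPlanner planner = new HepPlanner(program);
        planner.setRoot(root);
        return planner.findBestExp();
    }
}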

Example 57 with Aggregate

use of org.apache.calcite.rel.core.Aggregate in project flink by apache.

the class HiveParserCalcitePlanner method genGBHavingLogicalPlan.

private RelNode genGBHavingLogicalPlan(HiveParserQB qb, RelNode srcRel) throws SemanticException {
    RelNode gbFilter = null;
    HiveParserQBParseInfo qbp = qb.getParseInfo();
    String destClauseName = qbp.getClauseNames().iterator().next();
    HiveParserASTNode havingClause = qbp.getHavingForClause(destClauseName);
    if (havingClause != null) {
        if (!(srcRel instanceof Aggregate)) {
            // ill-formed query like select * from t1 having c1 > 0;
            throw new SemanticException("Having clause without any group-by.");
        }
        HiveParserASTNode targetNode = (HiveParserASTNode) havingClause.getChild(0);
        validateNoHavingReferenceToAlias(qb, targetNode, relToRowResolver.get(srcRel), semanticAnalyzer);
        if (!qbp.getDestToGroupBy().isEmpty()) {
            final boolean cubeRollupGrpSetPresent = (!qbp.getDestRollups().isEmpty() || !qbp.getDestGroupingSets().isEmpty() || !qbp.getDestCubes().isEmpty());
            // Special handling of grouping function
            targetNode = rewriteGroupingFunctionAST(getGroupByForClause(qbp, destClauseName), targetNode, !cubeRollupGrpSetPresent);
        }
        gbFilter = genFilterRelNode(qb, targetNode, srcRel, null, null, true);
    }
    return gbFilter;
}
Also used: HiveParserQBParseInfo (org.apache.flink.table.planner.delegation.hive.copy.HiveParserQBParseInfo), HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), RelNode (org.apache.calcite.rel.RelNode), Aggregate (org.apache.calcite.rel.core.Aggregate), LogicalAggregate (org.apache.calcite.rel.logical.LogicalAggregate), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
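
Since a HAVING predicate is only legal over an aggregated input, the method rejects a non-Aggregate srcRel and otherwise simply places a Filter above the Aggregate. A self-contained RelBuilder sketch of that target plan shape follows; the table "t1" and column "c1" are hypothetical.

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.tools.RelBuilder;

public final class HavingPlanShape {
    // Builds the equivalent of:
    //   SELECT c1, COUNT(*) AS cnt FROM t1 GROUP BY c1 HAVING COUNT(*) > 10
    // i.e. Filter(Aggregate(Scan)).
    public static RelNode havingShape(RelBuilder b) {
        return b.scan("t1")
                .aggregate(b.groupKey("c1"), b.count(false, "cnt"))
                .filter(b.call(SqlStdOperatorTable.GREATER_THAN,
                        b.field("cnt"), b.literal(10)))
                .build();
    }
}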

Example 58 with Aggregate

use of org.apache.calcite.rel.core.Aggregate in project flink by apache.

the class HiveParserCalcitePlanner method genSelectLogicalPlan.

// NOTE: there can only be one select clause since we don't handle multi-destination inserts.
private RelNode genSelectLogicalPlan(HiveParserQB qb, RelNode srcRel, RelNode starSrcRel, Map<String, Integer> outerNameToPos, HiveParserRowResolver outerRR) throws SemanticException {
    // 0. Generate a Select Node for Windowing
    // Exclude the newly-generated select columns from */etc. resolution.
    HashSet<ColumnInfo> excludedColumns = new HashSet<>();
    RelNode selForWindow = genSelectForWindowing(qb, srcRel, excludedColumns);
    srcRel = (selForWindow == null) ? srcRel : selForWindow;
    ArrayList<ExprNodeDesc> exprNodeDescs = new ArrayList<>();
    // 1. Get Select Expression List
    HiveParserQBParseInfo qbp = qb.getParseInfo();
    String selClauseName = qbp.getClauseNames().iterator().next();
    HiveParserASTNode selExprList = qbp.getSelForClause(selClauseName);
    // make sure that if there is a subquery, it is a top-level expression
    HiveParserSubQueryUtils.checkForTopLevelSubqueries(selExprList);
    final boolean cubeRollupGrpSetPresent = !qbp.getDestRollups().isEmpty() || !qbp.getDestGroupingSets().isEmpty() || !qbp.getDestCubes().isEmpty();
    // 3. Query Hints
    int posn = 0;
    boolean hintPresent = selExprList.getChild(0).getType() == HiveASTParser.QUERY_HINT;
    if (hintPresent) {
        posn++;
    }
    // 4. Bailout if select involves Transform
    boolean isInTransform = selExprList.getChild(posn).getChild(0).getType() == HiveASTParser.TOK_TRANSFORM;
    if (isInTransform) {
        String msg = "SELECT TRANSFORM is currently not supported in CBO, turn off cbo to use TRANSFORM.";
        throw new SemanticException(msg);
    }
    // 2. Row resolvers for input and output
    HiveParserRowResolver outRR = new HiveParserRowResolver();
    Integer pos = 0;
    // TODO: will this also fix windowing? try
    HiveParserRowResolver inputRR = relToRowResolver.get(srcRel), starRR = inputRR;
    if (starSrcRel != null) {
        starRR = relToRowResolver.get(starSrcRel);
    }
    // 5. Check if select involves UDTF
    String udtfTableAlias = null;
    SqlOperator udtfOperator = null;
    String genericUDTFName = null;
    ArrayList<String> udtfColAliases = new ArrayList<>();
    HiveParserASTNode expr = (HiveParserASTNode) selExprList.getChild(posn).getChild(0);
    int exprType = expr.getType();
    if (exprType == HiveASTParser.TOK_FUNCTION || exprType == HiveASTParser.TOK_FUNCTIONSTAR) {
        String funcName = HiveParserTypeCheckProcFactory.DefaultExprProcessor.getFunctionText(expr, true);
        // we can't just try to get the table function here because the operator
        // table throws an exception if it's not a table function
        SqlOperator sqlOperator = HiveParserUtils.getAnySqlOperator(funcName, frameworkConfig.getOperatorTable());
        if (HiveParserUtils.isUDTF(sqlOperator)) {
            LOG.debug("Found UDTF " + funcName);
            udtfOperator = sqlOperator;
            genericUDTFName = funcName;
            if (!HiveParserUtils.isNative(sqlOperator)) {
                semanticAnalyzer.unparseTranslator.addIdentifierTranslation((HiveParserASTNode) expr.getChild(0));
            }
            if (exprType == HiveASTParser.TOK_FUNCTIONSTAR) {
                semanticAnalyzer.genColListRegex(".*", null, (HiveParserASTNode) expr.getChild(0), exprNodeDescs, null, inputRR, starRR, pos, outRR, qb.getAliases(), false);
            }
        }
    }
    if (udtfOperator != null) {
        // Only support a single expression when it's a UDTF
        if (selExprList.getChildCount() > 1) {
            throw new SemanticException(generateErrorMessage((HiveParserASTNode) selExprList.getChild(1), ErrorMsg.UDTF_MULTIPLE_EXPR.getMsg()));
        }
        HiveParserASTNode selExpr = (HiveParserASTNode) selExprList.getChild(posn);
        // column names can also be inferred from the result of the UDTF
        for (int i = 1; i < selExpr.getChildCount(); i++) {
            HiveParserASTNode selExprChild = (HiveParserASTNode) selExpr.getChild(i);
            switch(selExprChild.getType()) {
                case HiveASTParser.Identifier:
                    udtfColAliases.add(unescapeIdentifier(selExprChild.getText().toLowerCase()));
                    semanticAnalyzer.unparseTranslator.addIdentifierTranslation(selExprChild);
                    break;
                case HiveASTParser.TOK_TABALIAS:
                    assert (selExprChild.getChildCount() == 1);
                    udtfTableAlias = unescapeIdentifier(selExprChild.getChild(0).getText());
                    qb.addAlias(udtfTableAlias);
                    semanticAnalyzer.unparseTranslator.addIdentifierTranslation((HiveParserASTNode) selExprChild.getChild(0));
                    break;
                default:
                    throw new SemanticException("Find invalid token type " + selExprChild.getType() + " in UDTF.");
            }
        }
        LOG.debug("UDTF table alias is " + udtfTableAlias);
        LOG.debug("UDTF col aliases are " + udtfColAliases);
    }
    // 6. Iterate over all expression (after SELECT)
    HiveParserASTNode exprList;
    if (udtfOperator != null) {
        exprList = expr;
    } else {
        exprList = selExprList;
    }
    // For UDTF's, skip the function name to get the expressions
    int startPos = udtfOperator != null ? posn + 1 : posn;
    // track the col aliases provided by user
    List<String> colAliases = new ArrayList<>();
    for (int i = startPos; i < exprList.getChildCount(); ++i) {
        colAliases.add(null);
        // 6.1 child can be EXPR AS ALIAS, or EXPR.
        HiveParserASTNode child = (HiveParserASTNode) exprList.getChild(i);
        boolean hasAsClause = child.getChildCount() == 2;
        // For a UDTF the child count may legitimately exceed 2 (extra column
        // aliases), so the AS-clause validation below is slightly different.
        if (udtfOperator == null && child.getChildCount() > 2) {
            throw new SemanticException(generateErrorMessage((HiveParserASTNode) child.getChild(2), ErrorMsg.INVALID_AS.getMsg()));
        }
        String tabAlias;
        String colAlias;
        if (udtfOperator != null) {
            tabAlias = null;
            colAlias = semanticAnalyzer.getAutogenColAliasPrfxLbl() + i;
            expr = child;
        } else {
            // 6.3 Get rid of TOK_SELEXPR
            expr = (HiveParserASTNode) child.getChild(0);
            String[] colRef = HiveParserUtils.getColAlias(child, semanticAnalyzer.getAutogenColAliasPrfxLbl(), inputRR, semanticAnalyzer.autogenColAliasPrfxIncludeFuncName(), i);
            tabAlias = colRef[0];
            colAlias = colRef[1];
            if (hasAsClause) {
                colAliases.set(colAliases.size() - 1, colAlias);
                semanticAnalyzer.unparseTranslator.addIdentifierTranslation((HiveParserASTNode) child.getChild(1));
            }
        }
        Map<HiveParserASTNode, RelNode> subQueryToRelNode = new HashMap<>();
        boolean isSubQuery = genSubQueryRelNode(qb, expr, srcRel, false, subQueryToRelNode);
        if (isSubQuery) {
            ExprNodeDesc subQueryDesc = semanticAnalyzer.genExprNodeDesc(expr, relToRowResolver.get(srcRel), outerRR, subQueryToRelNode, false);
            exprNodeDescs.add(subQueryDesc);
            ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), subQueryDesc.getWritableObjectInspector(), tabAlias, false);
            if (!outRR.putWithCheck(tabAlias, colAlias, null, colInfo)) {
                throw new SemanticException("Cannot add column to RR: " + tabAlias + "." + colAlias + " => " + colInfo + " due to duplication, see previous warnings");
            }
        } else {
            // 6.4 Build ExprNode corresponding to columns
            if (expr.getType() == HiveASTParser.TOK_ALLCOLREF) {
                pos = semanticAnalyzer.genColListRegex(
                        ".*",
                        expr.getChildCount() == 0
                                ? null
                                : HiveParserBaseSemanticAnalyzer.getUnescapedName(
                                                (HiveParserASTNode) expr.getChild(0))
                                        .toLowerCase(),
                        expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR,
                        qb.getAliases(), false);
            } else if (expr.getType() == HiveASTParser.TOK_TABLE_OR_COL
                    && !hasAsClause
                    && !inputRR.getIsExprResolver()
                    && HiveParserUtils.isRegex(
                            unescapeIdentifier(expr.getChild(0).getText()),
                            semanticAnalyzer.getConf())) {
                // In case the expression is a regex COL. This can only happen without an
                // AS clause. We don't allow this for ExprResolver - the GROUP BY case.
                pos = semanticAnalyzer.genColListRegex(
                        unescapeIdentifier(expr.getChild(0).getText()),
                        null, expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos,
                        outRR, qb.getAliases(), true);
            } else if (expr.getType() == HiveASTParser.DOT
                    && expr.getChild(0).getType() == HiveASTParser.TOK_TABLE_OR_COL
                    && inputRR.hasTableAlias(
                            unescapeIdentifier(
                                    expr.getChild(0).getChild(0).getText().toLowerCase()))
                    && !hasAsClause
                    && !inputRR.getIsExprResolver()
                    && HiveParserUtils.isRegex(
                            unescapeIdentifier(expr.getChild(1).getText()),
                            semanticAnalyzer.getConf())) {
                // In case the expression is TABLE.COL (col can be regex). This can only
                // happen without an AS clause. We don't allow this for ExprResolver -
                // the GROUP BY case.
                pos = semanticAnalyzer.genColListRegex(
                        unescapeIdentifier(expr.getChild(1).getText()),
                        unescapeIdentifier(
                                expr.getChild(0).getChild(0).getText().toLowerCase()),
                        expr, exprNodeDescs, excludedColumns, inputRR, starRR, pos, outRR,
                        qb.getAliases(), false);
            } else if (HiveASTParseUtils.containsTokenOfType(expr, HiveASTParser.TOK_FUNCTIONDI)
                    && !(srcRel instanceof Aggregate)) {
                // Likely a malformed query, e.g. select hash(distinct c1) from t1;
                throw new SemanticException("Distinct without an aggregation.");
            } else {
                // Case when this is an expression
                HiveParserTypeCheckCtx typeCheckCtx = new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster);
                // We allow stateful functions in the SELECT list (but nowhere else)
                typeCheckCtx.setAllowStatefulFunctions(true);
                if (!qbp.getDestToGroupBy().isEmpty()) {
                    // Special handling of grouping function
                    expr = rewriteGroupingFunctionAST(getGroupByForClause(qbp, selClauseName), expr, !cubeRollupGrpSetPresent);
                }
                ExprNodeDesc exprDesc = semanticAnalyzer.genExprNodeDesc(expr, inputRR, typeCheckCtx);
                String recommended = semanticAnalyzer.recommendName(exprDesc, colAlias);
                if (recommended != null && outRR.get(null, recommended) == null) {
                    colAlias = recommended;
                }
                exprNodeDescs.add(exprDesc);
                ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), exprDesc.getWritableObjectInspector(), tabAlias, false);
                colInfo.setSkewedCol(exprDesc instanceof ExprNodeColumnDesc && ((ExprNodeColumnDesc) exprDesc).isSkewedCol());
                // Hive errors out in case of duplication. We allow it and see what happens.
                outRR.put(tabAlias, colAlias, colInfo);
                if (exprDesc instanceof ExprNodeColumnDesc) {
                    ExprNodeColumnDesc colExp = (ExprNodeColumnDesc) exprDesc;
                    String[] altMapping = inputRR.getAlternateMappings(colExp.getColumn());
                    if (altMapping != null) {
                        // TODO: this can overwrite the mapping. Should this be allowed?
                        outRR.put(altMapping[0], altMapping[1], colInfo);
                    }
                }
                pos++;
            }
        }
    }
    // 7. Convert Hive projections to Calcite
    List<RexNode> calciteColLst = new ArrayList<>();
    HiveParserRexNodeConverter rexNodeConverter =
            new HiveParserRexNodeConverter(
                    cluster, srcRel.getRowType(), outerNameToPos,
                    buildHiveColNameToInputPosMap(exprNodeDescs, inputRR),
                    relToRowResolver.get(srcRel), outerRR, 0, false, subqueryId,
                    funcConverter);
    for (ExprNodeDesc colExpr : exprNodeDescs) {
        RexNode calciteCol = rexNodeConverter.convert(colExpr);
        calciteCol = convertNullLiteral(calciteCol).accept(funcConverter);
        calciteColLst.add(calciteCol);
    }
    // 8. Build Calcite Rel
    RelNode res;
    if (udtfOperator != null) {
        // The basic idea for CBO support of UDTF is to treat UDTF as a special project.
        res = genUDTFPlan(udtfOperator, genericUDTFName, udtfTableAlias, udtfColAliases, qb, calciteColLst, outRR.getColumnInfos(), srcRel, true, false);
    } else {
        // An identity projection over srcRel would be redundant here and would
        // only introduce an unnecessary node, so we reuse srcRel directly.
        if (HiveParserUtils.isIdentityProject(srcRel, calciteColLst, colAliases) && outerRR != null) {
            res = srcRel;
        } else {
            res = genSelectRelNode(calciteColLst, outRR, srcRel);
        }
    }
    // 9. Handle select distinct as GBY if there exist windowing functions
    if (selForWindow != null && selExprList.getToken().getType() == HiveASTParser.TOK_SELECTDI) {
        ImmutableBitSet groupSet = ImmutableBitSet.range(res.getRowType().getFieldList().size());
        res = LogicalAggregate.create(res, groupSet, Collections.emptyList(), Collections.emptyList());
        HiveParserRowResolver groupByOutputRowResolver = new HiveParserRowResolver();
        for (int i = 0; i < outRR.getColumnInfos().size(); i++) {
            ColumnInfo colInfo = outRR.getColumnInfos().get(i);
            ColumnInfo newColInfo = new ColumnInfo(colInfo.getInternalName(), colInfo.getType(), colInfo.getTabAlias(), colInfo.getIsVirtualCol());
            groupByOutputRowResolver.put(colInfo.getTabAlias(), colInfo.getAlias(), newColInfo);
        }
        relToHiveColNameCalcitePosMap.put(res, buildHiveToCalciteColumnMap(groupByOutputRowResolver));
        relToRowResolver.put(res, groupByOutputRowResolver);
    }
    return res;
}
Also used: ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), SqlOperator (org.apache.calcite.sql.SqlOperator), ArrayList (java.util.ArrayList), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), HiveParserRowResolver (org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), HashSet (java.util.HashSet), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException), HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), HiveParserQBParseInfo (org.apache.flink.table.planner.delegation.hive.copy.HiveParserQBParseInfo), RelNode (org.apache.calcite.rel.RelNode), Aggregate (org.apache.calcite.rel.core.Aggregate), LogicalAggregate (org.apache.calcite.rel.logical.LogicalAggregate), HiveParserTypeCheckCtx (org.apache.flink.table.planner.delegation.hive.copy.HiveParserTypeCheckCtx), RexNode (org.apache.calcite.rex.RexNode)
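
Step 9 above models SELECT DISTINCT as an Aggregate that groups on every output column and computes no aggregate calls, the classic DISTINCT-to-GROUP-BY rewrite. A minimal standalone sketch of just that step, mirroring the four-argument LogicalAggregate.create call used in the snippet (newer Calcite versions add a hints parameter):

import java.util.Collections;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Aggregate;
import org.apache.calcite.rel.logical.LogicalAggregate;
import org.apache.calcite.util.ImmutableBitSet;

public final class DistinctAsAggregate {
    // Wraps the input in a group-by over all of its columns; the row type is
    // unchanged, but duplicate rows are collapsed.
    public static Aggregate distinct(RelNode input) {
        ImmutableBitSet groupSet =
                ImmutableBitSet.range(input.getRowType().getFieldCount());
        return LogicalAggregate.create(
                input, groupSet, Collections.emptyList(), Collections.emptyList());
    }
}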

Example 59 with Aggregate

use of org.apache.calcite.rel.core.Aggregate in project flink by apache.

the class FlinkAggregateJoinTransposeRule method onMatch.

public void onMatch(RelOptRuleCall call) {
    final Aggregate origAgg = call.rel(0);
    final Join join = call.rel(1);
    final RexBuilder rexBuilder = origAgg.getCluster().getRexBuilder();
    final RelBuilder relBuilder = call.builder();
    // Converts an aggregate with AUXILIARY_GROUP into a regular aggregate.
    // If the converted aggregate can be pushed down,
    // AggregateReduceGroupingRule will try to reduce the grouping of the new
    // aggregates created by this rule.
    final Pair<Aggregate, List<RexNode>> newAggAndProject = toRegularAggregate(origAgg);
    final Aggregate aggregate = newAggAndProject.left;
    final List<RexNode> projectAfterAgg = newAggAndProject.right;
    // If any aggregate call is not splittable, or has a filter, or is distinct, bail out
    for (AggregateCall aggregateCall : aggregate.getAggCallList()) {
        if (aggregateCall.getAggregation().unwrap(SqlSplittableAggFunction.class) == null) {
            return;
        }
        if (aggregateCall.filterArg >= 0 || aggregateCall.isDistinct()) {
            return;
        }
    }
    if (join.getJoinType() != JoinRelType.INNER) {
        return;
    }
    if (!allowFunctions && !aggregate.getAggCallList().isEmpty()) {
        return;
    }
    // Do the columns used by the join appear in the output of the aggregate?
    final ImmutableBitSet aggregateColumns = aggregate.getGroupSet();
    final RelMetadataQuery mq = call.getMetadataQuery();
    final ImmutableBitSet keyColumns = keyColumns(aggregateColumns, mq.getPulledUpPredicates(join).pulledUpPredicates);
    final ImmutableBitSet joinColumns = RelOptUtil.InputFinder.bits(join.getCondition());
    final boolean allColumnsInAggregate = keyColumns.contains(joinColumns);
    final ImmutableBitSet belowAggregateColumns = aggregateColumns.union(joinColumns);
    // Split join condition
    final List<Integer> leftKeys = com.google.common.collect.Lists.newArrayList();
    final List<Integer> rightKeys = com.google.common.collect.Lists.newArrayList();
    final List<Boolean> filterNulls = com.google.common.collect.Lists.newArrayList();
    RexNode nonEquiConj = RelOptUtil.splitJoinCondition(join.getLeft(), join.getRight(), join.getCondition(), leftKeys, rightKeys, filterNulls);
    // If it contains non-equi join conditions, we bail out
    if (!nonEquiConj.isAlwaysTrue()) {
        return;
    }
    // Push each aggregate function down to each side that contains all of its
    // arguments. Note that COUNT(*), because it has no arguments, can go to
    // both sides.
    final Map<Integer, Integer> map = new HashMap<>();
    final List<Side> sides = new ArrayList<>();
    int uniqueCount = 0;
    int offset = 0;
    int belowOffset = 0;
    for (int s = 0; s < 2; s++) {
        final Side side = new Side();
        final RelNode joinInput = join.getInput(s);
        int fieldCount = joinInput.getRowType().getFieldCount();
        final ImmutableBitSet fieldSet = ImmutableBitSet.range(offset, offset + fieldCount);
        final ImmutableBitSet belowAggregateKeyNotShifted = belowAggregateColumns.intersect(fieldSet);
        for (Ord<Integer> c : Ord.zip(belowAggregateKeyNotShifted)) {
            map.put(c.e, belowOffset + c.i);
        }
        final Mappings.TargetMapping mapping = s == 0 ? Mappings.createIdentity(fieldCount) : Mappings.createShiftMapping(fieldCount + offset, 0, offset, fieldCount);
        final ImmutableBitSet belowAggregateKey = belowAggregateKeyNotShifted.shift(-offset);
        final boolean unique;
        if (!allowFunctions) {
            assert aggregate.getAggCallList().isEmpty();
            // If there are no functions, it doesn't matter as much whether we
            // aggregate the inputs before the join, because there will not be
            // any functions experiencing a cartesian product effect.
            // 
            // But finding out whether the input is already unique requires a call
            // to areColumnsUnique that currently (until [CALCITE-1048] "Make
            // metadata more robust" is fixed) places a heavy load on
            // the metadata system.
            // 
            // So we choose to imagine the input is already unique, which is
            // untrue but harmless.
            // 
            Util.discard(Bug.CALCITE_1048_FIXED);
            unique = true;
        } else {
            final Boolean unique0 = mq.areColumnsUnique(joinInput, belowAggregateKey);
            unique = unique0 != null && unique0;
        }
        if (unique) {
            ++uniqueCount;
            side.aggregate = false;
            relBuilder.push(joinInput);
            final Map<Integer, Integer> belowAggregateKeyToNewProjectMap = new HashMap<>();
            final List<RexNode> projects = new ArrayList<>();
            for (Integer i : belowAggregateKey) {
                belowAggregateKeyToNewProjectMap.put(i, projects.size());
                projects.add(relBuilder.field(i));
            }
            for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
                final SqlAggFunction aggregation = aggCall.e.getAggregation();
                final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
                if (!aggCall.e.getArgList().isEmpty() && fieldSet.contains(ImmutableBitSet.of(aggCall.e.getArgList()))) {
                    final RexNode singleton = splitter.singleton(rexBuilder, joinInput.getRowType(), aggCall.e.transform(mapping));
                    final RexNode targetSingleton = rexBuilder.ensureType(aggCall.e.type, singleton, false);
                    if (targetSingleton instanceof RexInputRef) {
                        final int index = ((RexInputRef) targetSingleton).getIndex();
                        if (!belowAggregateKey.get(index)) {
                            projects.add(targetSingleton);
                            side.split.put(aggCall.i, projects.size() - 1);
                        } else {
                            side.split.put(aggCall.i, belowAggregateKeyToNewProjectMap.get(index));
                        }
                    } else {
                        projects.add(targetSingleton);
                        side.split.put(aggCall.i, projects.size() - 1);
                    }
                }
            }
            relBuilder.project(projects);
            side.newInput = relBuilder.build();
        } else {
            side.aggregate = true;
            List<AggregateCall> belowAggCalls = new ArrayList<>();
            final SqlSplittableAggFunction.Registry<AggregateCall> belowAggCallRegistry = registry(belowAggCalls);
            final int oldGroupKeyCount = aggregate.getGroupCount();
            final int newGroupKeyCount = belowAggregateKey.cardinality();
            for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
                final SqlAggFunction aggregation = aggCall.e.getAggregation();
                final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
                final AggregateCall call1;
                if (fieldSet.contains(ImmutableBitSet.of(aggCall.e.getArgList()))) {
                    final AggregateCall splitCall = splitter.split(aggCall.e, mapping);
                    call1 = splitCall.adaptTo(joinInput, splitCall.getArgList(), splitCall.filterArg, oldGroupKeyCount, newGroupKeyCount);
                } else {
                    call1 = splitter.other(rexBuilder.getTypeFactory(), aggCall.e);
                }
                if (call1 != null) {
                    side.split.put(aggCall.i, belowAggregateKey.cardinality() + belowAggCallRegistry.register(call1));
                }
            }
            side.newInput = relBuilder.push(joinInput).aggregate(relBuilder.groupKey(belowAggregateKey, null), belowAggCalls).build();
        }
        offset += fieldCount;
        belowOffset += side.newInput.getRowType().getFieldCount();
        sides.add(side);
    }
    if (uniqueCount == 2) {
        // Both inputs of the join are unique; there is nothing to be gained by
        // this rule, and the aggregate+join may itself be the result of a previous
        // invocation of this rule; if we continue we might loop forever.
        return;
    }
    // Update condition
    final Mapping mapping = (Mapping) Mappings.target(map::get, join.getRowType().getFieldCount(), belowOffset);
    final RexNode newCondition = RexUtil.apply(mapping, join.getCondition());
    // Create new join
    relBuilder.push(sides.get(0).newInput).push(sides.get(1).newInput).join(join.getJoinType(), newCondition);
    // Aggregate above to sum up the sub-totals
    final List<AggregateCall> newAggCalls = new ArrayList<>();
    final int groupIndicatorCount = aggregate.getGroupCount() + aggregate.getIndicatorCount();
    final int newLeftWidth = sides.get(0).newInput.getRowType().getFieldCount();
    final List<RexNode> projects = new ArrayList<>(rexBuilder.identityProjects(relBuilder.peek().getRowType()));
    for (Ord<AggregateCall> aggCall : Ord.zip(aggregate.getAggCallList())) {
        final SqlAggFunction aggregation = aggCall.e.getAggregation();
        final SqlSplittableAggFunction splitter = Preconditions.checkNotNull(aggregation.unwrap(SqlSplittableAggFunction.class));
        final Integer leftSubTotal = sides.get(0).split.get(aggCall.i);
        final Integer rightSubTotal = sides.get(1).split.get(aggCall.i);
        newAggCalls.add(
                splitter.topSplit(
                        rexBuilder, registry(projects), groupIndicatorCount,
                        relBuilder.peek().getRowType(), aggCall.e,
                        leftSubTotal == null ? -1 : leftSubTotal,
                        rightSubTotal == null ? -1 : rightSubTotal + newLeftWidth));
    }
    relBuilder.project(projects);
    boolean aggConvertedToProjects = false;
    if (allColumnsInAggregate) {
        // let's see if we can convert aggregate into projects
        List<RexNode> projects2 = new ArrayList<>();
        for (int key : Mappings.apply(mapping, aggregate.getGroupSet())) {
            projects2.add(relBuilder.field(key));
        }
        int aggCallIdx = projects2.size();
        for (AggregateCall newAggCall : newAggCalls) {
            final SqlSplittableAggFunction splitter = newAggCall.getAggregation().unwrap(SqlSplittableAggFunction.class);
            if (splitter != null) {
                final RelDataType rowType = relBuilder.peek().getRowType();
                final RexNode singleton = splitter.singleton(rexBuilder, rowType, newAggCall);
                final RelDataType originalAggCallType = aggregate.getRowType().getFieldList().get(aggCallIdx).getType();
                final RexNode targetSingleton = rexBuilder.ensureType(originalAggCallType, singleton, false);
                projects2.add(targetSingleton);
            }
            aggCallIdx += 1;
        }
        if (projects2.size() == aggregate.getGroupSet().cardinality() + newAggCalls.size()) {
            // We successfully converted agg calls into projects.
            relBuilder.project(projects2);
            aggConvertedToProjects = true;
        }
    }
    if (!aggConvertedToProjects) {
        relBuilder.aggregate(relBuilder.groupKey(Mappings.apply(mapping, aggregate.getGroupSet()), Mappings.apply2(mapping, aggregate.getGroupSets())), newAggCalls);
    }
    if (projectAfterAgg != null) {
        relBuilder.project(projectAfterAgg, origAgg.getRowType().getFieldNames());
    }
    call.transformTo(relBuilder.build());
}
Also used: RelMetadataQuery (org.apache.calcite.rel.metadata.RelMetadataQuery), ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), Mapping (org.apache.calcite.util.mapping.Mapping), RelDataType (org.apache.calcite.rel.type.RelDataType), RexBuilder (org.apache.calcite.rex.RexBuilder), List (java.util.List), SqlSplittableAggFunction (org.apache.calcite.sql.SqlSplittableAggFunction), RelBuilder (org.apache.calcite.tools.RelBuilder), Join (org.apache.calcite.rel.core.Join), LogicalJoin (org.apache.calcite.rel.logical.LogicalJoin), SqlAggFunction (org.apache.calcite.sql.SqlAggFunction), AggregateCall (org.apache.calcite.rel.core.AggregateCall), RelNode (org.apache.calcite.rel.RelNode), Mappings (org.apache.calcite.util.mapping.Mappings), RexInputRef (org.apache.calcite.rex.RexInputRef), Aggregate (org.apache.calcite.rel.core.Aggregate), LogicalAggregate (org.apache.calcite.rel.logical.LogicalAggregate), RexNode (org.apache.calcite.rex.RexNode)
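
The net effect of the rule is to compute partial aggregates below the inner join and combine them above it, so the join processes pre-aggregated rows. The RelBuilder sketch below shows only that "after" plan shape; the tables "orders" and "customers" and their columns are hypothetical, and the real rule additionally relies on SqlSplittableAggFunction to split calls and handle row duplication, as the snippet above shows.

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.JoinRelType;
import org.apache.calcite.tools.RelBuilder;

public final class AggJoinTransposeShape {
    // Partial SUM(amount) per cust_id below the join, final SUM of the
    // partials above it; valid here because the join is INNER and equi-only.
    public static RelNode afterShape(RelBuilder b) {
        b.scan("orders")
                .aggregate(b.groupKey("cust_id"),
                        b.sum(false, "part_sum", b.field("amount")));
        b.scan("customers");
        return b.join(JoinRelType.INNER,
                        b.equals(b.field(2, 0, "cust_id"), b.field(2, 1, "id")))
                .aggregate(b.groupKey("cust_id"),
                        b.sum(false, "total", b.field("part_sum")))
                .build();
    }
}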

Example 60 with Aggregate

use of org.apache.calcite.rel.core.Aggregate in project flink by apache.

the class FlinkAggregateExpandDistinctAggregatesRule method rewriteUsingGroupingSets.

private void rewriteUsingGroupingSets(RelOptRuleCall call, Aggregate aggregate) {
    final Set<ImmutableBitSet> groupSetTreeSet = new TreeSet<>(ImmutableBitSet.ORDERING);
    final Map<ImmutableBitSet, Integer> groupSetToDistinctAggCallFilterArg = new HashMap<>();
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        if (!aggCall.isDistinct()) {
            groupSetTreeSet.add(aggregate.getGroupSet());
        } else {
            ImmutableBitSet groupSet = ImmutableBitSet.of(aggCall.getArgList()).setIf(aggCall.filterArg, aggCall.filterArg >= 0).union(aggregate.getGroupSet());
            groupSetToDistinctAggCallFilterArg.put(groupSet, aggCall.filterArg);
            groupSetTreeSet.add(groupSet);
        }
    }
    final com.google.common.collect.ImmutableList<ImmutableBitSet> groupSets = com.google.common.collect.ImmutableList.copyOf(groupSetTreeSet);
    final ImmutableBitSet fullGroupSet = ImmutableBitSet.union(groupSets);
    final List<AggregateCall> distinctAggCalls = new ArrayList<>();
    for (Pair<AggregateCall, String> aggCall : aggregate.getNamedAggCalls()) {
        if (!aggCall.left.isDistinct()) {
            AggregateCall newAggCall = aggCall.left.adaptTo(aggregate.getInput(), aggCall.left.getArgList(), aggCall.left.filterArg, aggregate.getGroupCount(), fullGroupSet.cardinality());
            distinctAggCalls.add(newAggCall.rename(aggCall.right));
        }
    }
    final RelBuilder relBuilder = call.builder();
    relBuilder.push(aggregate.getInput());
    final int groupCount = fullGroupSet.cardinality();
    final Map<ImmutableBitSet, Integer> filters = new LinkedHashMap<>();
    final int z = groupCount + distinctAggCalls.size();
    distinctAggCalls.add(
            AggregateCall.create(
                    SqlStdOperatorTable.GROUPING, false, false, false,
                    ImmutableIntList.copyOf(fullGroupSet), -1, RelCollations.EMPTY,
                    groupSets.size(), relBuilder.peek(), null, "$g"));
    for (Ord<ImmutableBitSet> groupSet : Ord.zip(groupSets)) {
        filters.put(groupSet.e, z + groupSet.i);
    }
    relBuilder.aggregate(relBuilder.groupKey(fullGroupSet, groupSets), distinctAggCalls);
    final RelNode distinct = relBuilder.peek();
    // The GROUPING call returns an integer identifying each grouping set; add a
    // project that converts those values to the BOOLEAN "$g_*" filter flags.
    if (!filters.isEmpty()) {
        final List<RexNode> nodes = new ArrayList<>(relBuilder.fields());
        final RexNode nodeZ = nodes.remove(nodes.size() - 1);
        for (Map.Entry<ImmutableBitSet, Integer> entry : filters.entrySet()) {
            final long v = groupValue(fullGroupSet, entry.getKey());
            // Get and remap the filterArg of the distinct aggregate call.
            int distinctAggCallFilterArg = remap(fullGroupSet, groupSetToDistinctAggCallFilterArg.getOrDefault(entry.getKey(), -1));
            RexNode expr;
            if (distinctAggCallFilterArg < 0) {
                expr = relBuilder.equals(nodeZ, relBuilder.literal(v));
            } else {
                RexBuilder rexBuilder = aggregate.getCluster().getRexBuilder();
                // merge the filter of the distinct aggregate call itself.
                expr = relBuilder.and(relBuilder.equals(nodeZ, relBuilder.literal(v)), rexBuilder.makeCall(SqlStdOperatorTable.IS_TRUE, relBuilder.field(distinctAggCallFilterArg)));
            }
            nodes.add(relBuilder.alias(expr, "$g_" + v));
        }
        relBuilder.project(nodes);
    }
    int aggCallIdx = 0;
    int x = groupCount;
    final List<AggregateCall> newCalls = new ArrayList<>();
    // TODO supports more aggCalls (currently only supports COUNT)
    // Some aggregate functions (e.g. COUNT) have the special property that they can return a
    // non-null result without any input. We need to make sure we return a result in this case.
    final List<Integer> needDefaultValueAggCalls = new ArrayList<>();
    for (AggregateCall aggCall : aggregate.getAggCallList()) {
        final int newFilterArg;
        final List<Integer> newArgList;
        final SqlAggFunction aggregation;
        if (!aggCall.isDistinct()) {
            aggregation = SqlStdOperatorTable.MIN;
            newArgList = ImmutableIntList.of(x++);
            newFilterArg = filters.get(aggregate.getGroupSet());
            switch(aggCall.getAggregation().getKind()) {
                case COUNT:
                    needDefaultValueAggCalls.add(aggCallIdx);
                    break;
                default:
            }
        } else {
            aggregation = aggCall.getAggregation();
            newArgList = remap(fullGroupSet, aggCall.getArgList());
            newFilterArg = filters.get(ImmutableBitSet.of(aggCall.getArgList()).setIf(aggCall.filterArg, aggCall.filterArg >= 0).union(aggregate.getGroupSet()));
        }
        final AggregateCall newCall = AggregateCall.create(aggregation, false, aggCall.isApproximate(), false, newArgList, newFilterArg, RelCollations.EMPTY, aggregate.getGroupCount(), distinct, null, aggCall.name);
        newCalls.add(newCall);
        aggCallIdx++;
    }
    relBuilder.aggregate(relBuilder.groupKey(remap(fullGroupSet, aggregate.getGroupSet()), remap(fullGroupSet, aggregate.getGroupSets())), newCalls);
    if (!needDefaultValueAggCalls.isEmpty() && aggregate.getGroupCount() == 0) {
        final Aggregate newAgg = (Aggregate) relBuilder.peek();
        final List<RexNode> nodes = new ArrayList<>();
        for (int i = 0; i < newAgg.getGroupCount(); ++i) {
            nodes.add(RexInputRef.of(i, newAgg.getRowType()));
        }
        for (int i = 0; i < newAgg.getAggCallList().size(); ++i) {
            final RexNode inputRef = RexInputRef.of(newAgg.getGroupCount() + i, newAgg.getRowType());
            RexNode newNode = inputRef;
            if (needDefaultValueAggCalls.contains(i)) {
                SqlKind originalFunKind = aggregate.getAggCallList().get(i).getAggregation().getKind();
                switch(originalFunKind) {
                    case COUNT:
                        newNode = relBuilder.call(SqlStdOperatorTable.CASE, relBuilder.isNotNull(inputRef), inputRef, relBuilder.literal(BigDecimal.ZERO));
                        break;
                    default:
                }
            }
            nodes.add(newNode);
        }
        relBuilder.project(nodes);
    }
    relBuilder.convert(aggregate.getRowType(), true);
    call.transformTo(relBuilder.build());
}
Also used: ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), ArrayList (java.util.ArrayList), TreeSet (java.util.TreeSet), RexBuilder (org.apache.calcite.rex.RexBuilder), RelBuilder (org.apache.calcite.tools.RelBuilder), SqlAggFunction (org.apache.calcite.sql.SqlAggFunction), SqlKind (org.apache.calcite.sql.SqlKind), AggregateCall (org.apache.calcite.rel.core.AggregateCall), RelNode (org.apache.calcite.rel.RelNode), Aggregate (org.apache.calcite.rel.core.Aggregate), LogicalAggregate (org.apache.calcite.rel.logical.LogicalAggregate), Map (java.util.Map), RexNode (org.apache.calcite.rex.RexNode)
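
Each grouping set produced by this rewrite is identified by the value of the GROUPING call over the full group set, and the generated "$g_*" filter flags compare against exactly those values. A small self-contained sketch of that value computation follows; it mirrors the groupValue helper referenced above, under the assumption that the most significant bit corresponds to the first column of the full group set.

import org.apache.calcite.util.ImmutableBitSet;

public final class GroupingValueSketch {
    // One bit per column of the full group set; a bit is 1 when that column
    // is absent from (i.e. aggregated away in) the current grouping set.
    public static long groupValue(ImmutableBitSet fullGroupSet, ImmutableBitSet groupSet) {
        long v = 0;
        for (int bit : fullGroupSet) {
            v = (v << 1) | (groupSet.get(bit) ? 0L : 1L);
        }
        return v;
    }
}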

Aggregations

Aggregate (org.apache.calcite.rel.core.Aggregate): 63
RelNode (org.apache.calcite.rel.RelNode): 34
AggregateCall (org.apache.calcite.rel.core.AggregateCall): 30
ArrayList (java.util.ArrayList): 29
RexNode (org.apache.calcite.rex.RexNode): 25
LogicalAggregate (org.apache.calcite.rel.logical.LogicalAggregate): 23
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet): 20
RelBuilder (org.apache.calcite.tools.RelBuilder): 19
RexBuilder (org.apache.calcite.rex.RexBuilder): 15
HashMap (java.util.HashMap): 14
Project (org.apache.calcite.rel.core.Project): 14
RexInputRef (org.apache.calcite.rex.RexInputRef): 13
RelMetadataQuery (org.apache.calcite.rel.metadata.RelMetadataQuery): 12
Filter (org.apache.calcite.rel.core.Filter): 11
RelDataType (org.apache.calcite.rel.type.RelDataType): 11
Join (org.apache.calcite.rel.core.Join): 10
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 10
HiveAggregate (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate): 9
List (java.util.List): 8
ImmutableList (com.google.common.collect.ImmutableList): 7