
Example 1 with RexFieldCollation

Use of org.apache.calcite.rex.RexFieldCollation in the apache/drill project.

From class DrillOptiqTest, method testUnsupportedRexNode:

/* Method checks if we raise the appropriate error while dealing with a RexNode that cannot be converted to
 * an equivalent Drill expression.
 */
@Test
public void testUnsupportedRexNode() {
    try {
        // Create the data type factory.
        RelDataTypeFactory relFactory = new SqlTypeFactoryImpl(DrillRelDataTypeSystem.DRILL_REL_DATATYPE_SYSTEM);
        // Create the rex builder
        RexBuilder rex = new RexBuilder(relFactory);
        RelDataType anyType = relFactory.createSqlType(SqlTypeName.ANY);
        List<RexNode> emptyList = new LinkedList<>();
        ImmutableList<RexFieldCollation> e = ImmutableList.copyOf(new RexFieldCollation[0]);
        // create a dummy RexOver object.
        RexNode window = rex.makeOver(anyType, SqlStdOperatorTable.AVG, emptyList, emptyList, e, null, null, true, false, false);
        DrillOptiq.toDrill(null, (RelNode) null, window);
    } catch (UserException e) {
        if (e.getMessage().contains(DrillOptiq.UNSUPPORTED_REX_NODE_ERROR)) {
            // got expected error return
            return;
        }
        Assert.fail("Hit exception with unexpected error message");
    }
    Assert.fail("Failed to raise the expected exception");
}
Also used: SqlTypeFactoryImpl(org.apache.calcite.sql.type.SqlTypeFactoryImpl), RelDataTypeFactory(org.apache.calcite.rel.type.RelDataTypeFactory), RexBuilder(org.apache.calcite.rex.RexBuilder), RelDataType(org.apache.calcite.rel.type.RelDataType), UserException(org.apache.drill.common.exceptions.UserException), RexFieldCollation(org.apache.calcite.rex.RexFieldCollation), LinkedList(java.util.LinkedList), RexNode(org.apache.calcite.rex.RexNode), Test(org.junit.Test)
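The test above only needs an empty order-key list. For readers who want to see what a populated RexFieldCollation looks like, here is a minimal standalone sketch (our own class and method names, not Drill code, and using Calcite's default type system rather than Drill's): a RexFieldCollation pairs a key expression with a set of SqlKind flags, from which direction and null ordering are derived.

import java.util.Collections;

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexFieldCollation;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.calcite.sql.type.SqlTypeName;

public class RexFieldCollationSketch {

    public static RexFieldCollation buildOrderKey() {
        // Same kind of setup as the Drill test, but with Calcite's default type system.
        RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
        RexBuilder rex = new RexBuilder(typeFactory);
        RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);

        // The key expression: a reference to input column 0.
        RexNode key = rex.makeInputRef(intType, 0);

        // Ascending (no SqlKind.DESCENDING flag) with NULLS FIRST.
        RexFieldCollation collation =
                new RexFieldCollation(key, Collections.singleton(SqlKind.NULLS_FIRST));

        // Direction and null ordering are derived from the flag set.
        System.out.println(collation.getDirection());      // ASCENDING
        System.out.println(collation.getNullDirection());  // FIRST
        return collation;
    }
}

An ImmutableList containing a collation built this way could stand in for the empty list e in the makeOver() call above, giving the dummy RexOver an ORDER BY key.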

Example 2 with RexFieldCollation

Use of org.apache.calcite.rex.RexFieldCollation in the apache/hive project.

From class ExprNodeConverter, method getPSpec:

private PartitioningSpec getPSpec(RexWindow window) {
    PartitioningSpec partitioning = new PartitioningSpec();
    Schema schema = new Schema(tabAlias, inputRowType.getFieldList());
    if (window.partitionKeys != null && !window.partitionKeys.isEmpty()) {
        PartitionSpec pSpec = new PartitionSpec();
        for (RexNode pk : window.partitionKeys) {
            PartitionExpression exprSpec = new PartitionExpression();
            ASTNode astNode = pk.accept(new RexVisitor(schema));
            exprSpec.setExpression(astNode);
            pSpec.addExpression(exprSpec);
        }
        partitioning.setPartSpec(pSpec);
    }
    if (window.orderKeys != null && !window.orderKeys.isEmpty()) {
        OrderSpec oSpec = new OrderSpec();
        for (RexFieldCollation ok : window.orderKeys) {
            OrderExpression exprSpec = new OrderExpression();
            Order order = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? Order.ASC : Order.DESC;
            NullOrder nullOrder;
            if (ok.right.contains(SqlKind.NULLS_FIRST)) {
                nullOrder = NullOrder.NULLS_FIRST;
            } else if (ok.right.contains(SqlKind.NULLS_LAST)) {
                nullOrder = NullOrder.NULLS_LAST;
            } else {
                // Default
                nullOrder = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? NullOrder.NULLS_FIRST : NullOrder.NULLS_LAST;
            }
            exprSpec.setOrder(order);
            exprSpec.setNullOrder(nullOrder);
            ASTNode astNode = ok.left.accept(new RexVisitor(schema));
            exprSpec.setExpression(astNode);
            oSpec.addExpression(exprSpec);
        }
        partitioning.setOrderSpec(oSpec);
    }
    return partitioning;
}
Also used: NullOrder(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder), Order(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order), OrderSpec(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec), RexVisitor(org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor), PartitionExpression(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression), OrderExpression(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression), Schema(org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema), ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode), PartitionSpec(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionSpec), RexFieldCollation(org.apache.calcite.rex.RexFieldCollation), PartitioningSpec(org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitioningSpec), RexNode(org.apache.calcite.rex.RexNode)
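The translation above feeds Hive's PTFInvocationSpec classes. As a Calcite-only illustration of the same direction and null-ordering defaults, the following standalone helper is a sketch (class and method names are ours, not Hive code; the PTFInvocationSpec setters are omitted) that inspects a RexWindow's order keys exactly as getPSpec does.

import org.apache.calcite.rel.RelFieldCollation;
import org.apache.calcite.rex.RexFieldCollation;
import org.apache.calcite.rex.RexWindow;
import org.apache.calcite.sql.SqlKind;

public final class OrderKeyInspector {

    private OrderKeyInspector() {
    }

    /** Prints direction and effective null ordering for each ORDER BY key of a window. */
    public static void describeOrderKeys(RexWindow window) {
        for (RexFieldCollation ok : window.orderKeys) {
            boolean ascending = ok.getDirection() == RelFieldCollation.Direction.ASCENDING;
            // ok.right is the set of SqlKind flags attached to the key
            // (DESCENDING, NULLS_FIRST, NULLS_LAST).
            String nulls;
            if (ok.right.contains(SqlKind.NULLS_FIRST)) {
                nulls = "NULLS FIRST";
            } else if (ok.right.contains(SqlKind.NULLS_LAST)) {
                nulls = "NULLS LAST";
            } else {
                // Same default as getPSpec() above: NULLS FIRST for ASC, NULLS LAST for DESC.
                nulls = ascending ? "NULLS FIRST" : "NULLS LAST";
            }
            // ok.left is the key expression itself.
            System.out.println(ok.left + (ascending ? " ASC " : " DESC ") + nulls);
        }
    }
}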

Example 3 with RexFieldCollation

Use of org.apache.calcite.rex.RexFieldCollation in the apache/hive project.

From class HiveWindowingFixRule, method onMatch:

@Override
public void onMatch(RelOptRuleCall call) {
    Project project = call.rel(0);
    Aggregate aggregate = call.rel(1);
    // 1. We go over the expressions in the project operator
    //    and we separate the windowing nodes that are result
    //    of an aggregate expression from the rest of nodes
    final int groupingFields = aggregate.getGroupCount() + aggregate.getIndicatorCount();
    Set<String> projectExprsDigest = new HashSet<String>();
    Map<String, RexNode> windowingExprsDigestToNodes = new HashMap<String, RexNode>();
    for (RexNode r : project.getChildExps()) {
        if (r instanceof RexOver) {
            RexOver rexOverNode = (RexOver) r;
            // Operands
            for (RexNode operand : rexOverNode.getOperands()) {
                if (operand instanceof RexInputRef && ((RexInputRef) operand).getIndex() >= groupingFields) {
                    windowingExprsDigestToNodes.put(operand.toString(), operand);
                }
            }
            // Partition keys
            for (RexNode partitionKey : rexOverNode.getWindow().partitionKeys) {
                if (partitionKey instanceof RexInputRef && ((RexInputRef) partitionKey).getIndex() >= groupingFields) {
                    windowingExprsDigestToNodes.put(partitionKey.toString(), partitionKey);
                }
            }
            // Order keys
            for (RexFieldCollation orderKey : rexOverNode.getWindow().orderKeys) {
                if (orderKey.left instanceof RexInputRef && ((RexInputRef) orderKey.left).getIndex() >= groupingFields) {
                    windowingExprsDigestToNodes.put(orderKey.left.toString(), orderKey.left);
                }
            }
        } else {
            projectExprsDigest.add(r.toString());
        }
    }
    // 2. We check whether there is a column needed by the
    //    windowing operation that is missing in the
    //    project expressions. For instance, if the windowing
    //    operation is over an aggregation column, Hive expects
    //    that column to be in the Select clause of the query.
    //    The idea is that if there is a column missing, we will
    //    replace the old project operator by two new project
    //    operators:
    //    - a project operator containing the original columns
    //      of the project operator plus all the columns that were
    //      missing
    //    - a project on top of the previous one, that will take
    //      out the columns that were missing and were added by the
    //      previous project
    // These data structures are needed to create the new project
    // operator (below)
    final List<RexNode> belowProjectExprs = new ArrayList<RexNode>();
    final List<String> belowProjectColumnNames = new ArrayList<String>();
    // This data structure is needed to create the new project
    // operator (top)
    final List<RexNode> topProjectExprs = new ArrayList<RexNode>();
    final int projectCount = project.getChildExps().size();
    for (int i = 0; i < projectCount; i++) {
        belowProjectExprs.add(project.getChildExps().get(i));
        belowProjectColumnNames.add(project.getRowType().getFieldNames().get(i));
        topProjectExprs.add(RexInputRef.of(i, project.getRowType()));
    }
    boolean windowingFix = false;
    for (Entry<String, RexNode> windowingExpr : windowingExprsDigestToNodes.entrySet()) {
        if (!projectExprsDigest.contains(windowingExpr.getKey())) {
            windowingFix = true;
            belowProjectExprs.add(windowingExpr.getValue());
            int colIndex = 0;
            String alias = "window_col_" + colIndex;
            while (belowProjectColumnNames.contains(alias)) {
                alias = "window_col_" + (colIndex++);
            }
            belowProjectColumnNames.add(alias);
        }
    }
    if (!windowingFix) {
        // We do not need to do anything, we bail out
        return;
    }
    // 3. We need to fix it, we create the two replacement project
    //    operators
    RelNode newProjectRel = projectFactory.createProject(aggregate, belowProjectExprs, belowProjectColumnNames);
    RelNode newTopProjectRel = projectFactory.createProject(newProjectRel, topProjectExprs, project.getRowType().getFieldNames());
    call.transformTo(newTopProjectRel);
}
Also used: RexOver(org.apache.calcite.rex.RexOver), HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), RexFieldCollation(org.apache.calcite.rex.RexFieldCollation), Project(org.apache.calcite.rel.core.Project), RelNode(org.apache.calcite.rel.RelNode), RexInputRef(org.apache.calcite.rex.RexInputRef), Aggregate(org.apache.calcite.rel.core.Aggregate), HashSet(java.util.HashSet), RexNode(org.apache.calcite.rex.RexNode)
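Step 1 of the rule boils down to harvesting every RexInputRef that a RexOver reaches through its operands, partition keys, and order keys. The following self-contained helper is a sketch of just that step (class, method, and parameter names are ours, not part of HiveWindowingFixRule), keyed by digest the same way the rule keys its map.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.calcite.rex.RexFieldCollation;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexOver;

public final class WindowInputRefCollector {

    private WindowInputRefCollector() {
    }

    /** Returns the RexInputRefs used by the windowing expression, indexed by their digest. */
    public static Map<String, RexNode> collect(RexOver over, int minIndex) {
        Map<String, RexNode> refs = new LinkedHashMap<>();
        // Operands of the windowed aggregate call.
        for (RexNode operand : over.getOperands()) {
            addIfInputRef(refs, operand, minIndex);
        }
        // PARTITION BY keys.
        for (RexNode partitionKey : over.getWindow().partitionKeys) {
            addIfInputRef(refs, partitionKey, minIndex);
        }
        // ORDER BY keys: the key expression is the left component of the RexFieldCollation.
        for (RexFieldCollation orderKey : over.getWindow().orderKeys) {
            addIfInputRef(refs, orderKey.left, minIndex);
        }
        return refs;
    }

    private static void addIfInputRef(Map<String, RexNode> refs, RexNode node, int minIndex) {
        // Only references at or beyond minIndex (the grouping fields in the rule) matter.
        if (node instanceof RexInputRef && ((RexInputRef) node).getIndex() >= minIndex) {
            refs.put(node.toString(), node);
        }
    }
}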

Aggregations

RexFieldCollation (org.apache.calcite.rex.RexFieldCollation): 3
RexNode (org.apache.calcite.rex.RexNode): 3
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
HashSet (java.util.HashSet): 1
LinkedList (java.util.LinkedList): 1
RelNode (org.apache.calcite.rel.RelNode): 1
Aggregate (org.apache.calcite.rel.core.Aggregate): 1
Project (org.apache.calcite.rel.core.Project): 1
RelDataType (org.apache.calcite.rel.type.RelDataType): 1
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory): 1
RexBuilder (org.apache.calcite.rex.RexBuilder): 1
RexInputRef (org.apache.calcite.rex.RexInputRef): 1
RexOver (org.apache.calcite.rex.RexOver): 1
SqlTypeFactoryImpl (org.apache.calcite.sql.type.SqlTypeFactoryImpl): 1
UserException (org.apache.drill.common.exceptions.UserException): 1
RexVisitor (org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor): 1
Schema (org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema): 1
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 1
NullOrder (org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder): 1