
Example 21 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class PlanModifierForReturnPath, method introduceProjectIfNeeded:

private static RelNode introduceProjectIfNeeded(RelNode optimizedOptiqPlan, List<FieldSchema> resultSchema) throws CalciteSemanticException {
    List<String> fieldNames = new ArrayList<>();
    for (FieldSchema fieldSchema : resultSchema) {
        fieldNames.add(fieldSchema.getName());
    }
    RelNode newRoot = null;
    List<RexNode> projectList = null;
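    // If the plan root is not already a Project, wrap it in an identity HiveProject
    // so the output columns can be renamed to the expected result schema; otherwise
    // rebuild the existing Project's expressions under the new field names.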
    if (!(optimizedOptiqPlan instanceof Project)) {
        projectList = HiveCalciteUtil.getProjsFromBelowAsInputRef(optimizedOptiqPlan);
        newRoot = HiveProject.create(optimizedOptiqPlan, projectList, fieldNames);
    } else {
        HiveProject project = (HiveProject) optimizedOptiqPlan;
        newRoot = HiveProject.create(project.getInput(0), project.getChildExps(), fieldNames);
    }
    return newRoot;
}
Also used: Project (org.apache.calcite.rel.core.Project), HiveProject (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject), RelNode (org.apache.calcite.rel.RelNode), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), RexNode (org.apache.calcite.rex.RexNode)
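
The non-Project branch builds one input reference per field of the plan's row type, so the introduced HiveProject is a pass-through that only renames columns. Below is a minimal sketch of constructing such an identity projection with Calcite's RexBuilder; it is an assumption about what HiveCalciteUtil.getProjsFromBelowAsInputRef returns, not a copy of its implementation, and the helper name is hypothetical.

import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;

// Sketch: one RexInputRef per field of the input's row type (hypothetical helper).
static List<RexNode> identityProjects(RelNode input, RexBuilder rexBuilder) {
    List<RexNode> refs = new ArrayList<>();
    for (int i = 0; i < input.getRowType().getFieldCount(); i++) {
        // makeInputRef(RelNode, int) takes the field type from the input's row type.
        refs.add(rexBuilder.makeInputRef(input, i));
    }
    return refs;
}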

Example 22 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class PlanModifierUtil, method generateInvalidSchemaMessage:

protected static String generateInvalidSchemaMessage(Project topLevelProj, List<FieldSchema> resultSchema, int fieldsForOB) {
    String errorDesc = "Result Schema didn't match Calcite Optimized Op Tree; schema: ";
    for (FieldSchema fs : resultSchema) {
        errorDesc += "[" + fs.getName() + ":" + fs.getType() + "], ";
    }
    errorDesc += " projection fields: ";
    for (RexNode exp : topLevelProj.getChildExps()) {
        errorDesc += "[" + exp.toString() + ":" + exp.getType() + "], ";
    }
    if (fieldsForOB != 0) {
        errorDesc += fieldsForOB + " fields removed due to ORDER BY  ";
    }
    return errorDesc.substring(0, errorDesc.length() - 2);
}
Also used: FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), RexNode (org.apache.calcite.rex.RexNode)
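
For illustration only (hypothetical column names, types, and expressions, not taken from a real plan): with a two-column result schema, three projected expressions, and one field dropped for the ORDER BY, the returned message would read roughly:

Result Schema didn't match Calcite Optimized Op Tree; schema: [key:int], [value:string],  projection fields: [$0:INTEGER], [$1:VARCHAR(10)], [+($0, 1):INTEGER], 1 fields removed due to ORDER BY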

Example 23 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class PlanModifierUtil, method fixTopOBSchema:

protected static void fixTopOBSchema(final RelNode rootRel, Pair<RelNode, RelNode> topSelparentPair, List<FieldSchema> resultSchema, boolean replaceProject) throws CalciteSemanticException {
    if (!(topSelparentPair.getKey() instanceof Sort) || !HiveCalciteUtil.orderRelNode(topSelparentPair.getKey())) {
        return;
    }
    HiveSortLimit obRel = (HiveSortLimit) topSelparentPair.getKey();
    Project obChild = (Project) topSelparentPair.getValue();
    if (obChild.getRowType().getFieldCount() <= resultSchema.size()) {
        return;
    }
    RelDataType rt = obChild.getRowType();
    @SuppressWarnings({ "unchecked", "rawtypes" }) Set<Integer> collationInputRefs = new HashSet(RelCollations.ordinals(obRel.getCollation()));
    ImmutableMap.Builder<Integer, RexNode> inputRefToCallMapBldr = ImmutableMap.builder();
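    // Walk the trailing expressions of the child Project (those beyond the visible
    // result schema); any that feed the ORDER BY collation are recorded so the Sort
    // can evaluate them itself once they are dropped from the projection.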
    for (int i = resultSchema.size(); i < rt.getFieldCount(); i++) {
        if (collationInputRefs.contains(i)) {
            RexNode obyExpr = obChild.getChildExps().get(i);
            if (obyExpr instanceof RexCall) {
                LOG.debug("Old RexCall : " + obyExpr);
                obyExpr = adjustOBSchema((RexCall) obyExpr, obChild, resultSchema);
                LOG.debug("New RexCall : " + obyExpr);
            }
            inputRefToCallMapBldr.put(i, obyExpr);
        }
    }
    ImmutableMap<Integer, RexNode> inputRefToCallMap = inputRefToCallMapBldr.build();
    if ((obChild.getRowType().getFieldCount() - inputRefToCallMap.size()) != resultSchema.size()) {
        LOG.error(generateInvalidSchemaMessage(obChild, resultSchema, inputRefToCallMap.size()));
        throw new CalciteSemanticException("Result Schema didn't match Optimized Op Tree Schema");
    }
    if (replaceProject) {
        // This removes order-by only expressions from the projections.
        HiveProject replacementProjectRel = HiveProject.create(obChild.getInput(), obChild.getChildExps().subList(0, resultSchema.size()), obChild.getRowType().getFieldNames().subList(0, resultSchema.size()));
        obRel.replaceInput(0, replacementProjectRel);
    }
    obRel.setInputRefToCallMap(inputRefToCallMap);
}
Also used: RelDataType (org.apache.calcite.rel.type.RelDataType), HiveSortLimit (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit), ImmutableMap (com.google.common.collect.ImmutableMap), RexCall (org.apache.calcite.rex.RexCall), Project (org.apache.calcite.rel.core.Project), HiveProject (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject), Sort (org.apache.calcite.rel.core.Sort), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), HashSet (java.util.HashSet), RexNode (org.apache.calcite.rex.RexNode)
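
The loop above uses the collation's field ordinals to decide which trailing project expressions exist only to feed the ORDER BY. Here is a minimal standalone sketch of that selection logic (the helper name is hypothetical; the real method also rewrites RexCalls via adjustOBSchema before recording them):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.calcite.rel.RelCollation;
import org.apache.calcite.rel.RelCollations;

// Sketch: indexes of projected expressions beyond the visible result schema that the
// sort keys refer to; these are the indexes fixTopOBSchema records in inputRefToCallMap.
static List<Integer> orderByOnlyFields(RelCollation collation, int projectFieldCount, int resultSchemaSize) {
    Set<Integer> sortKeys = new HashSet<>(RelCollations.ordinals(collation));
    List<Integer> obOnly = new ArrayList<>();
    for (int i = resultSchemaSize; i < projectFieldCount; i++) {
        if (sortKeys.contains(i)) {
            obOnly.add(i);
        }
    }
    return obOnly;
}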

Example 24 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class RexNodeConverter, method convert:

protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticException {
    RexBuilder rexBuilder = cluster.getRexBuilder();
    RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
    PrimitiveTypeInfo hiveType = (PrimitiveTypeInfo) literal.getTypeInfo();
    RelDataType calciteDataType = TypeConverter.convert(hiveType, dtFactory);
    PrimitiveCategory hiveTypeCategory = hiveType.getPrimitiveCategory();
    ConstantObjectInspector coi = literal.getWritableObjectInspector();
    Object value = ObjectInspectorUtils.copyToStandardJavaObject(coi.getWritableConstantValue(), coi);
    RexNode calciteLiteral = null;
    // A null constant is treated as VOID, regardless of its declared Hive type.
    if (value == null) {
        hiveTypeCategory = PrimitiveCategory.VOID;
    }
    // TODO: Verify if we need to use ConstantObjectInspector to unwrap data
    switch(hiveTypeCategory) {
        case BOOLEAN:
            calciteLiteral = rexBuilder.makeLiteral(((Boolean) value).booleanValue());
            break;
        case BYTE:
            calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Byte) value), calciteDataType);
            break;
        case SHORT:
            calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value), calciteDataType);
            break;
        case INT:
            calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value));
            break;
        case LONG:
            calciteLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value));
            break;
        // TODO: is Decimal an exact numeric or approximate numeric?
        case DECIMAL:
            if (value instanceof HiveDecimal) {
                value = ((HiveDecimal) value).bigDecimalValue();
            } else if (value instanceof Decimal128) {
                value = ((Decimal128) value).toBigDecimal();
            }
            if (value == null) {
                // The constant could not be converted to a valid BigDecimal; reject it
                // rather than emit an invalid decimal literal.
                throw new CalciteSemanticException("Expression " + literal.getExprString() + " is not a valid decimal", UnsupportedFeature.Invalid_decimal);
            // TODO: return createNullLiteral(literal);
            }
            BigDecimal bd = (BigDecimal) value;
            BigInteger unscaled = bd.unscaledValue();
            if (unscaled.compareTo(MIN_LONG_BI) >= 0 && unscaled.compareTo(MAX_LONG_BI) <= 0) {
                calciteLiteral = rexBuilder.makeExactLiteral(bd);
            } else {
                // CBO doesn't support unlimited precision decimals. In practice, this
                // will work...
                // An alternative would be to throw CboSemanticException and fall back
                // to no CBO.
                RelDataType relType = cluster.getTypeFactory().createSqlType(SqlTypeName.DECIMAL, unscaled.toString().length(), bd.scale());
                calciteLiteral = rexBuilder.makeExactLiteral(bd, relType);
            }
            break;
        case FLOAT:
            calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal(Float.toString((Float) value)), calciteDataType);
            break;
        case DOUBLE:
            // TODO: The best solution is to support NaN in expression reduction.
            if (Double.isNaN((Double) value)) {
                throw new CalciteSemanticException("NaN", UnsupportedFeature.Invalid_decimal);
            }
            calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal(Double.toString((Double) value)), calciteDataType);
            break;
        case CHAR:
            if (value instanceof HiveChar) {
                value = ((HiveChar) value).getValue();
            }
            calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
            break;
        case VARCHAR:
            if (value instanceof HiveVarchar) {
                value = ((HiveVarchar) value).getValue();
            }
            calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
            break;
        case STRING:
            calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
            break;
        case DATE:
            Calendar cal = new GregorianCalendar();
            cal.setTime((Date) value);
            calciteLiteral = rexBuilder.makeDateLiteral(cal);
            break;
        case TIMESTAMP:
            Calendar c = null;
            if (value instanceof Calendar) {
                c = (Calendar) value;
            } else {
                c = Calendar.getInstance();
                c.setTimeInMillis(((Timestamp) value).getTime());
            }
            calciteLiteral = rexBuilder.makeTimestampLiteral(c, RelDataType.PRECISION_NOT_SPECIFIED);
            break;
        case INTERVAL_YEAR_MONTH:
            // Calcite year-month literal value is months as BigDecimal
            BigDecimal totalMonths = BigDecimal.valueOf(((HiveIntervalYearMonth) value).getTotalMonths());
            calciteLiteral = rexBuilder.makeIntervalLiteral(totalMonths, new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1, 1)));
            break;
        case INTERVAL_DAY_TIME:
            // Calcite day-time interval is millis value as BigDecimal
            // Seconds converted to millis
            BigDecimal secsValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getTotalSeconds() * 1000);
            // Nanos converted to millis
            BigDecimal nanosValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getNanos(), 6);
            calciteLiteral = rexBuilder.makeIntervalLiteral(secsValueBd.add(nanosValueBd), new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, new SqlParserPos(1, 1)));
            break;
        case VOID:
            calciteLiteral = cluster.getRexBuilder().makeLiteral(null, cluster.getTypeFactory().createSqlType(SqlTypeName.NULL), true);
            break;
        case BINARY:
        case UNKNOWN:
        default:
            throw new RuntimeException("UnSupported Literal");
    }
    return calciteLiteral;
}
Also used: SqlParserPos (org.apache.calcite.sql.parser.SqlParserPos), SqlIntervalQualifier (org.apache.calcite.sql.SqlIntervalQualifier), GregorianCalendar (java.util.GregorianCalendar), Calendar (java.util.Calendar), HiveChar (org.apache.hadoop.hive.common.type.HiveChar), RelDataType (org.apache.calcite.rel.type.RelDataType), Decimal128 (org.apache.hadoop.hive.common.type.Decimal128), HiveVarchar (org.apache.hadoop.hive.common.type.HiveVarchar), PrimitiveTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo), BigDecimal (java.math.BigDecimal), RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory), HiveDecimal (org.apache.hadoop.hive.common.type.HiveDecimal), RexBuilder (org.apache.calcite.rex.RexBuilder), BigInteger (java.math.BigInteger), ConstantObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), PrimitiveCategory (org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory), HiveIntervalDayTime (org.apache.hadoop.hive.common.type.HiveIntervalDayTime), RexNode (org.apache.calcite.rex.RexNode)
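
Most of the branches above delegate to a handful of RexBuilder factory methods. The following standalone sketch shows those calls outside of Hive, using a JavaTypeFactoryImpl-backed RexBuilder in place of cluster.getRexBuilder(); the class name is illustrative, not part of the project.

import java.math.BigDecimal;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.type.SqlTypeName;

public class LiteralSketch {
    public static void main(String[] args) {
        RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
        RexBuilder rexBuilder = new RexBuilder(typeFactory);

        // BOOLEAN branch: plain boolean literal.
        RexNode boolLit = rexBuilder.makeLiteral(true);
        // LONG branch: BIGINT literal built from a BigDecimal.
        RexNode longLit = rexBuilder.makeBigintLiteral(new BigDecimal(42L));
        // DECIMAL branch (unscaled value fits in a long): exact literal.
        RexNode decLit = rexBuilder.makeExactLiteral(new BigDecimal("12.34"));
        // DOUBLE branch: approximate literal with an explicit DOUBLE type.
        RexNode dblLit = rexBuilder.makeApproxLiteral(new BigDecimal("2.5"),
                typeFactory.createSqlType(SqlTypeName.DOUBLE));

        System.out.println(boolLit + ", " + longLit + ", " + decLit + ", " + dblLit);
    }
}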

Example 25 with RexNode

Use of org.apache.calcite.rex.RexNode in project hive by apache.

The class HiveRelMdPredicates, method getPredicates:

/**
   * Infers predicates for a Union.
   */
public RelOptPredicateList getPredicates(Union union, RelMetadataQuery mq) {
    RexBuilder rB = union.getCluster().getRexBuilder();
    Map<String, RexNode> finalPreds = new HashMap<>();
    List<RexNode> finalResidualPreds = new ArrayList<>();
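    // finalPreds: predicates seen in every input processed so far, keyed by their
    // string form; finalResidualPreds: per input, the conjunction of predicates that
    // are not common to all inputs.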
    for (int i = 0; i < union.getInputs().size(); i++) {
        RelNode input = union.getInputs().get(i);
        RelOptPredicateList info = mq.getPulledUpPredicates(input);
        if (info.pulledUpPredicates.isEmpty()) {
            return RelOptPredicateList.EMPTY;
        }
        Map<String, RexNode> preds = new HashMap<>();
        List<RexNode> residualPreds = new ArrayList<>();
        for (RexNode pred : info.pulledUpPredicates) {
            final String predString = pred.toString();
            if (i == 0) {
                preds.put(predString, pred);
                continue;
            }
            if (finalPreds.containsKey(predString)) {
                preds.put(predString, pred);
            } else {
                residualPreds.add(pred);
            }
        }
        // Add new residual preds
        finalResidualPreds.add(RexUtil.composeConjunction(rB, residualPreds, false));
        // Add those that are not part of the final set to residual
        for (Entry<String, RexNode> e : finalPreds.entrySet()) {
            if (!preds.containsKey(e.getKey())) {
                // This node was in previous union inputs, but it is not in this one
                for (int j = 0; j < i; j++) {
                    finalResidualPreds.set(j, RexUtil.composeConjunction(rB, Lists.newArrayList(finalResidualPreds.get(j), e.getValue()), false));
                }
            }
        }
        // Final preds
        finalPreds = preds;
    }
    List<RexNode> preds = new ArrayList<>(finalPreds.values());
    RexNode disjPred = RexUtil.composeDisjunction(rB, finalResidualPreds, false);
    if (!disjPred.isAlwaysTrue()) {
        preds.add(disjPred);
    }
    return RelOptPredicateList.of(preds);
}
Also used: RelNode (org.apache.calcite.rel.RelNode), HashMap (java.util.HashMap), RelOptPredicateList (org.apache.calcite.plan.RelOptPredicateList), ArrayList (java.util.ArrayList), RexBuilder (org.apache.calcite.rex.RexBuilder), RexNode (org.apache.calcite.rex.RexNode)
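
The per-input residual predicates are combined with RexUtil.composeConjunction, and the resulting per-input conjunctions are combined with RexUtil.composeDisjunction. A small standalone sketch of those two calls follows; the predicates ($0 > 10, $0 < 5) and the class name are hypothetical.

import java.math.BigDecimal;
import com.google.common.collect.ImmutableList;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.type.SqlTypeName;

public class UnionPredicateSketch {
    public static void main(String[] args) {
        RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
        RexBuilder rexBuilder = new RexBuilder(typeFactory);
        RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);

        // Two hypothetical predicates over input field 0.
        RexNode gt = rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN,
                rexBuilder.makeInputRef(intType, 0),
                rexBuilder.makeExactLiteral(new BigDecimal(10)));
        RexNode lt = rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN,
                rexBuilder.makeInputRef(intType, 0),
                rexBuilder.makeExactLiteral(new BigDecimal(5)));

        // composeConjunction ANDs the residual predicates of a single union input;
        // composeDisjunction ORs the resulting per-input conjunctions, mirroring
        // how getPredicates builds disjPred above.
        RexNode conj = RexUtil.composeConjunction(rexBuilder, ImmutableList.of(gt, lt), false);
        RexNode disj = RexUtil.composeDisjunction(rexBuilder, ImmutableList.of(gt, lt), false);
        System.out.println(conj);
        System.out.println(disj);
    }
}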

Aggregations

RexNode (org.apache.calcite.rex.RexNode): 204
RelNode (org.apache.calcite.rel.RelNode): 75
ArrayList (java.util.ArrayList): 68
RexBuilder (org.apache.calcite.rex.RexBuilder): 52
RelDataType (org.apache.calcite.rel.type.RelDataType): 48
RexInputRef (org.apache.calcite.rex.RexInputRef): 43
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 41
RexCall (org.apache.calcite.rex.RexCall): 32
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet): 28
Pair (org.apache.calcite.util.Pair): 22
HashMap (java.util.HashMap): 21
AggregateCall (org.apache.calcite.rel.core.AggregateCall): 21
RexLiteral (org.apache.calcite.rex.RexLiteral): 19
ImmutableList (com.google.common.collect.ImmutableList): 18
Project (org.apache.calcite.rel.core.Project): 17
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory): 14
Filter (org.apache.calcite.rel.core.Filter): 12
HashSet (java.util.HashSet): 11
CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException): 11
BigDecimal (java.math.BigDecimal): 10