
Example 76 with SqlOperator

Use of org.apache.calcite.sql.SqlOperator in project hazelcast by hazelcast.

From the class HazelcastOperandTypeCheckerAware, method prepareBinding.

default HazelcastCallBinding prepareBinding(SqlCallBinding binding) {
    SqlOperator operator = binding.getOperator();
    assert operator == this;
    // Resolve unknown types if needed.
    SqlOperandTypeInference operandTypeInference = operator.getOperandTypeInference();
    return prepareBinding(binding, operandTypeInference);
}
Also used: SqlOperator(org.apache.calcite.sql.SqlOperator), SqlOperandTypeInference(org.apache.calcite.sql.type.SqlOperandTypeInference)
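
For orientation, a minimal sketch (not from the Hazelcast source; the function name and types are illustrative assumptions) of how a plain Calcite operator can be declared with an operand type inference, which is what getOperandTypeInference() would hand back to prepareBinding-style code:

import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.InferTypes;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.ReturnTypes;

public class OperandInferenceSketch {
    // Hypothetical function: unknown operand types (e.g. dynamic parameters) are
    // filled in from the declared return type via InferTypes.RETURN_TYPE, which is
    // the SqlOperandTypeInference this operator's getOperandTypeInference() returns.
    static final SqlFunction MY_FN = new SqlFunction(
        "MY_FN",                                   // illustrative name
        SqlKind.OTHER_FUNCTION,
        ReturnTypes.BIGINT,                        // fixed return type
        InferTypes.RETURN_TYPE,                    // operand type inference
        OperandTypes.NUMERIC,                      // operand type checker
        SqlFunctionCategory.USER_DEFINED_FUNCTION);
}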

Example 77 with SqlOperator

Use of org.apache.calcite.sql.SqlOperator in project hive by apache.

From the class HiveCalciteUtil, method checkMaterializable.

/**
 * Checks whether the given expression can be used for query materialization.
 * Returns the first call that prevents materialization (a non-deterministic,
 * dynamic, or runtime-constant function), or null if the expression is materializable.
 */
public static RexCall checkMaterializable(RexNode expr) {
    RexCall failingCall = null;
    if (expr == null) {
        return null;
    }
    RexVisitor<Void> visitor = new RexVisitorImpl<Void>(true) {

        @Override
        public Void visitCall(org.apache.calcite.rex.RexCall call) {
            // Non-deterministic functions, dynamic functions and runtime constants are not materializable.
            SqlOperator op = call.getOperator();
            if (!op.isDeterministic() || op.isDynamicFunction() || (op instanceof HiveSqlFunction && ((HiveSqlFunction) op).isRuntimeConstant())) {
                throw new Util.FoundOne(call);
            }
            return super.visitCall(call);
        }
    };
    try {
        expr.accept(visitor);
    } catch (Util.FoundOne e) {
        failingCall = (RexCall) e.getNode();
    }
    return failingCall;
}
Also used: RexCall(org.apache.calcite.rex.RexCall), HiveSqlFunction(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSqlFunction), SqlOperator(org.apache.calcite.sql.SqlOperator), RexUtil(org.apache.calcite.rex.RexUtil), SqlValidatorUtil(org.apache.calcite.sql.validate.SqlValidatorUtil), RelOptUtil(org.apache.calcite.plan.RelOptUtil), Util(org.apache.calcite.util.Util), RexVisitorImpl(org.apache.calcite.rex.RexVisitorImpl)
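
As a point of comparison, a stripped-down sketch of the same visitor/short-circuit pattern using only core Calcite classes, with no Hive-specific operator checks; class and method names are illustrative:

import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexVisitor;
import org.apache.calcite.rex.RexVisitorImpl;
import org.apache.calcite.util.Util;

public class NonDeterministicFinder {
    /** Returns the first non-deterministic call in the expression, or null if none. */
    public static RexCall findNonDeterministicCall(RexNode expr) {
        if (expr == null) {
            return null;
        }
        // 'true' means the visitor descends into operands (a "deep" visitor).
        RexVisitor<Void> visitor = new RexVisitorImpl<Void>(true) {
            @Override
            public Void visitCall(RexCall call) {
                if (!call.getOperator().isDeterministic()) {
                    // Util.FoundOne is an unchecked control-flow exception used to
                    // short-circuit the traversal as soon as a match is found.
                    throw new Util.FoundOne(call);
                }
                return super.visitCall(call);
            }
        };
        try {
            expr.accept(visitor);
            return null;
        } catch (Util.FoundOne e) {
            return (RexCall) e.getNode();
        }
    }
}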

Example 78 with SqlOperator

Use of org.apache.calcite.sql.SqlOperator in project hive by apache.

From the class SqlFunctionConverter, method getCalciteFn.

public static SqlOperator getCalciteFn(String hiveUdfName, List<RelDataType> calciteArgTypes, RelDataType calciteRetType, boolean deterministic, boolean runtimeConstant) throws CalciteSemanticException {
    SqlOperator calciteOp;
    CalciteUDFInfo uInf = getUDFInfo(hiveUdfName, calciteArgTypes, calciteRetType);
    switch(hiveUdfName) {
        // TODO: Perhaps we should do this for all functions, not just +,-
        case "-":
            calciteOp = new SqlMonotonicBinaryOperator("-", SqlKind.MINUS, 40, true, uInf.returnTypeInference, uInf.operandTypeInference, OperandTypes.MINUS_OPERATOR);
            break;
        case "+":
            calciteOp = new SqlMonotonicBinaryOperator("+", SqlKind.PLUS, 40, true, uInf.returnTypeInference, uInf.operandTypeInference, OperandTypes.PLUS_OPERATOR);
            break;
        default:
            calciteOp = hiveToCalcite.get(hiveUdfName);
            if (null == calciteOp) {
                calciteOp = new HiveSqlFunction(uInf.udfName, SqlKind.OTHER_FUNCTION, uInf.returnTypeInference, uInf.operandTypeInference, uInf.operandTypeChecker, SqlFunctionCategory.USER_DEFINED_FUNCTION, deterministic, runtimeConstant);
            }
            break;
    }
    return calciteOp;
}
Also used: HiveSqlFunction(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSqlFunction), HiveFromUnixTimeSqlOperator(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFromUnixTimeSqlOperator), HiveUnixTimestampSqlOperator(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnixTimestampSqlOperator), HiveDateSubSqlOperator(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateSubSqlOperator), HiveTruncSqlOperator(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTruncSqlOperator), HiveToUnixTimestampSqlOperator(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveToUnixTimestampSqlOperator), SqlOperator(org.apache.calcite.sql.SqlOperator), HiveToDateSqlOperator(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveToDateSqlOperator), HiveDateAddSqlOperator(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveDateAddSqlOperator), SqlMonotonicBinaryOperator(org.apache.calcite.sql.fun.SqlMonotonicBinaryOperator)
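
For context, a small sketch (not from the Hive source) showing the built-in Calcite operators that the "+" and "-" branches mirror; core Calcite already ships these as monotonic binary operators, and the Hive code rebuilds them only to plug in Hive-specific type inference:

import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

public class StdOperatorSketch {
    public static void main(String[] args) {
        // Inspect the standard "+" and "-" operators that correspond to the
        // SqlMonotonicBinaryOperator instances constructed above.
        SqlOperator plus = SqlStdOperatorTable.PLUS;
        SqlOperator minus = SqlStdOperatorTable.MINUS;
        System.out.println(plus.getName() + " kind=" + plus.getKind()
            + " leftPrec=" + plus.getLeftPrec());
        System.out.println(minus.getName() + " kind=" + minus.getKind()
            + " leftPrec=" + minus.getLeftPrec());
    }
}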

Example 79 with SqlOperator

Use of org.apache.calcite.sql.SqlOperator in project hive by apache.

From the class HiveJdbcImplementor, method convertConditionToSqlNode.

/**
 * Converts a {@link RexNode} condition into a {@link SqlNode}.
 *
 * @param node            Join condition
 * @param leftContext     Left context
 * @param rightContext    Right context
 * @param leftFieldCount  Number of fields on left result
 * @return SqlNode that represents the condition
 */
public static SqlNode convertConditionToSqlNode(RexNode node, Context leftContext, Context rightContext, int leftFieldCount) {
    // Trivially true or false conditions are rendered directly as boolean literals.
    if (node.isAlwaysTrue()) {
        return SqlLiteral.createBoolean(true, POS);
    }
    if (node.isAlwaysFalse()) {
        return SqlLiteral.createBoolean(false, POS);
    }
    if (node instanceof RexInputRef) {
        Context joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
        return joinContext.toSql(null, node);
    }
    if (!(node instanceof RexCall)) {
        throw new AssertionError(node);
    }
    final List<RexNode> operands;
    final SqlOperator op;
    final Context joinContext;
    switch(node.getKind()) {
        case AND:
        case OR:
            operands = ((RexCall) node).getOperands();
            op = ((RexCall) node).getOperator();
            SqlNode sqlCondition = null;
            for (RexNode operand : operands) {
                SqlNode x = convertConditionToSqlNode(operand, leftContext, rightContext, leftFieldCount);
                if (sqlCondition == null) {
                    sqlCondition = x;
                } else {
                    sqlCondition = op.createCall(POS, sqlCondition, x);
                }
            }
            return sqlCondition;
        case EQUALS:
        case IS_NOT_DISTINCT_FROM:
        case NOT_EQUALS:
        case GREATER_THAN:
        case GREATER_THAN_OR_EQUAL:
        case LESS_THAN:
        case LESS_THAN_OR_EQUAL:
            node = stripCastFromString(node);
            operands = ((RexCall) node).getOperands();
            op = ((RexCall) node).getOperator();
            if (operands.size() == 2 && operands.get(0) instanceof RexInputRef && operands.get(1) instanceof RexInputRef) {
                final RexInputRef op0 = (RexInputRef) operands.get(0);
                final RexInputRef op1 = (RexInputRef) operands.get(1);
                if (op0.getIndex() < leftFieldCount && op1.getIndex() >= leftFieldCount) {
                    // Arguments were of form 'op0 = op1'
                    return op.createCall(POS, leftContext.field(op0.getIndex()), rightContext.field(op1.getIndex() - leftFieldCount));
                }
                if (op1.getIndex() < leftFieldCount && op0.getIndex() >= leftFieldCount) {
                    // Arguments were of form 'op1 = op0'
                    return reverseOperatorDirection(op).createCall(POS, leftContext.field(op1.getIndex()), rightContext.field(op0.getIndex() - leftFieldCount));
                }
            }
            joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
            return joinContext.toSql(null, node);
        case IS_NULL:
        case IS_NOT_NULL:
            operands = ((RexCall) node).getOperands();
            if (operands.size() == 1 && operands.get(0) instanceof RexInputRef) {
                op = ((RexCall) node).getOperator();
                final RexInputRef op0 = (RexInputRef) operands.get(0);
                if (op0.getIndex() < leftFieldCount) {
                    return op.createCall(POS, leftContext.field(op0.getIndex()));
                } else {
                    return op.createCall(POS, rightContext.field(op0.getIndex() - leftFieldCount));
                }
            }
            joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
            return joinContext.toSql(null, node);
        default:
            joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
            return joinContext.toSql(null, node);
    }
}
Also used: RexCall(org.apache.calcite.rex.RexCall), SqlOperator(org.apache.calcite.sql.SqlOperator), RexInputRef(org.apache.calcite.rex.RexInputRef), RexNode(org.apache.calcite.rex.RexNode), SqlNode(org.apache.calcite.sql.SqlNode)
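
To illustrate the SqlOperator.createCall usage this method relies on, here is a minimal standalone sketch assuming only core Calcite classes; the column names are made up:

import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.parser.SqlParserPos;

public class CreateCallSketch {
    public static void main(String[] args) {
        SqlParserPos pos = SqlParserPos.ZERO;  // stands in for POS above
        SqlNode left = new SqlIdentifier("emp_id", pos);   // illustrative column names
        SqlNode right = new SqlIdentifier("dept_id", pos);
        // EQUALS.createCall builds "emp_id = dept_id"; AND.createCall chains conditions
        // the same way the AND/OR branch above folds its operands together.
        SqlNode eq = SqlStdOperatorTable.EQUALS.createCall(pos, left, right);
        SqlNode cond = SqlStdOperatorTable.AND.createCall(pos, eq,
            SqlStdOperatorTable.IS_NOT_NULL.createCall(pos, new SqlIdentifier("name", pos)));
        System.out.println(cond);  // prints a SQL-like rendering of the condition
    }
}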

Example 80 with SqlOperator

Use of org.apache.calcite.sql.SqlOperator in project hive by apache.

From the class HiveUnionSimpleSelectsToInlineTableRule, method onMatch.

@Override
public void onMatch(RelOptRuleCall call) {
    RexBuilder rexBuilder = call.builder().getRexBuilder();
    final HiveUnion union = call.rel(0);
    if (!union.all) {
        return;
    }
    List<RelNode> inputs = new ArrayList<RelNode>();
    List<Project> projects = new ArrayList<>();
    List<HiveTableFunctionScan> inlineTables = new ArrayList<>();
    for (RelNode input : union.getInputs()) {
        input = HiveRelDecorrelator.stripHep(input);
        if (isPlainProject(input)) {
            projects.add((Project) input);
            continue;
        }
        if (isInlineTableOperand(input)) {
            inlineTables.add((HiveTableFunctionScan) input);
            continue;
        }
        inputs.add(input);
    }
    if (projects.size() + inlineTables.size() <= 1) {
        // nothing to do
        return;
    }
    RowStorage newRows = new RowStorage();
    for (HiveTableFunctionScan rel : inlineTables) {
        // inline(array(row1,row2,...))
        RexCall rex = (RexCall) ((RexCall) rel.getCall()).operands.get(0);
        for (RexNode row : rex.operands) {
            if (!(row.getType() instanceof RelRecordType)) {
                return;
            }
            newRows.addRow(row);
        }
    }
    for (Project proj : projects) {
        RexNode row = rexBuilder.makeCall(SqlStdOperatorTable.ROW, proj.getProjects());
        if (!(row.getType() instanceof RelRecordType)) {
            return;
        }
        newRows.addRow(row);
    }
    if (newRows.keySet().size() + inputs.size() == union.getInputs().size()) {
        // nothing to do
        return;
    }
    if (dummyTable == null) {
        LOG.warn("Unexpected; rule would match - but dummyTable is not available");
        return;
    }
    for (RelRecordType type : newRows.keySet()) {
        List<RexNode> rows = newRows.get(type);
        RelDataType arrayType = rexBuilder.getTypeFactory().createArrayType(type, -1);
        try {
            SqlOperator inlineFn = SqlFunctionConverter.getCalciteFn("inline", Collections.singletonList(arrayType), type, true, false);
            SqlOperator arrayFn = SqlFunctionConverter.getCalciteFn("array", Collections.nCopies(rows.size(), type), arrayType, true, false);
            RexNode expr = rexBuilder.makeCall(arrayFn, rows);
            expr = rexBuilder.makeCall(inlineFn, expr);
            RelNode newInlineTable = buildTableFunctionScan(expr, union.getCluster());
            inputs.add(newInlineTable);
        } catch (CalciteSemanticException e) {
            LOG.debug("Conversion failed with exception", e);
            return;
        }
    }
    if (inputs.size() > 1) {
        HiveUnion newUnion = (HiveUnion) union.copy(union.getTraitSet(), inputs, true);
        call.transformTo(newUnion);
    } else {
        call.transformTo(inputs.get(0));
    }
}
Also used: SqlOperator(org.apache.calcite.sql.SqlOperator), HiveTableFunctionScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan), ArrayList(java.util.ArrayList), RelDataType(org.apache.calcite.rel.type.RelDataType), HiveUnion(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion), RelRecordType(org.apache.calcite.rel.type.RelRecordType), RexCall(org.apache.calcite.rex.RexCall), Project(org.apache.calcite.rel.core.Project), RelNode(org.apache.calcite.rel.RelNode), RexBuilder(org.apache.calcite.rex.RexBuilder), CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), RexNode(org.apache.calcite.rex.RexNode)
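
As a rough illustration of the RexBuilder/ROW call used for the plain projects above, here is a minimal sketch assuming core Calcite only; building the RexBuilder directly (instead of taking it from the planner call) is purely for demonstration:

import java.math.BigDecimal;
import java.util.Arrays;
import java.util.List;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

public class RowCallSketch {
    public static void main(String[] args) {
        // In a rule the RexBuilder comes from the planner (call.builder().getRexBuilder()
        // above); constructing one here is only for this self-contained example.
        RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
        List<RexNode> fields = Arrays.asList(
            rexBuilder.makeExactLiteral(BigDecimal.ONE),
            rexBuilder.makeLiteral("a"));
        // ROW(...) packs the projected expressions into a record-typed RexCall,
        // mirroring how the rule turns each plain Project into an inline-table row.
        RexNode row = rexBuilder.makeCall(SqlStdOperatorTable.ROW, fields);
        System.out.println(row + " : " + row.getType());
    }
}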

Aggregations

SqlOperator (org.apache.calcite.sql.SqlOperator): 129
ArrayList (java.util.ArrayList): 44
RexNode (org.apache.calcite.rex.RexNode): 41
RelDataType (org.apache.calcite.rel.type.RelDataType): 25
SqlCall (org.apache.calcite.sql.SqlCall): 24
RexCall (org.apache.calcite.rex.RexCall): 21
SqlNode (org.apache.calcite.sql.SqlNode): 21
SqlKind (org.apache.calcite.sql.SqlKind): 15
List (java.util.List): 13
SqlFunction (org.apache.calcite.sql.SqlFunction): 13
RelNode (org.apache.calcite.rel.RelNode): 12
RexBuilder (org.apache.calcite.rex.RexBuilder): 11
RexInputRef (org.apache.calcite.rex.RexInputRef): 11
NlsString (org.apache.calcite.util.NlsString): 11
Test (org.junit.Test): 11
SqlBasicCall (org.apache.calcite.sql.SqlBasicCall): 10
SqlIdentifier (org.apache.calcite.sql.SqlIdentifier): 10
RexNode (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexNode): 9
SqlOperator (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperator): 9
RexLiteral (org.apache.calcite.rex.RexLiteral): 9