use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperator in project hazelcast by hazelcast.
the class HazelcastOperandTypeCheckerAware method prepareBinding.
default HazelcastCallBinding prepareBinding(SqlCallBinding binding) {
    SqlOperator operator = binding.getOperator();
    assert operator == this;
    // Resolve unknown types if needed.
    SqlOperandTypeInference operandTypeInference = operator.getOperandTypeInference();
    return prepareBinding(binding, operandTypeInference);
}
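An operator implementing this interface would typically call prepareBinding from its checkOperandTypes override. Below is a minimal sketch, assuming a hypothetical ExampleHazelcastFunction (not an actual Hazelcast class) and that HazelcastCallBinding exposes the usual SqlCallBinding accessors; HazelcastOperandTypeCheckerAware and HazelcastCallBinding come from the Hazelcast SQL module (imports omitted).

import org.apache.calcite.sql.SqlCallBinding;
import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.ReturnTypes;

// Hypothetical operator, for illustration only: it shows where prepareBinding(...) would be
// called so that unknown operand types are resolved before validation.
public class ExampleHazelcastFunction extends SqlFunction implements HazelcastOperandTypeCheckerAware {

    public ExampleHazelcastFunction() {
        super("EXAMPLE_FN", SqlKind.OTHER_FUNCTION, ReturnTypes.BOOLEAN_NULLABLE, null, null,
                SqlFunctionCategory.USER_DEFINED_FUNCTION);
    }

    @Override
    public boolean checkOperandTypes(SqlCallBinding binding, boolean throwOnFailure) {
        // prepareBinding asserts the binding belongs to this operator and applies its
        // SqlOperandTypeInference before the operator-specific check runs.
        HazelcastCallBinding hazelcastBinding = prepareBinding(binding);
        // A real operator would run its full check here and honor throwOnFailure.
        return hazelcastBinding.getOperandCount() == 1;
    }
}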
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperator in project hive by apache.
the class HiveCalciteUtil method checkMaterializable.
/**
 * Check if the expression is usable for query materialization, returning the first failing expression.
 */
public static RexCall checkMaterializable(RexNode expr) {
    RexCall failingCall = null;
    if (expr == null) {
        return null;
    }
    RexVisitor<Void> visitor = new RexVisitorImpl<Void>(true) {
        @Override
        public Void visitCall(org.apache.calcite.rex.RexCall call) {
            // non-deterministic functions as well as runtime constants are not materializable.
            SqlOperator op = call.getOperator();
            if (!op.isDeterministic() || op.isDynamicFunction()
                    || (op instanceof HiveSqlFunction && ((HiveSqlFunction) op).isRuntimeConstant())) {
                throw new Util.FoundOne(call);
            }
            return super.visitCall(call);
        }
    };
    try {
        expr.accept(visitor);
    } catch (Util.FoundOne e) {
        failingCall = (RexCall) e.getNode();
    }
    return failingCall;
}
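A short usage sketch for the method above; the wrapper class is hypothetical and only shows how a caller might interpret a non-null result.

import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;

// Hypothetical helper, for illustration only.
final class MaterializationCheck {

    /** Returns true when the expression contains no call that blocks materialization. */
    static boolean isMaterializable(RexNode expr) {
        RexCall offendingCall = HiveCalciteUtil.checkMaterializable(expr);
        if (offendingCall != null) {
            // The first non-deterministic, dynamic, or runtime-constant call found by the visitor.
            System.out.println("Not materializable because of: " + offendingCall);
            return false;
        }
        return true;
    }
}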
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperator in project hive by apache.
the class SqlFunctionConverter method getCalciteFn.
public static SqlOperator getCalciteFn(String hiveUdfName, List<RelDataType> calciteArgTypes,
        RelDataType calciteRetType, boolean deterministic, boolean runtimeConstant)
        throws CalciteSemanticException {
    SqlOperator calciteOp;
    CalciteUDFInfo uInf = getUDFInfo(hiveUdfName, calciteArgTypes, calciteRetType);
    switch (hiveUdfName) {
        // TODO: Perhaps we should do this for all functions, not just +,-
        case "-":
            calciteOp = new SqlMonotonicBinaryOperator("-", SqlKind.MINUS, 40, true,
                    uInf.returnTypeInference, uInf.operandTypeInference, OperandTypes.MINUS_OPERATOR);
            break;
        case "+":
            calciteOp = new SqlMonotonicBinaryOperator("+", SqlKind.PLUS, 40, true,
                    uInf.returnTypeInference, uInf.operandTypeInference, OperandTypes.PLUS_OPERATOR);
            break;
        default:
            calciteOp = hiveToCalcite.get(hiveUdfName);
            if (null == calciteOp) {
                calciteOp = new HiveSqlFunction(uInf.udfName, SqlKind.OTHER_FUNCTION, uInf.returnTypeInference,
                        uInf.operandTypeInference, uInf.operandTypeChecker,
                        SqlFunctionCategory.USER_DEFINED_FUNCTION, deterministic, runtimeConstant);
            }
            break;
    }
    return calciteOp;
}
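As an illustration of the default branch, a hedged sketch that resolves a generic UDF name (here "concat", chosen arbitrarily) into a Calcite operator; the type factory is assumed to come from the surrounding planner context, and the Hive-side imports (SqlFunctionConverter, CalciteSemanticException) are omitted.

import java.util.Arrays;
import java.util.List;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.type.SqlTypeName;

// Illustrative helper (not part of Hive): builds the argument/return types and asks
// SqlFunctionConverter for the matching operator.
final class UdfResolutionExample {

    static SqlOperator resolveConcat(RelDataTypeFactory typeFactory) throws CalciteSemanticException {
        RelDataType varchar = typeFactory.createTypeWithNullability(
                typeFactory.createSqlType(SqlTypeName.VARCHAR), true);
        List<RelDataType> argTypes = Arrays.asList(varchar, varchar);
        // "concat" is neither "+" nor "-", so it falls through to the default branch:
        // either a predefined mapping in hiveToCalcite or a freshly created HiveSqlFunction.
        return SqlFunctionConverter.getCalciteFn("concat", argTypes, varchar, true, false);
    }
}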
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperator in project hive by apache.
the class HiveJdbcImplementor method convertConditionToSqlNode.
/**
 * Converts a {@link RexNode} condition into a {@link SqlNode}.
 *
 * @param node Join condition
 * @param leftContext Left context
 * @param rightContext Right context
 * @param leftFieldCount Number of fields on left result
 * @return SqlNode that represents the condition
 */
public static SqlNode convertConditionToSqlNode(RexNode node, Context leftContext,
        Context rightContext, int leftFieldCount) {
    // Conditions that are always true/false are emitted directly as boolean literals.
    if (node.isAlwaysTrue()) {
        return SqlLiteral.createBoolean(true, POS);
    }
    if (node.isAlwaysFalse()) {
        return SqlLiteral.createBoolean(false, POS);
    }
    if (node instanceof RexInputRef) {
        Context joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
        return joinContext.toSql(null, node);
    }
    if (!(node instanceof RexCall)) {
        throw new AssertionError(node);
    }
    final List<RexNode> operands;
    final SqlOperator op;
    final Context joinContext;
    switch (node.getKind()) {
        case AND:
        case OR:
            operands = ((RexCall) node).getOperands();
            op = ((RexCall) node).getOperator();
            SqlNode sqlCondition = null;
            for (RexNode operand : operands) {
                SqlNode x = convertConditionToSqlNode(operand, leftContext, rightContext, leftFieldCount);
                if (sqlCondition == null) {
                    sqlCondition = x;
                } else {
                    sqlCondition = op.createCall(POS, sqlCondition, x);
                }
            }
            return sqlCondition;
        case EQUALS:
        case IS_NOT_DISTINCT_FROM:
        case NOT_EQUALS:
        case GREATER_THAN:
        case GREATER_THAN_OR_EQUAL:
        case LESS_THAN:
        case LESS_THAN_OR_EQUAL:
            node = stripCastFromString(node);
            operands = ((RexCall) node).getOperands();
            op = ((RexCall) node).getOperator();
            if (operands.size() == 2 && operands.get(0) instanceof RexInputRef
                    && operands.get(1) instanceof RexInputRef) {
                final RexInputRef op0 = (RexInputRef) operands.get(0);
                final RexInputRef op1 = (RexInputRef) operands.get(1);
                if (op0.getIndex() < leftFieldCount && op1.getIndex() >= leftFieldCount) {
                    // Arguments were of form 'op0 = op1'
                    return op.createCall(POS, leftContext.field(op0.getIndex()),
                            rightContext.field(op1.getIndex() - leftFieldCount));
                }
                if (op1.getIndex() < leftFieldCount && op0.getIndex() >= leftFieldCount) {
                    // Arguments were of form 'op1 = op0'
                    return reverseOperatorDirection(op).createCall(POS, leftContext.field(op1.getIndex()),
                            rightContext.field(op0.getIndex() - leftFieldCount));
                }
            }
            joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
            return joinContext.toSql(null, node);
        case IS_NULL:
        case IS_NOT_NULL:
            operands = ((RexCall) node).getOperands();
            if (operands.size() == 1 && operands.get(0) instanceof RexInputRef) {
                op = ((RexCall) node).getOperator();
                final RexInputRef op0 = (RexInputRef) operands.get(0);
                if (op0.getIndex() < leftFieldCount) {
                    return op.createCall(POS, leftContext.field(op0.getIndex()));
                } else {
                    return op.createCall(POS, rightContext.field(op0.getIndex() - leftFieldCount));
                }
            }
            joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
            return joinContext.toSql(null, node);
        default:
            joinContext = leftContext.implementor().joinContext(leftContext, rightContext);
            return joinContext.toSql(null, node);
    }
}
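To make the index arithmetic concrete: with leftFieldCount = 3, a condition such as =($1, $4) references field 1 of the left input and field 4 - 3 = 1 of the right input, so the first branch fires and emits leftContext.field(1) compared against rightContext.field(1). Written the other way around, =($4, $1), the second branch applies reverseOperatorDirection(op) so the left-side field still ends up on the left-hand side of the generated SQL.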
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlOperator in project hive by apache.
the class HiveUnionSimpleSelectsToInlineTableRule method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
    RexBuilder rexBuilder = call.builder().getRexBuilder();
    final HiveUnion union = call.rel(0);
    if (!union.all) {
        return;
    }
    List<RelNode> inputs = new ArrayList<RelNode>();
    List<Project> projects = new ArrayList<>();
    List<HiveTableFunctionScan> inlineTables = new ArrayList<>();
    for (RelNode input : union.getInputs()) {
        input = HiveRelDecorrelator.stripHep(input);
        if (isPlainProject(input)) {
            projects.add((Project) input);
            continue;
        }
        if (isInlineTableOperand(input)) {
            inlineTables.add((HiveTableFunctionScan) input);
            continue;
        }
        inputs.add(input);
    }
    if (projects.size() + inlineTables.size() <= 1) {
        // nothing to do
        return;
    }
    RowStorage newRows = new RowStorage();
    for (HiveTableFunctionScan rel : inlineTables) {
        // inline(array(row1,row2,...))
        RexCall rex = (RexCall) ((RexCall) rel.getCall()).operands.get(0);
        for (RexNode row : rex.operands) {
            if (!(row.getType() instanceof RelRecordType)) {
                return;
            }
            newRows.addRow(row);
        }
    }
    for (Project proj : projects) {
        RexNode row = rexBuilder.makeCall(SqlStdOperatorTable.ROW, proj.getProjects());
        if (!(row.getType() instanceof RelRecordType)) {
            return;
        }
        newRows.addRow(row);
    }
    if (newRows.keySet().size() + inputs.size() == union.getInputs().size()) {
        // nothing to do
        return;
    }
    if (dummyTable == null) {
        LOG.warn("Unexpected; rule would match - but dummyTable is not available");
        return;
    }
    for (RelRecordType type : newRows.keySet()) {
        List<RexNode> rows = newRows.get(type);
        RelDataType arrayType = rexBuilder.getTypeFactory().createArrayType(type, -1);
        try {
            SqlOperator inlineFn =
                    SqlFunctionConverter.getCalciteFn("inline", Collections.singletonList(arrayType), type, true, false);
            SqlOperator arrayFn =
                    SqlFunctionConverter.getCalciteFn("array", Collections.nCopies(rows.size(), type), arrayType, true, false);
            RexNode expr = rexBuilder.makeCall(arrayFn, rows);
            expr = rexBuilder.makeCall(inlineFn, expr);
            RelNode newInlineTable = buildTableFunctionScan(expr, union.getCluster());
            inputs.add(newInlineTable);
        } catch (CalciteSemanticException e) {
            LOG.debug("Conversion failed with exception", e);
            return;
        }
    }
    if (inputs.size() > 1) {
        HiveUnion newUnion = (HiveUnion) union.copy(union.getTraitSet(), inputs, true);
        call.transformTo(newUnion);
    } else {
        call.transformTo(inputs.get(0));
    }
}
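Conceptually, the rule collapses constant UNION ALL branches: inputs that each project literal rows, say the equivalents of SELECT 1, 'x' and SELECT 2, 'y', are grouped by row type and replaced with a single table function scan over inline(array(row(1, 'x'), row(2, 'y'))) built against the dummy table, while non-constant inputs are carried over unchanged; if only one input remains, the union itself disappears.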