Use of org.apache.calcite.sql.SqlFunction in project flink by splunk.
In class SqlValidatorImpl, method handleUnresolvedFunction:
public CalciteException handleUnresolvedFunction(SqlCall call, SqlFunction unresolvedFunction,
        List<RelDataType> argTypes, List<String> argNames) {
    // For builtins, we can give a better error message
    final List<SqlOperator> overloads = new ArrayList<>();
    opTab.lookupOperatorOverloads(unresolvedFunction.getNameAsId(), null, SqlSyntax.FUNCTION,
            overloads, catalogReader.nameMatcher());
    if (overloads.size() == 1) {
        SqlFunction fun = (SqlFunction) overloads.get(0);
        if ((fun.getSqlIdentifier() == null) && (fun.getSyntax() != SqlSyntax.FUNCTION_ID)) {
            final int expectedArgCount = fun.getOperandCountRange().getMin();
            throw newValidationError(call,
                    RESOURCE.invalidArgCount(call.getOperator().getName(), expectedArgCount));
        }
    }
    AssignableOperandTypeChecker typeChecking = new AssignableOperandTypeChecker(argTypes, argNames);
    String signature = typeChecking.getAllowedSignatures(unresolvedFunction, unresolvedFunction.getName());
    throw newValidationError(call, RESOURCE.validatorUnknownFunction(signature));
}
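For context, the SqlFunction being resolved here is typically declared with the six-argument constructor also used in the Hive sq_count_check snippet further down this page. The sketch below is a minimal, hypothetical declaration (the name MY_FUNC and its operand/return strategies are illustrative assumptions, not taken from the snippet above); a call that fails to resolve against such a declaration is what reaches handleUnresolvedFunction.

import org.apache.calcite.sql.SqlFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.InferTypes;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.ReturnTypes;

public final class MyFunctions {
    // Hypothetical UDF: MY_FUNC(NUMERIC) -> BOOLEAN.
    public static final SqlFunction MY_FUNC = new SqlFunction(
            "MY_FUNC",                     // name looked up via getNameAsId()
            SqlKind.OTHER_FUNCTION,        // generic function kind
            ReturnTypes.BOOLEAN,           // return type strategy
            InferTypes.RETURN_TYPE,        // infer unknown operand types from return type
            OperandTypes.NUMERIC,          // operand checker: exactly one numeric argument
            SqlFunctionCategory.USER_DEFINED_FUNCTION);

    private MyFunctions() {}
}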
Use of org.apache.calcite.sql.SqlFunction in project flink by splunk.
In class FunctionCatalogOperatorTable, method convertToBridgingSqlFunction:
private Optional<SqlFunction> convertToBridgingSqlFunction(
        @Nullable SqlFunctionCategory category, ContextResolvedFunction resolvedFunction) {
    final FunctionDefinition definition = resolvedFunction.getDefinition();
    if (!verifyFunctionKind(category, resolvedFunction)) {
        return Optional.empty();
    }
    final TypeInference typeInference;
    try {
        typeInference = definition.getTypeInference(dataTypeFactory);
    } catch (Throwable t) {
        throw new ValidationException(
                String.format(
                        "An error occurred in the type inference logic of function '%s'.",
                        resolvedFunction),
                t);
    }
    if (typeInference.getOutputTypeStrategy() == TypeStrategies.MISSING) {
        return Optional.empty();
    }
    final SqlFunction function;
    if (definition.getKind() == FunctionKind.AGGREGATE
            || definition.getKind() == FunctionKind.TABLE_AGGREGATE) {
        function = BridgingSqlAggFunction.of(
                dataTypeFactory, typeFactory, SqlKind.OTHER_FUNCTION, resolvedFunction, typeInference);
    } else {
        function = BridgingSqlFunction.of(
                dataTypeFactory, typeFactory, SqlKind.OTHER_FUNCTION, resolvedFunction, typeInference);
    }
    return Optional.of(function);
}
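As a point of reference, the TypeInference consumed above usually originates from a user-defined function. The sketch below is a minimal, hypothetical Flink scalar UDF (the name ParseLevel and its logic are invented for illustration): Flink derives its TypeInference reflectively from the eval signature, that derived inference is what definition.getTypeInference(dataTypeFactory) returns, and because the kind is SCALAR it is bridged via BridgingSqlFunction rather than BridgingSqlAggFunction.

import org.apache.flink.table.functions.ScalarFunction;

// Hypothetical scalar UDF; TypeInference is derived from eval's signature.
public class ParseLevel extends ScalarFunction {
    public Integer eval(String level) {
        return "HIGH".equals(level) ? 1 : 0;
    }
}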
Use of org.apache.calcite.sql.SqlFunction in project hive by apache.
In class HiveSubQueryRemoveRule, method rewriteScalar:
private RexNode rewriteScalar(RelMetadataQuery mq, RexSubQuery e, Set<CorrelationId> variablesSet,
        RelBuilder builder, int offset, int inputCount, boolean isCorrScalarAgg) {
    // If the scalar query has an aggregate with no windowing and no GROUP BY,
    // avoid adding sq_count_check, since it is guaranteed to produce at most one row.
    Double maxRowCount = mq.getMaxRowCount(e.rel);
    boolean shouldIntroSQCountCheck = maxRowCount == null || maxRowCount > 1.0;
    if (shouldIntroSQCountCheck) {
        builder.push(e.rel);
        // Returns a single row/column.
        builder.aggregate(builder.groupKey(), builder.count(false, "cnt"));
        SqlFunction countCheck = new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION,
                ReturnTypes.BOOLEAN, InferTypes.RETURN_TYPE, OperandTypes.NUMERIC,
                SqlFunctionCategory.USER_DEFINED_FUNCTION);
        // We create FILTER (sq_count_check(count())) instead of PROJECT because RelFieldTrimmer
        // would get rid of the Project, since it is not used further up the tree.
        // sq_count_check returns true when the subquery returns a single row, else it fails.
        builder.filter(builder.call(countCheck, builder.field("cnt")));
        if (!variablesSet.isEmpty()) {
            builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
        } else {
            builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
        }
        offset++;
    }
    if (isCorrScalarAgg) {
        // Transformation: outer query LEFT JOIN (inner query) on the correlated
        // predicate, preserving rows only from the left side.
        builder.push(e.rel);
        final List<RexNode> parentQueryFields = new ArrayList<>();
        parentQueryFields.addAll(builder.fields());
        // An indicator field is appended since there could be multiple scalar
        // subqueries and the FILTER is created using the field name.
        String indicator = "trueLiteral";
        parentQueryFields.add(builder.alias(builder.literal(true), indicator));
        builder.project(parentQueryFields);
        builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
        final ImmutableList.Builder<RexNode> operands = ImmutableList.builder();
        RexNode literal;
        if (isAggZeroOnEmpty(e)) {
            // Since COUNT has a return type of BIGINT, we need to make a literal of
            // type BIGINT; RelBuilder's literal doesn't allow this.
            literal = e.rel.getCluster().getRexBuilder().makeBigintLiteral(new BigDecimal(0));
        } else {
            literal = e.rel.getCluster().getRexBuilder().makeNullLiteral(getAggTypeForScalarSub(e));
        }
        operands.add(builder.isNull(builder.field(indicator)), literal);
        operands.add(field(builder, 1, builder.fields().size() - 2));
        return builder.call(SqlStdOperatorTable.CASE, operands.build());
    }
    // The transformation is a left join for correlated predicates and an inner join
    // otherwise, but do a count on the inner side first to make sure it generates
    // at most one row.
    builder.push(e.rel);
    builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
    return field(builder, inputCount, offset);
}
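To make the count-check shape concrete, here is a minimal sketch of the same aggregate-then-filter pattern built with Calcite's RelBuilder. It is an illustration under stated assumptions, not Hive's code: the table name is a parameter, and the standard <= operator stands in for the sq_count_check UDF (which fails at runtime instead of filtering).

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.tools.RelBuilder;

public final class CountCheckSketch {
    // Aggregate a scan down to COUNT(*) AS cnt, then filter on it, mirroring
    // the FILTER(sq_count_check(cnt)) built in rewriteScalar above.
    static RelNode countCheck(RelBuilder builder, String table) {
        return builder
                .scan(table)                          // e.g. a hypothetical "EMP"
                .aggregate(builder.groupKey(), builder.count(false, "cnt"))
                .filter(builder.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
                        builder.field("cnt"), builder.literal(1)))
                .build();
    }

    private CountCheckSketch() {}
}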
Use of org.apache.calcite.sql.SqlFunction in project hive by apache.
In class DataSketchesFunctions, method registerAsHiveFunction:
private void registerAsHiveFunction(SketchFunctionDescriptor sfd) {
    if (sfd != null && sfd.getReturnRelDataType().isPresent()) {
        SqlFunction cdfFn = new HiveSqlFunction(sfd.name, SqlKind.OTHER_FUNCTION,
                ReturnTypes.explicit(sfd.getReturnRelDataType().get()), InferTypes.ANY_NULLABLE,
                OperandTypes.family(), SqlFunctionCategory.USER_DEFINED_FUNCTION, true, false);
        sfd.setCalciteFunction(cdfFn);
    }
}
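The ReturnTypes.explicit strategy used above pins the function's return type to one concrete RelDataType instead of computing it from the operands. A standalone sketch (the DOUBLE type is an illustrative assumption; the descriptor supplies the real type above):

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.sql.type.ReturnTypes;
import org.apache.calcite.sql.type.SqlReturnTypeInference;
import org.apache.calcite.sql.type.SqlTypeName;

public final class ExplicitReturnTypeSketch {
    public static void main(String[] args) {
        // Pin a function's return type to DOUBLE, the same way
        // registerAsHiveFunction pins it to the descriptor's RelDataType.
        RelDataType doubleType = new JavaTypeFactoryImpl().createSqlType(SqlTypeName.DOUBLE);
        SqlReturnTypeInference returnsDouble = ReturnTypes.explicit(doubleType);
        System.out.println(returnsDouble);
    }
}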
Use of org.apache.calcite.sql.SqlFunction in project hive by apache.
In class DruidSqlOperatorConverter, method getDefaultMap:
public static final Map<SqlOperator, org.apache.calcite.adapter.druid.DruidSqlOperatorConverter> getDefaultMap() {
    if (druidOperatorMap == null) {
        druidOperatorMap = new HashMap<>();
        DruidQuery.DEFAULT_OPERATORS_LIST.stream()
                .forEach(op -> druidOperatorMap.put(op.calciteOperator(), op));
        // Override with Hive-specific operators.
        druidOperatorMap.putAll(Maps.asMap(HiveFloorDate.ALL_FUNCTIONS,
                (Function<SqlFunction, org.apache.calcite.adapter.druid.DruidSqlOperatorConverter>)
                        input -> new FloorOperatorConversion()));
        druidOperatorMap.putAll(Maps.asMap(HiveExtractDate.ALL_FUNCTIONS,
                (Function<SqlFunction, org.apache.calcite.adapter.druid.DruidSqlOperatorConverter>)
                        input -> new ExtractOperatorConversion()));
        druidOperatorMap.put(HiveConcat.INSTANCE,
                new DirectOperatorConversion(HiveConcat.INSTANCE, "concat"));
        druidOperatorMap.put(SqlStdOperatorTable.SUBSTRING,
                new DruidSqlOperatorConverter.DruidSubstringOperatorConversion());
        druidOperatorMap.put(SqlStdOperatorTable.IS_NULL,
                new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NULL, "isnull"));
        druidOperatorMap.put(SqlStdOperatorTable.IS_NOT_NULL,
                new UnaryFunctionOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, "notnull"));
        druidOperatorMap.put(HiveTruncSqlOperator.INSTANCE, new DruidDateTruncOperatorConversion());
        druidOperatorMap.put(HiveToDateSqlOperator.INSTANCE, new DruidToDateOperatorConversion());
        druidOperatorMap.put(HiveFromUnixTimeSqlOperator.INSTANCE,
                new DruidFormUnixTimeOperatorConversion());
        druidOperatorMap.put(HiveToUnixTimestampSqlOperator.INSTANCE,
                new DruidUnixTimestampOperatorConversion());
        druidOperatorMap.put(HiveDateAddSqlOperator.INSTANCE,
                new DruidDateArithmeticOperatorConversion(1, HiveDateAddSqlOperator.INSTANCE));
        druidOperatorMap.put(HiveDateSubSqlOperator.INSTANCE,
                new DruidDateArithmeticOperatorConversion(-1, HiveDateSubSqlOperator.INSTANCE));
    }
    return druidOperatorMap;
}
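Note that the method lazily initializes druidOperatorMap without synchronization, which is only safe if it is never called from multiple threads. Where that cannot be guaranteed, the initialization-on-demand holder idiom is one lock-free alternative; the sketch below is a hypothetical, simplified variant (the String value type and placeholder entries stand in for the Druid converter types above).

import java.util.HashMap;
import java.util.Map;

public final class DruidOperatorMaps {
    private static final class Holder {
        // The JVM runs this static initializer exactly once, on first access.
        static final Map<String, String> MAP = build();
    }

    private static Map<String, String> build() {
        Map<String, String> m = new HashMap<>();
        m.put("concat", "concat"); // placeholder entry; real code registers converters
        return m;
    }

    public static Map<String, String> getDefaultMap() {
        return Holder.MAP;
    }

    private DruidOperatorMaps() {}
}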