Usage of io.confluent.ksql.schema.ksql.types.SqlType in the ksql project by Confluent Inc.
Class TermCompiler, method visitSubscriptExpression:
@Override
public Term visitSubscriptExpression(final SubscriptExpression node, final Context context) {
  // Resolve the SQL type of the base expression; the subscript semantics
  // depend on whether the base is an ARRAY (positional access) or a MAP
  // (key lookup). Any other base type is not subscriptable.
  final SqlType baseType = expressionTypeManager
      .getExpressionSqlType(node.getBase(), context.getLambdaSqlTypeMapping());

  switch (baseType.baseType()) {
    case ARRAY: {
      final SqlArray arrayType = (SqlArray) baseType;
      final Term baseTerm = process(node.getBase(), context);
      final Term subscript = process(node.getIndex(), context);
      // ArrayAccess handles ksql's 1-based / negative indexing conventions.
      return new SubscriptTerm(
          baseTerm,
          subscript,
          (o, index) -> ArrayAccess.arrayAccess((List<?>) o, (Integer) index),
          arrayType.getItemType());
    }
    case MAP: {
      final SqlMap mapType = (SqlMap) baseType;
      final Term baseTerm = process(node.getBase(), context);
      final Term subscript = process(node.getIndex(), context);
      // Plain map lookup; a missing key yields null at evaluation time.
      return new SubscriptTerm(
          baseTerm,
          subscript,
          (map, key) -> ((Map<?, ?>) map).get(key),
          mapType.getValueType());
    }
    default:
      throw new UnsupportedOperationException();
  }
}
Usage of io.confluent.ksql.schema.ksql.types.SqlType in the ksql project by Confluent Inc.
Class TermCompiler, method visitCast:
@Override
public Term visitCast(final Cast node, final Context context) {
  // Compile the inner expression first; its resolved SQL type is the
  // source type of the cast, and the CAST node itself carries the target.
  final Term innerTerm = process(node.getExpression(), context);
  final SqlType sourceType = innerTerm.getSqlType();
  final SqlType targetType = node.getType().getSqlType();
  return CastInterpreter.cast(innerTerm, sourceType, targetType, ksqlConfig);
}
Usage of io.confluent.ksql.schema.ksql.types.SqlType in the ksql project by Confluent Inc.
Class CastEvaluator, method castStructToStruct:
@SuppressWarnings("OptionalGetWithoutIsPresent")
private static String castStructToStruct(final String innerCode, final SqlType from, final SqlType to, final KsqlConfig config) {
  final SqlStruct sourceStruct = (SqlStruct) from;
  final SqlStruct targetStruct = (SqlStruct) to;
  try {
    // Emit code building an ImmutableMap of per-field casters. Only fields
    // present in BOTH structs are mapped; source fields with no matching
    // target field are silently dropped. The isPresent filter guarantees the
    // subsequent get() is safe (hence the suppression above).
    final String fieldMappers = sourceStruct.fields()
        .stream()
        .filter(field -> targetStruct.field(field.name()).isPresent())
        .map(field ->
            castFieldToField(field, targetStruct.field(field.name()).get(), config))
        .collect(Collectors.joining(
            System.lineSeparator(),
            "ImmutableMap.builder()\n\t\t",
            "\n\t\t.build()"));

    // Inefficient, but only way to pass type until SqlToJavaVisitor supports passing
    // additional parameters to the generated code. Hopefully, JVM optimises this away.
    final String schemaCode =
        "SchemaConverters.sqlToConnectConverter()"
            + ".toConnectSchema(" + SqlTypeCodeGen.generateCode(targetStruct) + ")";

    return "CastEvaluator.castStruct(" + innerCode + ", " + fieldMappers + "," + schemaCode + ")";
  } catch (final UnsupportedCastException e) {
    // Re-wrap so the reported cast failure is struct-to-struct, keeping the
    // field-level failure as the cause.
    throw new UnsupportedCastException(from, to, e);
  }
}
Usage of io.confluent.ksql.schema.ksql.types.SqlType in the ksql project by Confluent Inc.
Class FunctionLoaderUtils, method handleUdfReturnSchema:
/**
 * Builds a {@code SchemaProvider} that resolves a UDF's return type at call time.
 *
 * <p>Resolution strategy, in priority order:
 * <ol>
 *   <li>an explicit schema-provider method named by the annotation;</li>
 *   <li>a schema string declared on the annotation, parsed eagerly;</li>
 *   <li>the Java return type, when it contains no generics, converted eagerly;</li>
 *   <li>otherwise the return type is generic and is resolved per-invocation from
 *       the argument types.</li>
 * </ol>
 *
 * @param theClass                   the class declaring the UDF (used to invoke the provider)
 * @param javaReturnSchema           the UDF's declared Java return type
 * @param annotationSchema           schema string from the annotation, or Udf.NO_SCHEMA
 * @param parser                     parser for the annotation schema string
 * @param schemaProviderFunctionName provider method name, or Udf.NO_SCHEMA_PROVIDER
 * @param functionName               UDF name, for error messages
 * @param isVariadic                 whether the last parameter is variadic
 * @return a provider mapping (parameters, arguments) to the resolved return SqlType
 */
// CHECKSTYLE_RULES.OFF: CyclomaticComplexity
static SchemaProvider handleUdfReturnSchema(final Class theClass, final ParamType javaReturnSchema, final String annotationSchema, final SqlTypeParser parser, final String schemaProviderFunctionName, final String functionName, final boolean isVariadic) {
// CHECKSTYLE_RULES.ON: CyclomaticComplexity
final Function<List<SqlArgument>, SqlType> schemaProvider;
if (!Udf.NO_SCHEMA_PROVIDER.equals(schemaProviderFunctionName)) {
// Highest priority: a dedicated @UdfSchemaProvider method on the class.
schemaProvider = handleUdfSchemaProviderAnnotation(schemaProviderFunctionName, theClass, functionName);
} else if (!Udf.NO_SCHEMA.equals(annotationSchema)) {
// A literal schema string on the annotation; parse once, return a constant.
final SqlType sqlType = parser.parse(annotationSchema).getSqlType();
schemaProvider = args -> sqlType;
} else if (!GenericsUtil.hasGenerics(javaReturnSchema)) {
// it is important to do this eagerly and not in the lambda so that
// we can fail early (when loading the UDF) instead of when the user
// attempts to use the UDF
final SqlType sqlType = fromJavaType(javaReturnSchema, functionName);
schemaProvider = args -> sqlType;
} else {
// Generic return type with no provider: resolve from arguments below.
schemaProvider = null;
}
return (parameters, arguments) -> {
if (schemaProvider != null) {
final SqlType returnType = schemaProvider.apply(arguments);
// Sanity-check that the provider's answer is compatible with the
// declared Java return type, so codegen won't produce a ClassCastException.
if (!(ParamTypes.areCompatible(SqlArgument.of(returnType), javaReturnSchema, false))) {
throw new KsqlException(String.format("Return type %s of UDF %s does not match the declared " + "return type %s.", returnType, functionName.toUpperCase(), SchemaConverters.functionToSqlConverter().toSqlType(javaReturnSchema)));
}
return returnType;
}
// Generic path: walk the declared parameters against the actual argument
// types, collecting a binding for each generic type variable.
final Map<GenericType, SqlType> genericMapping = new HashMap<>();
for (int i = 0; i < Math.min(parameters.size(), arguments.size()); i++) {
final ParamType schema = parameters.get(i);
if (schema instanceof LambdaType) {
if (isVariadic && i == parameters.size() - 1) {
throw new KsqlException(String.format("Lambda function %s cannot be variadic.", arguments.get(i).toString()));
}
genericMapping.putAll(GenericsUtil.reserveGenerics(schema, arguments.get(i)));
} else {
// we resolve any variadic as if it were an array so that the type
// structure matches the input type
final SqlType instance = isVariadic && i == parameters.size() - 1 ? SqlTypes.array(arguments.get(i).getSqlTypeOrThrow()) : arguments.get(i).getSqlTypeOrThrow();
genericMapping.putAll(GenericsUtil.reserveGenerics(schema, SqlArgument.of(instance)));
}
}
// Substitute the collected bindings into the generic return type.
return GenericsUtil.applyResolved(javaReturnSchema, genericMapping);
};
}
Usage of io.confluent.ksql.schema.ksql.types.SqlType in the ksql project by Confluent Inc.
Class MetaStoreFixture, method getNewMetaStore:
/**
 * Builds a metastore pre-populated with the standard test sources used across
 * ksql tests: streams TEST0/TEST1/TEST4 (shared schema), tables TEST2/TEST3/TEST5,
 * the ORDERS stream with nested struct columns, NESTED_STREAM with arrays/maps of
 * structs, and SENSOR_READINGS.
 *
 * @param functionRegistry function registry backing the metastore
 * @param valueFormat      value format applied to every registered topic
 * @return a mutable metastore containing all fixture sources
 */
public static MutableMetaStore getNewMetaStore(final FunctionRegistry functionRegistry, final ValueFormat valueFormat) {
final MutableMetaStore metaStore = new MetaStoreImpl(functionRegistry);
// All fixture topics share a non-windowed KAFKA key format; only the value format varies.
final KeyFormat keyFormat = KeyFormat.nonWindowed(FormatInfo.of(FormatFactory.KAFKA.name()), SerdeFeatures.of());
// Schema shared by streams TEST0, TEST1 and TEST4 (includes a header column).
final LogicalSchema test1Schema = LogicalSchema.builder().keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT).valueColumn(ColumnName.of("COL1"), SqlTypes.STRING).valueColumn(ColumnName.of("COL2"), SqlTypes.STRING).valueColumn(ColumnName.of("COL3"), SqlTypes.DOUBLE).valueColumn(ColumnName.of("COL4"), SqlTypes.array(SqlTypes.DOUBLE)).valueColumn(ColumnName.of("COL5"), SqlTypes.map(SqlTypes.STRING, SqlTypes.DOUBLE)).headerColumn(ColumnName.of("HEAD"), Optional.empty()).build();
final KsqlTopic ksqlTopic0 = new KsqlTopic("test0", keyFormat, valueFormat);
final KsqlStream<?> ksqlStream0 = new KsqlStream<>("sqlexpression", SourceName.of("TEST0"), test1Schema, Optional.empty(), false, ksqlTopic0, false);
metaStore.putSource(ksqlStream0, false);
final KsqlTopic ksqlTopic1 = new KsqlTopic("test1", keyFormat, valueFormat);
final KsqlStream<?> ksqlStream1 = new KsqlStream<>("sqlexpression", SourceName.of("TEST1"), test1Schema, Optional.empty(), false, ksqlTopic1, false);
metaStore.putSource(ksqlStream1, false);
// TEST2: a table with flat primitive columns.
final LogicalSchema test2Schema = LogicalSchema.builder().keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT).valueColumn(ColumnName.of("COL1"), SqlTypes.STRING).valueColumn(ColumnName.of("COL2"), SqlTypes.STRING).valueColumn(ColumnName.of("COL3"), SqlTypes.DOUBLE).valueColumn(ColumnName.of("COL4"), SqlTypes.BOOLEAN).build();
final KsqlTopic ksqlTopic2 = new KsqlTopic("test2", keyFormat, valueFormat);
final KsqlTable<String> ksqlTable = new KsqlTable<>("sqlexpression", SourceName.of("TEST2"), test2Schema, Optional.empty(), false, ksqlTopic2, false);
metaStore.putSource(ksqlTable, false);
// Struct types reused by the ORDERS and NESTED_STREAM sources below.
final SqlType addressSchema = SqlTypes.struct().field("NUMBER", SqlTypes.BIGINT).field("STREET", SqlTypes.STRING).field("CITY", SqlTypes.STRING).field("STATE", SqlTypes.STRING).field("ZIPCODE", SqlTypes.BIGINT).build();
final SqlType categorySchema = SqlTypes.struct().field("ID", SqlTypes.BIGINT).field("NAME", SqlTypes.STRING).build();
final SqlType itemInfoSchema = SqlTypes.struct().field("ITEMID", SqlTypes.BIGINT).field("NAME", SqlTypes.STRING).field("CATEGORY", categorySchema).build();
// ORDERS: exercises structs, arrays, maps and all temporal/bytes types.
final LogicalSchema ordersSchema = LogicalSchema.builder().keyColumn(ColumnName.of("ORDERTIME"), SqlTypes.BIGINT).valueColumn(ColumnName.of("ORDERID"), SqlTypes.BIGINT).valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING).valueColumn(ColumnName.of("ITEMINFO"), itemInfoSchema).valueColumn(ColumnName.of("ORDERUNITS"), SqlTypes.INTEGER).valueColumn(ColumnName.of("ARRAYCOL"), SqlTypes.array(SqlTypes.DOUBLE)).valueColumn(ColumnName.of("MAPCOL"), SqlTypes.map(SqlTypes.STRING, SqlTypes.DOUBLE)).valueColumn(ColumnName.of("ADDRESS"), addressSchema).valueColumn(ColumnName.of("TIMESTAMPCOL"), SqlTypes.TIMESTAMP).valueColumn(ColumnName.of("TIMECOL"), SqlTypes.TIME).valueColumn(ColumnName.of("DATECOL"), SqlTypes.DATE).valueColumn(ColumnName.of("BYTESCOL"), SqlTypes.BYTES).build();
final KsqlTopic ksqlTopicOrders = new KsqlTopic("orders_topic", keyFormat, valueFormat);
final KsqlStream<?> ksqlStreamOrders = new KsqlStream<>("sqlexpression", SourceName.of("ORDERS"), ordersSchema, Optional.empty(), false, ksqlTopicOrders, false);
metaStore.putSource(ksqlStreamOrders, false);
// TEST3: same column layout as TEST2, registered as a separate table.
final LogicalSchema testTable3 = LogicalSchema.builder().keyColumn(ColumnName.of("COL0"), SqlTypes.BIGINT).valueColumn(ColumnName.of("COL1"), SqlTypes.STRING).valueColumn(ColumnName.of("COL2"), SqlTypes.STRING).valueColumn(ColumnName.of("COL3"), SqlTypes.DOUBLE).valueColumn(ColumnName.of("COL4"), SqlTypes.BOOLEAN).build();
final KsqlTopic ksqlTopic3 = new KsqlTopic("test3", keyFormat, valueFormat);
final KsqlTable<String> ksqlTable3 = new KsqlTable<>("sqlexpression", SourceName.of("TEST3"), testTable3, Optional.empty(), false, ksqlTopic3, false);
metaStore.putSource(ksqlTable3, false);
// NESTED_STREAM: deeply nested value columns — array of struct, map of struct,
// and a struct containing further structs/arrays/maps.
final SqlType nestedOrdersSchema = SqlTypes.struct().field("ORDERTIME", SqlTypes.BIGINT).field("ORDERID", SqlTypes.BIGINT).field("ITEMID", SqlTypes.STRING).field("ITEMINFO", itemInfoSchema).field("ORDERUNITS", SqlTypes.INTEGER).field("ARRAYCOL", SqlTypes.array(SqlTypes.DOUBLE)).field("MAPCOL", SqlTypes.map(SqlTypes.STRING, SqlTypes.DOUBLE)).field("ADDRESS", addressSchema).build();
final LogicalSchema nestedArrayStructMapSchema = LogicalSchema.builder().keyColumn(ColumnName.of("K"), SqlTypes.STRING).valueColumn(ColumnName.of("ARRAYCOL"), SqlTypes.array(itemInfoSchema)).valueColumn(ColumnName.of("MAPCOL"), SqlTypes.map(SqlTypes.STRING, itemInfoSchema)).valueColumn(ColumnName.of("NESTED_ORDER_COL"), nestedOrdersSchema).valueColumn(ColumnName.of("ITEM"), itemInfoSchema).build();
final KsqlTopic nestedArrayStructMapTopic = new KsqlTopic("NestedArrayStructMap_topic", keyFormat, valueFormat);
final KsqlStream<?> nestedArrayStructMapOrders = new KsqlStream<>("sqlexpression", SourceName.of("NESTED_STREAM"), nestedArrayStructMapSchema, Optional.empty(), false, nestedArrayStructMapTopic, false);
metaStore.putSource(nestedArrayStructMapOrders, false);
// TEST4 reuses the TEST0/TEST1 schema (note the distinct SQL expression string).
final KsqlTopic ksqlTopic4 = new KsqlTopic("test4", keyFormat, valueFormat);
final KsqlStream<?> ksqlStream4 = new KsqlStream<>("sqlexpression4", SourceName.of("TEST4"), test1Schema, Optional.empty(), false, ksqlTopic4, false);
metaStore.putSource(ksqlStream4, false);
// SENSOR_READINGS: arrays of primitives.
final LogicalSchema sensorReadingsSchema = LogicalSchema.builder().keyColumn(ColumnName.of("ID"), SqlTypes.BIGINT).valueColumn(ColumnName.of("SENSOR_NAME"), SqlTypes.STRING).valueColumn(ColumnName.of("ARR1"), SqlTypes.array(SqlTypes.BIGINT)).valueColumn(ColumnName.of("ARR2"), SqlTypes.array(SqlTypes.STRING)).build();
final KsqlTopic ksqlTopicSensorReadings = new KsqlTopic("sensor_readings_topic", keyFormat, valueFormat);
final KsqlStream<?> ksqlStreamSensorReadings = new KsqlStream<>("sqlexpression", SourceName.of("SENSOR_READINGS"), sensorReadingsSchema, Optional.empty(), false, ksqlTopicSensorReadings, false);
metaStore.putSource(ksqlStreamSensorReadings, false);
// TEST5: table of all-BOOLEAN columns (useful for boolean-expression tests).
final LogicalSchema testTable5 = LogicalSchema.builder().keyColumn(ColumnName.of("A"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("B"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("C"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("D"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("E"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("F"), SqlTypes.BOOLEAN).valueColumn(ColumnName.of("G"), SqlTypes.BOOLEAN).build();
final KsqlTopic ksqlTopic5 = new KsqlTopic("test5", keyFormat, valueFormat);
final KsqlTable<String> ksqlTable5 = new KsqlTable<>("sqlexpression", SourceName.of("TEST5"), testTable5, Optional.empty(), false, ksqlTopic5, false);
metaStore.putSource(ksqlTable5, false);
return metaStore;
}
Aggregations