Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache: class AndArgumentTypeStrategy, method inferArgumentType.
@Override
public Optional<DataType> inferArgumentType(CallContext callContext, int argumentPos, boolean throwOnFailure) {
    final DataType actualDataType = callContext.getArgumentDataTypes().get(argumentPos);
    final LogicalType actualType = actualDataType.getLogicalType();
    Optional<DataType> closestDataType = Optional.empty();
    for (ArgumentTypeStrategy strategy : argumentStrategies) {
        final Optional<DataType> inferredDataType = strategy.inferArgumentType(callContext, argumentPos, throwOnFailure);
        // argument type does not match at all
        if (!inferredDataType.isPresent()) {
            return Optional.empty();
        }
        final LogicalType inferredType = inferredDataType.get().getLogicalType();
        // a more specific, casted argument type is available
        if (!supportsAvoidingCast(actualType, inferredType) && !closestDataType.isPresent()) {
            closestDataType = inferredDataType;
        }
    }
    if (closestDataType.isPresent()) {
        return closestDataType;
    }
    return Optional.of(actualDataType);
}
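The strategy above is usually not instantiated directly. The sketch below is not taken from Flink: the class name AndStrategyExample is made up, and it assumes the and(...), logical(...), and LITERAL factories in InputTypeStrategies behave as their names suggest. It shows how such a conjunction of argument strategies could be declared when building a function's TypeInference.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.inference.InputTypeStrategies;
import org.apache.flink.table.types.inference.TypeInference;
import org.apache.flink.table.types.inference.TypeStrategies;
import org.apache.flink.table.types.logical.LogicalTypeFamily;

public class AndStrategyExample {

    // Declares a single argument that must satisfy both strategies at once:
    // it has to be a literal AND belong to the CHARACTER_STRING family.
    static TypeInference typeInference() {
        return TypeInference.newBuilder()
                .inputTypeStrategy(
                        InputTypeStrategies.sequence(
                                InputTypeStrategies.and(
                                        InputTypeStrategies.LITERAL,
                                        InputTypeStrategies.logical(LogicalTypeFamily.CHARACTER_STRING))))
                .outputTypeStrategy(TypeStrategies.explicit(DataTypes.BOOLEAN()))
                .build();
    }
}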
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache: class DynamicSinkUtils, method validateAndApplyMetadata.
private static void validateAndApplyMetadata(String tableDebugName, DynamicTableSink sink, ResolvedSchema schema, List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }
    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(String.format("Table '%s' declares persistable metadata columns, but the underlying %s doesn't implement the %s interface. If the column should not be persisted, it can be declared with the VIRTUAL keyword.", tableDebugName, DynamicTableSink.class.getSimpleName(), SupportsWritingMetadata.class.getSimpleName()));
    }
    final Map<String, DataType> metadataMap = ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(pos -> {
        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
        final String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
        final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
        final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
        // check that the metadata key is valid
        if (expectedMetadataDataType == null) {
            throw new ValidationException(String.format("Invalid metadata key '%s' in column '%s' of table '%s'. The %s class '%s' supports the following metadata keys for writing:\n%s", metadataKey, metadataColumn.getName(), tableDebugName, DynamicTableSink.class.getSimpleName(), sink.getClass().getName(), String.join("\n", metadataMap.keySet())));
        }
        // check that types are compatible
        if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
            if (metadataKey.equals(metadataColumn.getName())) {
                throw new ValidationException(String.format("Invalid data type for metadata column '%s' of table '%s'. The column cannot be declared as '%s' because the type must be castable to metadata type '%s'.", metadataColumn.getName(), tableDebugName, metadataType, expectedMetadataDataType.getLogicalType()));
            } else {
                throw new ValidationException(String.format("Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. The column cannot be declared as '%s' because the type must be castable to metadata type '%s'.", metadataColumn.getName(), metadataKey, tableDebugName, metadataType, expectedMetadataDataType.getLogicalType()));
            }
        }
    });
    sinkAbilitySpecs.add(new WritingMetadataSpec(createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
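For context, the metadataMap consulted above comes from the sink itself. The following hedged sketch is not Flink code: the class name MetadataAwareSink and the metadata keys "timestamp" and "headers" are hypothetical. It shows how a sink might expose writable metadata through SupportsWritingMetadata; the validation above then checks that each persisted metadata column is explicitly castable to the declared metadata type.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

public abstract class MetadataAwareSink implements DynamicTableSink, SupportsWritingMetadata {

    @Override
    public Map<String, DataType> listWritableMetadata() {
        // the keys and types below are illustrative; a real sink lists what it can persist
        final Map<String, DataType> metadata = new HashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3));
        metadata.put("headers", DataTypes.MAP(DataTypes.STRING(), DataTypes.BYTES()));
        return metadata;
    }

    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        // remember the accepted keys and the final consumed row type for the runtime writer
    }
}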
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache: class DynamicSinkUtils, method validateSchemaAndApplyImplicitCast.
/**
 * Checks if the given query can be written into the given sink's table schema.
 *
 * <p>It checks whether the field types are compatible (types should be equal, including
 * precision). If the types are not equal but can be implicitly cast, a cast projection is
 * applied. Otherwise, an exception is thrown.
 */
public static RelNode validateSchemaAndApplyImplicitCast(RelNode query, ResolvedSchema sinkSchema, String tableDebugName, DataTypeFactory dataTypeFactory, FlinkTypeFactory typeFactory) {
    final RowType queryType = FlinkTypeFactory.toLogicalRowType(query.getRowType());
    final List<RowField> queryFields = queryType.getFields();
    final RowType sinkType = (RowType) fixSinkDataType(dataTypeFactory, sinkSchema.toSinkRowDataType()).getLogicalType();
    final List<RowField> sinkFields = sinkType.getFields();
    if (queryFields.size() != sinkFields.size()) {
        throw createSchemaMismatchException("Different number of columns.", tableDebugName, queryFields, sinkFields);
    }
    boolean requiresCasting = false;
    for (int i = 0; i < sinkFields.size(); i++) {
        final LogicalType queryColumnType = queryFields.get(i).getType();
        final LogicalType sinkColumnType = sinkFields.get(i).getType();
        if (!supportsImplicitCast(queryColumnType, sinkColumnType)) {
            throw createSchemaMismatchException(String.format("Incompatible types for sink column '%s' at position %s.", sinkFields.get(i).getName(), i), tableDebugName, queryFields, sinkFields);
        }
        if (!supportsAvoidingCast(queryColumnType, sinkColumnType)) {
            requiresCasting = true;
        }
    }
    if (requiresCasting) {
        final RelDataType castRelDataType = typeFactory.buildRelNodeRowType(sinkType);
        return RelOptUtil.createCastRel(query, castRelDataType, true);
    }
    return query;
}
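The two checks in the loop come from LogicalTypeCasts. Below is a minimal standalone sketch of how they differ; the class name CastCheckExample is made up, and the values in the comments reflect the expected semantics rather than output copied from a run: INT data can be written into a BIGINT sink column via an implicit cast, but that cast cannot be avoided, so the query gets wrapped in a cast projection.

import static org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsAvoidingCast;
import static org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsImplicitCast;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.VarCharType;

public class CastCheckExample {
    public static void main(String[] args) {
        System.out.println(supportsImplicitCast(new IntType(), new BigIntType()));    // expected: true
        System.out.println(supportsAvoidingCast(new IntType(), new BigIntType()));    // expected: false -> requiresCasting
        System.out.println(supportsImplicitCast(new VarCharType(10), new IntType())); // expected: false -> schema mismatch
    }
}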
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache: class ExternalDynamicSink, method getSinkRuntimeProvider.
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
    final DynamicTableSink.DataStructureConverter physicalConverter = context.createDataStructureConverter(physicalDataType);
    return (TransformationSinkProvider) transformationContext -> {
        final Transformation<RowData> input = transformationContext.getInputTransformation();
        final LogicalType physicalType = physicalDataType.getLogicalType();
        final RowData.FieldGetter atomicFieldGetter;
        // for non-composite (atomic) physical types, the single value sits at position 0 of the row
        if (LogicalTypeChecks.isCompositeType(physicalType)) {
            atomicFieldGetter = null;
        } else {
            atomicFieldGetter = RowData.createFieldGetter(physicalType, 0);
        }
        TransformationMetadata transformationMeta = transformationContext.generateUid(EXTERNAL_DATASTREAM_TRANSFORMATION)
                .map(uid -> new TransformationMetadata(uid, generateOperatorName(), generateOperatorDesc()))
                .orElseGet(() -> new TransformationMetadata(generateOperatorName(), generateOperatorDesc()));
        return ExecNodeUtil.createOneInputTransformation(input, transformationMeta, new OutputConversionOperator(atomicFieldGetter, physicalConverter, transformationContext.getRowtimeIndex(), consumeRowtimeMetadata), ExternalTypeInfo.of(physicalDataType), input.getParallelism());
    };
}
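To illustrate the atomic case above: for a non-composite physical type, the converted record holds its single value at position 0, which is exactly what the field getter reads before the value is handed to the data structure converter. A small standalone sketch follows; the class name AtomicFieldGetterExample is hypothetical.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.VarCharType;

public class AtomicFieldGetterExample {
    public static void main(String[] args) {
        // a getter for an atomic STRING payload stored at position 0
        final RowData.FieldGetter getter = RowData.createFieldGetter(new VarCharType(VarCharType.MAX_LENGTH), 0);
        final RowData row = GenericRowData.of(StringData.fromString("hello"));
        System.out.println(getter.getFieldOrNull(row)); // hello
    }
}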
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache: class CatalogSchemaTable, method getRowType.
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
    final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    // The following block is a workaround to support tables defined by TableEnvironment.connect()
    // whose actual table sources implement DefinedProctimeAttribute/DefinedRowtimeAttributes.
    // It should be removed once DefinedProctimeAttribute/DefinedRowtimeAttributes are removed.
    Optional<TableSource<?>> sourceOpt = findAndCreateTableSource();
    if (isStreamingMode
            && sourceOpt.isPresent()
            && schema.getColumns().stream().allMatch(Column::isPhysical)
            && schema.getWatermarkSpecs().isEmpty()) {
        TableSchema tableSchema = TableSchema.fromResolvedSchema(schema);
        TableSource<?> source = sourceOpt.get();
        if (TableSourceValidation.hasProctimeAttribute(source) || TableSourceValidation.hasRowtimeAttribute(source)) {
            // If the table is defined by TableEnvironment.connect() and uses the legacy proctime
            // and rowtime descriptors, the TableSchema should fall back to
            // ConnectorCatalogTable#calculateSourceSchema.
            tableSchema = ConnectorCatalogTable.calculateSourceSchema(source, false);
        }
        return TableSourceUtil.getSourceRowType(flinkTypeFactory, tableSchema, scala.Option.empty(), true);
    }
    final List<String> fieldNames = schema.getColumnNames();
    final List<LogicalType> fieldTypes =
            schema.getColumnDataTypes().stream()
                    .map(DataType::getLogicalType)
                    .map(PlannerTypeUtils::removeLegacyTypes)
                    .collect(Collectors.toList());
    return flinkTypeFactory.buildRelNodeRowType(fieldNames, fieldTypes);
}
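A minimal sketch of the DataType-to-LogicalType mapping used in the fallback branch above; this is a standalone example, not part of CatalogSchemaTable, and the class name and columns are made up. It only demonstrates how each column's DataType wraps the LogicalType that the planner's FlinkTypeFactory ultimately consumes (the PlannerTypeUtils step is planner-internal and omitted here).

import java.util.List;
import java.util.stream.Collectors;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;

public class SchemaToLogicalTypesExample {
    public static void main(String[] args) {
        final ResolvedSchema schema = ResolvedSchema.of(
                Column.physical("id", DataTypes.BIGINT()),
                Column.physical("name", DataTypes.STRING()));
        // extract the logical types column by column, as getRowType does
        final List<LogicalType> fieldTypes = schema.getColumnDataTypes().stream()
                .map(DataType::getLogicalType)
                .collect(Collectors.toList());
        System.out.println(fieldTypes); // e.g. [BIGINT, VARCHAR(2147483647)]
    }
}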