Example 61 with LogicalType

Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.

From the class AndArgumentTypeStrategy, the method inferArgumentType:

@Override
public Optional<DataType> inferArgumentType(CallContext callContext, int argumentPos, boolean throwOnFailure) {
    final DataType actualDataType = callContext.getArgumentDataTypes().get(argumentPos);
    final LogicalType actualType = actualDataType.getLogicalType();
    Optional<DataType> closestDataType = Optional.empty();
    for (ArgumentTypeStrategy strategy : argumentStrategies) {
        final Optional<DataType> inferredDataType = strategy.inferArgumentType(callContext, argumentPos, throwOnFailure);
        // argument type does not match at all
        if (!inferredDataType.isPresent()) {
            return Optional.empty();
        }
        final LogicalType inferredType = inferredDataType.get().getLogicalType();
        // a more specific, casted argument type is available
        if (!supportsAvoidingCast(actualType, inferredType) && !closestDataType.isPresent()) {
            closestDataType = inferredDataType;
        }
    }
    if (closestDataType.isPresent()) {
        return closestDataType;
    }
    return Optional.of(actualDataType);
}
Also used : DataType(org.apache.flink.table.types.DataType) LogicalType(org.apache.flink.table.types.logical.LogicalType) ArgumentTypeStrategy(org.apache.flink.table.types.inference.ArgumentTypeStrategy)
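
AndArgumentTypeStrategy is the conjunctive strategy behind the and(...) factory in org.apache.flink.table.types.inference.InputTypeStrategies. Below is a minimal sketch of how such a strategy is typically composed when declaring a function's type inference; it assumes the and, logical, sequence, and LITERAL factory members exist under those names in your Flink version, and the class name is illustrative.

import static org.apache.flink.table.types.inference.InputTypeStrategies.LITERAL;
import static org.apache.flink.table.types.inference.InputTypeStrategies.and;
import static org.apache.flink.table.types.inference.InputTypeStrategies.logical;
import static org.apache.flink.table.types.inference.InputTypeStrategies.sequence;

import org.apache.flink.table.types.inference.InputTypeStrategy;
import org.apache.flink.table.types.logical.LogicalTypeFamily;

public class AndStrategyExample {
    public static void main(String[] args) {
        // The single argument must be NUMERIC *and* a literal; each constraint is
        // checked in turn by AndArgumentTypeStrategy.inferArgumentType shown above.
        InputTypeStrategy strategy =
                sequence(and(logical(LogicalTypeFamily.NUMERIC), LITERAL));
        // Expected: Optional[1], i.e. exactly one argument.
        System.out.println(strategy.getArgumentCount().getMinCount());
    }
}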

Example 62 with LogicalType

Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.

From the class DynamicSinkUtils, the method validateAndApplyMetadata:

private static void validateAndApplyMetadata(String tableDebugName, DynamicTableSink sink, ResolvedSchema schema, List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }
    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(String.format("Table '%s' declares persistable metadata columns, but the underlying %s " + "doesn't implement the %s interface. If the column should not " + "be persisted, it can be declared with the VIRTUAL keyword.", tableDebugName, DynamicTableSink.class.getSimpleName(), SupportsWritingMetadata.class.getSimpleName()));
    }
    final Map<String, DataType> metadataMap = ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(pos -> {
        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
        final String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
        final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
        final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
        // check that metadata key is valid
        if (expectedMetadataDataType == null) {
            throw new ValidationException(String.format("Invalid metadata key '%s' in column '%s' of table '%s'. " + "The %s class '%s' supports the following metadata keys for writing:\n%s", metadataKey, metadataColumn.getName(), tableDebugName, DynamicTableSink.class.getSimpleName(), sink.getClass().getName(), String.join("\n", metadataMap.keySet())));
        }
        // check that types are compatible
        if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
            if (metadataKey.equals(metadataColumn.getName())) {
                throw new ValidationException(String.format("Invalid data type for metadata column '%s' of table '%s'. " + "The column cannot be declared as '%s' because the type must be " + "castable to metadata type '%s'.", metadataColumn.getName(), tableDebugName, metadataType, expectedMetadataDataType.getLogicalType()));
            } else {
                throw new ValidationException(String.format("Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. " + "The column cannot be declared as '%s' because the type must be " + "castable to metadata type '%s'.", metadataColumn.getName(), metadataKey, tableDebugName, metadataType, expectedMetadataDataType.getLogicalType()));
            }
        }
    });
    sinkAbilitySpecs.add(new WritingMetadataSpec(createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
Also used : WritingMetadataSpec(org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) ValidationException(org.apache.flink.table.api.ValidationException) Column(org.apache.flink.table.catalog.Column) SupportsWritingMetadata(org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata) DataType(org.apache.flink.table.types.DataType) RelDataType(org.apache.calcite.rel.type.RelDataType) LogicalType(org.apache.flink.table.types.logical.LogicalType)
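
For context, listWritableMetadata() produces the metadataMap consulted above. Below is a hedged sketch of the sink-side contract; the class name MetadataAwareSink and its metadata keys are hypothetical, and the remaining DynamicTableSink methods are omitted by keeping the class abstract.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

abstract class MetadataAwareSink implements DynamicTableSink, SupportsWritingMetadata {

    @Override
    public Map<String, DataType> listWritableMetadata() {
        // These keys become valid metadata column names for persisted metadata;
        // the keys and types here are purely illustrative.
        final Map<String, DataType> metadata = new LinkedHashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3));
        metadata.put("headers", DataTypes.MAP(DataTypes.STRING(), DataTypes.BYTES()));
        return metadata;
    }

    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        // A real sink would record here which metadata columns to persist.
    }
}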

Example 63 with LogicalType

Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.

From the class DynamicSinkUtils, the method validateSchemaAndApplyImplicitCast:

/**
 * Checks if the given query can be written into the given sink's table schema.
 *
 * <p>It checks whether field types are compatible (types should be equal including precisions).
 * If types are not compatible, but can be implicitly cast, a cast projection will be applied.
 * Otherwise, an exception will be thrown.
 */
public static RelNode validateSchemaAndApplyImplicitCast(RelNode query, ResolvedSchema sinkSchema, String tableDebugName, DataTypeFactory dataTypeFactory, FlinkTypeFactory typeFactory) {
    final RowType queryType = FlinkTypeFactory.toLogicalRowType(query.getRowType());
    final List<RowField> queryFields = queryType.getFields();
    final RowType sinkType = (RowType) fixSinkDataType(dataTypeFactory, sinkSchema.toSinkRowDataType()).getLogicalType();
    final List<RowField> sinkFields = sinkType.getFields();
    if (queryFields.size() != sinkFields.size()) {
        throw createSchemaMismatchException("Different number of columns.", tableDebugName, queryFields, sinkFields);
    }
    boolean requiresCasting = false;
    for (int i = 0; i < sinkFields.size(); i++) {
        final LogicalType queryColumnType = queryFields.get(i).getType();
        final LogicalType sinkColumnType = sinkFields.get(i).getType();
        if (!supportsImplicitCast(queryColumnType, sinkColumnType)) {
            throw createSchemaMismatchException(
                    String.format(
                            "Incompatible types for sink column '%s' at position %s.",
                            sinkFields.get(i).getName(),
                            i),
                    tableDebugName,
                    queryFields,
                    sinkFields);
        }
        if (!supportsAvoidingCast(queryColumnType, sinkColumnType)) {
            requiresCasting = true;
        }
    }
    if (requiresCasting) {
        final RelDataType castRelDataType = typeFactory.buildRelNodeRowType(sinkType);
        return RelOptUtil.createCastRel(query, castRelDataType, true);
    }
    return query;
}
Also used : RowField(org.apache.flink.table.types.logical.RowType.RowField) RowType(org.apache.flink.table.types.logical.RowType) LogicalType(org.apache.flink.table.types.logical.LogicalType) RelDataType(org.apache.calcite.rel.type.RelDataType) RelHint(org.apache.calcite.rel.hint.RelHint)
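
The two compatibility checks above come from org.apache.flink.table.types.logical.utils.LogicalTypeCasts. Here is a small self-contained sketch of their behavior; the commented results reflect the usual numeric widening rules and are worth verifying against your Flink version.

import static org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsAvoidingCast;
import static org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsImplicitCast;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;

public class CastCheckExample {
    public static void main(String[] args) {
        // INT widens implicitly to BIGINT, so the sink schema is accepted...
        System.out.println(supportsImplicitCast(new IntType(), new BigIntType())); // true
        // ...but the types differ, so a cast projection is still inserted.
        System.out.println(supportsAvoidingCast(new IntType(), new BigIntType())); // false
        // Identical types need no cast at all.
        System.out.println(supportsAvoidingCast(new IntType(), new IntType())); // true
    }
}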

Example 64 with LogicalType

Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.

From the class ExternalDynamicSink, the method getSinkRuntimeProvider:

@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
    final DynamicTableSink.DataStructureConverter physicalConverter = context.createDataStructureConverter(physicalDataType);
    return (TransformationSinkProvider) transformationContext -> {
        final Transformation<RowData> input = transformationContext.getInputTransformation();
        final LogicalType physicalType = physicalDataType.getLogicalType();
        final RowData.FieldGetter atomicFieldGetter;
        if (LogicalTypeChecks.isCompositeType(physicalType)) {
            atomicFieldGetter = null;
        } else {
            atomicFieldGetter = RowData.createFieldGetter(physicalType, 0);
        }
        TransformationMetadata transformationMeta =
                transformationContext
                        .generateUid(EXTERNAL_DATASTREAM_TRANSFORMATION)
                        .map(uid -> new TransformationMetadata(uid, generateOperatorName(), generateOperatorDesc()))
                        .orElseGet(() -> new TransformationMetadata(generateOperatorName(), generateOperatorDesc()));
        return ExecNodeUtil.createOneInputTransformation(
                input,
                transformationMeta,
                new OutputConversionOperator(
                        atomicFieldGetter,
                        physicalConverter,
                        transformationContext.getRowtimeIndex(),
                        consumeRowtimeMetadata),
                ExternalTypeInfo.of(physicalDataType),
                input.getParallelism());
    };
}
Also used : TransformationMetadata(org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata) DataType(org.apache.flink.table.types.DataType) OutputConversionOperator(org.apache.flink.table.runtime.operators.sink.OutputConversionOperator) RowData(org.apache.flink.table.data.RowData) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) SupportsWritingMetadata(org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata) DataTypes(org.apache.flink.table.api.DataTypes) ExternalTypeInfo(org.apache.flink.table.runtime.typeutils.ExternalTypeInfo) DataStream(org.apache.flink.streaming.api.datastream.DataStream) ExecNodeUtil(org.apache.flink.table.planner.plan.nodes.exec.utils.ExecNodeUtil) List(java.util.List) LogicalType(org.apache.flink.table.types.logical.LogicalType) Map(java.util.Map) Internal(org.apache.flink.annotation.Internal) Transformation(org.apache.flink.api.dag.Transformation) Collections(java.util.Collections) Nullable(javax.annotation.Nullable) LogicalTypeChecks(org.apache.flink.table.types.logical.utils.LogicalTypeChecks)
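
The atomic branch above exists because a non-composite physical type is carried as a single field at position 0 of the row. A minimal sketch of the isCompositeType check and the resulting FieldGetter (the class name is illustrative):

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.logical.utils.LogicalTypeChecks;

public class AtomicFieldGetterExample {
    public static void main(String[] args) {
        final VarCharType atomicType = new VarCharType(VarCharType.MAX_LENGTH);
        // A plain STRING is not composite, so getSinkRuntimeProvider builds a getter.
        System.out.println(LogicalTypeChecks.isCompositeType(atomicType)); // false
        final RowData.FieldGetter getter = RowData.createFieldGetter(atomicType, 0);
        final RowData row = GenericRowData.of(StringData.fromString("hello"));
        System.out.println(getter.getFieldOrNull(row)); // hello
    }
}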

Example 65 with LogicalType

Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.

From the class CatalogSchemaTable, the method getRowType:

@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
    final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    // The following block is a workaround to support tables defined via
    // TableEnvironment.connect() whose actual table sources implement
    // DefinedProctimeAttribute/DefinedRowtimeAttributes. It should be removed
    // once DefinedProctimeAttribute/DefinedRowtimeAttributes are removed.
    Optional<TableSource<?>> sourceOpt = findAndCreateTableSource();
    if (isStreamingMode
            && sourceOpt.isPresent()
            && schema.getColumns().stream().allMatch(Column::isPhysical)
            && schema.getWatermarkSpecs().isEmpty()) {
        TableSchema tableSchema = TableSchema.fromResolvedSchema(schema);
        TableSource<?> source = sourceOpt.get();
        if (TableSourceValidation.hasProctimeAttribute(source) || TableSourceValidation.hasRowtimeAttribute(source)) {
            // If the table is defined by TableEnvironment.connect() and uses the
            // legacy proctime and rowtime descriptors, the TableSchema should fall
            // back to ConnectorCatalogTable#calculateSourceSchema.
            tableSchema = ConnectorCatalogTable.calculateSourceSchema(source, false);
        }
        return TableSourceUtil.getSourceRowType(flinkTypeFactory, tableSchema, scala.Option.empty(), true);
    }
    final List<String> fieldNames = schema.getColumnNames();
    final List<LogicalType> fieldTypes =
            schema.getColumnDataTypes().stream()
                    .map(DataType::getLogicalType)
                    .map(PlannerTypeUtils::removeLegacyTypes)
                    .collect(Collectors.toList());
    return flinkTypeFactory.buildRelNodeRowType(fieldNames, fieldTypes);
}
Also used : TableSource(org.apache.flink.table.sources.TableSource) StreamTableSource(org.apache.flink.table.sources.StreamTableSource) FlinkTypeFactory(org.apache.flink.table.planner.calcite.FlinkTypeFactory) TableSchema(org.apache.flink.table.api.TableSchema) Column(org.apache.flink.table.catalog.Column) LogicalType(org.apache.flink.table.types.logical.LogicalType) DataType(org.apache.flink.table.types.DataType) RelDataType(org.apache.calcite.rel.type.RelDataType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
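
The non-legacy path at the end reduces to mapping each column's DataType to its LogicalType. A self-contained sketch of that extraction with an illustrative two-column schema; the removeLegacyTypes step is omitted here because it needs planner internals.

import java.util.List;
import java.util.stream.Collectors;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;

public class SchemaFieldTypesExample {
    public static void main(String[] args) {
        // Illustrative schema; in getRowType it comes from contextResolvedTable.
        final ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT()),
                        Column.physical("name", DataTypes.STRING()));
        final List<String> fieldNames = schema.getColumnNames();
        final List<LogicalType> fieldTypes =
                schema.getColumnDataTypes().stream()
                        .map(DataType::getLogicalType)
                        .collect(Collectors.toList());
        System.out.println(fieldNames); // [id, name]
        System.out.println(fieldTypes); // [BIGINT, VARCHAR(2147483647)]
    }
}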

Aggregations

LogicalType (org.apache.flink.table.types.logical.LogicalType): 192
DataType (org.apache.flink.table.types.DataType): 53
RowType (org.apache.flink.table.types.logical.RowType): 53
RowData (org.apache.flink.table.data.RowData): 45
List (java.util.List): 29
ArrayList (java.util.ArrayList): 28
TableException (org.apache.flink.table.api.TableException): 25
TimestampType (org.apache.flink.table.types.logical.TimestampType): 25
Internal (org.apache.flink.annotation.Internal): 21
IntType (org.apache.flink.table.types.logical.IntType): 21
Map (java.util.Map): 20
ValidationException (org.apache.flink.table.api.ValidationException): 20
ArrayType (org.apache.flink.table.types.logical.ArrayType): 19
DecimalType (org.apache.flink.table.types.logical.DecimalType): 19
LocalZonedTimestampType (org.apache.flink.table.types.logical.LocalZonedTimestampType): 17
Test (org.junit.Test): 17
BigIntType (org.apache.flink.table.types.logical.BigIntType): 16
LegacyTypeInformationType (org.apache.flink.table.types.logical.LegacyTypeInformationType): 16
GenericRowData (org.apache.flink.table.data.GenericRowData): 15
Arrays (java.util.Arrays): 14