Example 1 with SupportsWritingMetadata

Use of org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata in project flink by apache.

From the class WritingMetadataSpec, the apply method:

@Override
public void apply(DynamicTableSink tableSink) {
    if (tableSink instanceof SupportsWritingMetadata) {
        // Convert the planner's logical type into the DataType handed to the connector.
        DataType consumedDataType = TypeConversions.fromLogicalToDataType(consumedType);
        ((SupportsWritingMetadata) tableSink).applyWritableMetadata(metadataKeys, consumedDataType);
    } else {
        // The spec was created for a sink that advertised this ability; failing here
        // indicates an inconsistent plan rather than a user error.
        throw new TableException(
                String.format(
                        "%s does not support SupportsWritingMetadata.",
                        tableSink.getClass().getName()));
    }
}
Also used : TableException(org.apache.flink.table.api.TableException) SupportsWritingMetadata(org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata) DataType(org.apache.flink.table.types.DataType)
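
To show where this call lands, below is a minimal connector-side sketch of a sink that accepts applyWritableMetadata. The class name MyDynamicTableSink and the metadata keys 'timestamp' and 'headers' are hypothetical; only the SupportsWritingMetadata and DynamicTableSink method signatures come from Flink's public API.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

// Hypothetical sink, for illustration only; not part of Flink.
public class MyDynamicTableSink implements DynamicTableSink, SupportsWritingMetadata {

    // Filled in by the planner (via WritingMetadataSpec.apply) before runtime.
    private List<String> metadataKeys;
    private DataType consumedDataType;

    @Override
    public Map<String, DataType> listWritableMetadata() {
        // Advertise the metadata keys this sink can persist, with their expected types.
        final Map<String, DataType> metadata = new HashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_LTZ(3));
        metadata.put("headers", DataTypes.MAP(DataTypes.STRING(), DataTypes.BYTES()));
        return metadata;
    }

    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        // Remember the planner's decision; the runtime writer reads the metadata
        // columns from the tail of the consumed row, in this key order.
        this.metadataKeys = metadataKeys;
        this.consumedDataType = consumedDataType;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // The actual writer is omitted in this sketch.
        throw new UnsupportedOperationException("Not part of this sketch.");
    }

    @Override
    public DynamicTableSink copy() {
        return new MyDynamicTableSink();
    }

    @Override
    public String asSummaryString() {
        return "MyDynamicTableSink";
    }
}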

Example 2 with SupportsWritingMetadata

Use of org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata in project flink by apache.

From the class DynamicSinkUtils, the validateAndApplyMetadata method:

private static void validateAndApplyMetadata(
        String tableDebugName, DynamicTableSink sink, ResolvedSchema schema, List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }
    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(
                String.format(
                        "Table '%s' declares persistable metadata columns, but the underlying %s "
                                + "doesn't implement the %s interface. If the column should not "
                                + "be persisted, it can be declared with the VIRTUAL keyword.",
                        tableDebugName,
                        DynamicTableSink.class.getSimpleName(),
                        SupportsWritingMetadata.class.getSimpleName()));
    }
    final Map<String, DataType> metadataMap = ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(pos -> {
        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
        final String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
        final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
        final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
        // check that metadata key is valid
        if (expectedMetadataDataType == null) {
            throw new ValidationException(
                    String.format(
                            "Invalid metadata key '%s' in column '%s' of table '%s'. "
                                    + "The %s class '%s' supports the following metadata keys for writing:\n%s",
                            metadataKey,
                            metadataColumn.getName(),
                            tableDebugName,
                            DynamicTableSink.class.getSimpleName(),
                            sink.getClass().getName(),
                            String.join("\n", metadataMap.keySet())));
        }
        // check that types are compatible
        if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
            if (metadataKey.equals(metadataColumn.getName())) {
                throw new ValidationException(
                        String.format(
                                "Invalid data type for metadata column '%s' of table '%s'. "
                                        + "The column cannot be declared as '%s' because the type must be "
                                        + "castable to metadata type '%s'.",
                                metadataColumn.getName(), tableDebugName, metadataType,
                                expectedMetadataDataType.getLogicalType()));
            } else {
                throw new ValidationException(
                        String.format(
                                "Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. "
                                        + "The column cannot be declared as '%s' because the type must be "
                                        + "castable to metadata type '%s'.",
                                metadataColumn.getName(), metadataKey, tableDebugName, metadataType,
                                expectedMetadataDataType.getLogicalType()));
            }
        }
    });
    sinkAbilitySpecs.add(
            new WritingMetadataSpec(
                    createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
Also used : WritingMetadataSpec(org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec) ValidationException(org.apache.flink.table.api.ValidationException) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) Column(org.apache.flink.table.catalog.Column) SupportsWritingMetadata(org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata) DataType(org.apache.flink.table.types.DataType) RelDataType(org.apache.calcite.rel.type.RelDataType) LogicalType(org.apache.flink.table.types.logical.LogicalType)
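
For context, a small sketch of the user-facing setup this method validates, assuming a hypothetical connector identifier 'my-connector' and metadata key 'timestamp': a metadata column declared without VIRTUAL is persisted, so writing to the table requires the sink to implement SupportsWritingMetadata and to list that key.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class PersistedMetadataExample {
    public static void main(String[] args) {
        final TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());

        // 'ts' is a persisted metadata column. Declaring it as
        // "ts TIMESTAMP_LTZ(3) METADATA FROM 'timestamp' VIRTUAL" instead would make it
        // read-only and skip this validation, as the exception message above suggests.
        env.executeSql(
                "CREATE TABLE events ("
                        + "  id BIGINT,"
                        + "  ts TIMESTAMP_LTZ(3) METADATA FROM 'timestamp'"
                        + ") WITH ("
                        + "  'connector' = 'my-connector'"
                        + ")");

        // validateAndApplyMetadata runs when an INSERT into the table is planned, e.g.:
        // env.executeSql("INSERT INTO events SELECT id, ts FROM some_source");
    }
}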

Example 3 with SupportsWritingMetadata

Use of org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata in project flink by apache.

From the class DynamicSinkUtils, the pushMetadataProjection method:

/**
 * Creates a projection that reorders physical and metadata columns according to the consumed
 * data type of the sink. It casts metadata columns into the expected data type.
 *
 * @see SupportsWritingMetadata
 */
private static void pushMetadataProjection(
        FlinkRelBuilder relBuilder, FlinkTypeFactory typeFactory, ResolvedSchema schema, DynamicTableSink sink) {
    final RexBuilder rexBuilder = relBuilder.getRexBuilder();
    final List<Column> columns = schema.getColumns();
    final List<Integer> physicalColumns = extractPhysicalColumns(schema);
    // map each persisted metadata key to the position of its column in the schema
    final Map<String, Integer> keyToMetadataColumn =
            extractPersistedMetadataColumns(schema).stream()
                    .collect(Collectors.toMap(pos -> {
                        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
                        return metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
                    }, Function.identity()));
    // order the metadata columns by the keys the sink requires
    final List<Integer> metadataColumns =
            createRequiredMetadataKeys(schema, sink).stream()
                    .map(keyToMetadataColumn::get)
                    .collect(Collectors.toList());
    // physical column names first, then metadata keys
    final List<String> fieldNames =
            Stream.concat(
                            physicalColumns.stream().map(columns::get).map(Column::getName),
                            metadataColumns.stream()
                                    .map(columns::get)
                                    .map(MetadataColumn.class::cast)
                                    .map(c -> c.getMetadataKey().orElse(c.getName())))
                    .collect(Collectors.toList());
    final Map<String, DataType> metadataMap = extractMetadataMap(sink);
    // physical field references first, then metadata fields cast to the expected types
    final List<RexNode> fieldNodes =
            Stream.concat(
                            physicalColumns.stream().map(pos -> {
                                final int posAdjusted = adjustByVirtualColumns(columns, pos);
                                return relBuilder.field(posAdjusted);
                            }),
                            metadataColumns.stream().map(pos -> {
                                final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
                                final String metadataKey =
                                        metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
                                final LogicalType expectedType = metadataMap.get(metadataKey).getLogicalType();
                                final RelDataType expectedRelDataType =
                                        typeFactory.createFieldTypeFromLogicalType(expectedType);
                                final int posAdjusted = adjustByVirtualColumns(columns, pos);
                                return rexBuilder.makeAbstractCast(expectedRelDataType, relBuilder.field(posAdjusted));
                            }))
                    .collect(Collectors.toList());
    relBuilder.projectNamed(fieldNodes, fieldNames, true);
}
Also used : DataType(org.apache.flink.table.types.DataType) Schema(org.apache.flink.table.api.Schema) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) SupportsWritingMetadata(org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata) FlinkTypeFactory(org.apache.flink.table.planner.calcite.FlinkTypeFactory) TableConfigOptions(org.apache.flink.table.api.config.TableConfigOptions) CollectSinkOperatorFactory(org.apache.flink.streaming.api.operators.collect.CollectSinkOperatorFactory) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) SupportsPartitioning(org.apache.flink.table.connector.sink.abilities.SupportsPartitioning) RexNode(org.apache.calcite.rex.RexNode) RowField(org.apache.flink.table.types.logical.RowType.RowField) RelHint(org.apache.calcite.rel.hint.RelHint) Map(java.util.Map) LogicalTypeCasts.supportsExplicitCast(org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsExplicitCast) LogicalTypeCasts.supportsAvoidingCast(org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsAvoidingCast) SinkModifyOperation(org.apache.flink.table.operations.SinkModifyOperation) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Set(java.util.Set) OverwriteSpec(org.apache.flink.table.planner.plan.abilities.sink.OverwriteSpec) Collectors(java.util.stream.Collectors) ZoneId(java.time.ZoneId) SinkAbilitySpec(org.apache.flink.table.planner.plan.abilities.sink.SinkAbilitySpec) List(java.util.List) Stream(java.util.stream.Stream) LogicalType(org.apache.flink.table.types.logical.LogicalType) ValidationException(org.apache.flink.table.api.ValidationException) CollectModifyOperation(org.apache.flink.table.operations.CollectModifyOperation) TableResult(org.apache.flink.table.api.TableResult) TypeConversions(org.apache.flink.table.types.utils.TypeConversions) LogicalTypeCasts.supportsImplicitCast(org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsImplicitCast) DataTypeFactory(org.apache.flink.table.catalog.DataTypeFactory) IntStream(java.util.stream.IntStream) ShortcutUtils.unwrapTypeFactory(org.apache.flink.table.planner.utils.ShortcutUtils.unwrapTypeFactory) WritingMetadataSpec(org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec) Column(org.apache.flink.table.catalog.Column) RowType(org.apache.flink.table.types.logical.RowType) RelOptUtil(org.apache.calcite.plan.RelOptUtil) Function(java.util.function.Function) FlinkRelBuilder(org.apache.flink.table.planner.calcite.FlinkRelBuilder) ArrayList(java.util.ArrayList) ReadableConfig(org.apache.flink.configuration.ReadableConfig) SupportsOverwrite(org.apache.flink.table.connector.sink.abilities.SupportsOverwrite) ExternalModifyOperation(org.apache.flink.table.operations.ExternalModifyOperation) ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable) LogicalSink(org.apache.flink.table.planner.plan.nodes.calcite.LogicalSink) DataTypeUtils(org.apache.flink.table.types.utils.DataTypeUtils) RelDataType(org.apache.calcite.rel.type.RelDataType) RexBuilder(org.apache.calcite.rex.RexBuilder) TableException(org.apache.flink.table.api.TableException) ShortcutUtils.unwrapContext(org.apache.flink.table.planner.utils.ShortcutUtils.unwrapContext) TypeTransformations(org.apache.flink.table.types.inference.TypeTransformations) RelNode(org.apache.calcite.rel.RelNode) DataStream(org.apache.flink.streaming.api.datastream.DataStream) ExternalCatalogTable(org.apache.flink.table.catalog.ExternalCatalogTable) Internal(org.apache.flink.annotation.Internal) ExecutionConfigOptions(org.apache.flink.table.api.config.ExecutionConfigOptions) Collections(java.util.Collections)
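
To make the resulting column order concrete, here is a minimal, dependency-free sketch of the field-name concatenation above: physical columns keep their order and persisted metadata columns are appended in the key order the sink requires. The column names are invented.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ProjectionOrderSketch {
    public static void main(String[] args) {
        final List<String> physicalColumns = Arrays.asList("id", "payload");
        // Order dictated by the sink's required metadata keys, not by the schema.
        final List<String> requiredMetadataKeys = Arrays.asList("timestamp", "headers");

        final List<String> consumedFields =
                Stream.concat(physicalColumns.stream(), requiredMetadataKeys.stream())
                        .collect(Collectors.toList());

        // Prints: [id, payload, timestamp, headers]
        System.out.println(consumedFields);
    }
}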

Aggregations

SupportsWritingMetadata (org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata) 3
RelDataType (org.apache.calcite.rel.type.RelDataType) 2
TableException (org.apache.flink.table.api.TableException) 2
ValidationException (org.apache.flink.table.api.ValidationException) 2
Column (org.apache.flink.table.catalog.Column) 2
MetadataColumn (org.apache.flink.table.catalog.Column.MetadataColumn) 2
DataType (org.apache.flink.table.types.DataType) 2
ZoneId (java.time.ZoneId) 1
ArrayList (java.util.ArrayList) 1
Collections (java.util.Collections) 1
List (java.util.List) 1
Map (java.util.Map) 1
Set (java.util.Set) 1
Function (java.util.function.Function) 1
Collectors (java.util.stream.Collectors) 1
IntStream (java.util.stream.IntStream) 1
Stream (java.util.stream.Stream) 1
RelOptUtil (org.apache.calcite.plan.RelOptUtil) 1
RelNode (org.apache.calcite.rel.RelNode) 1
RelHint (org.apache.calcite.rel.hint.RelHint) 1