
Example 1 with WritingMetadataSpec

Use of org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec in project flink by apache.

From class DynamicSinkUtils, method validateAndApplyMetadata.

private static void validateAndApplyMetadata(
        String tableDebugName, DynamicTableSink sink,
        ResolvedSchema schema, List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }
    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(String.format(
                "Table '%s' declares persistable metadata columns, but the underlying %s "
                        + "doesn't implement the %s interface. If the column should not "
                        + "be persisted, it can be declared with the VIRTUAL keyword.",
                tableDebugName, DynamicTableSink.class.getSimpleName(),
                SupportsWritingMetadata.class.getSimpleName()));
    }
    final Map<String, DataType> metadataMap =
            ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(pos -> {
        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
        final String metadataKey =
                metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
        final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
        final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
        // check that metadata key is valid
        if (expectedMetadataDataType == null) {
            throw new ValidationException(String.format(
                    "Invalid metadata key '%s' in column '%s' of table '%s'. "
                            + "The %s class '%s' supports the following metadata keys for writing:\n%s",
                    metadataKey, metadataColumn.getName(), tableDebugName,
                    DynamicTableSink.class.getSimpleName(), sink.getClass().getName(),
                    String.join("\n", metadataMap.keySet())));
        }
        // check that types are compatible
        if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
            if (metadataKey.equals(metadataColumn.getName())) {
                throw new ValidationException(String.format(
                        "Invalid data type for metadata column '%s' of table '%s'. "
                                + "The column cannot be declared as '%s' because the type must be "
                                + "castable to metadata type '%s'.",
                        metadataColumn.getName(), tableDebugName, metadataType,
                        expectedMetadataDataType.getLogicalType()));
            } else {
                throw new ValidationException(String.format(
                        "Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. "
                                + "The column cannot be declared as '%s' because the type must be "
                                + "castable to metadata type '%s'.",
                        metadataColumn.getName(), metadataKey, tableDebugName,
                        metadataType, expectedMetadataDataType.getLogicalType()));
            }
        }
    });
    sinkAbilitySpecs.add(new WritingMetadataSpec(
            createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
Also used:
WritingMetadataSpec (org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec)
MetadataColumn (org.apache.flink.table.catalog.Column.MetadataColumn)
ValidationException (org.apache.flink.table.api.ValidationException)
Column (org.apache.flink.table.catalog.Column)
SupportsWritingMetadata (org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata)
DataType (org.apache.flink.table.types.DataType)
RelDataType (org.apache.calcite.rel.type.RelDataType)
LogicalType (org.apache.flink.table.types.logical.LogicalType)
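
The validation above is driven by the SupportsWritingMetadata contract: the planner asks the sink for its writable metadata keys via listWritableMetadata() and, once validation passes, later calls applyWritableMetadata(...) when the recorded WritingMetadataSpec is applied. The fragment below is a minimal sketch of that contract, not Flink source; the class name and the metadata keys "timestamp" and "headers" are illustrative assumptions, and a real sink would also implement DynamicTableSink.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

// Hypothetical sink fragment; only the two overridden methods are real API.
public class MetadataAwareSinkSketch implements SupportsWritingMetadata {

    private List<String> appliedMetadataKeys;
    private DataType consumedDataType;

    @Override
    public Map<String, DataType> listWritableMetadata() {
        // the keys and types that validateAndApplyMetadata matches persisted
        // metadata columns against (keys here are made up for the sketch)
        final Map<String, DataType> metadata = new HashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_LTZ(3));
        metadata.put("headers", DataTypes.MAP(DataTypes.STRING(), DataTypes.BYTES()));
        return metadata;
    }

    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        // invoked when the planner applies the WritingMetadataSpec recorded above
        this.appliedMetadataKeys = metadataKeys;
        this.consumedDataType = consumedDataType;
    }
}

On the SQL side, a column declared as m STRING METADATA is persisted and goes through this validation, while m STRING METADATA VIRTUAL is excluded from it, which is exactly what the first error message suggests.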

Example 2 with WritingMetadataSpec

Use of org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec in project flink by apache.

From class DynamicTableSinkSpecSerdeTest, method testDynamicTableSinkSpecSerde.

static Stream<DynamicTableSinkSpec> testDynamicTableSinkSpecSerde() {
    // spec1: plain filesystem sink without any sink abilities
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", FileSystemTableFactory.IDENTIFIER);
    options1.put("format", TestCsvFormatFactory.IDENTIFIER);
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 = new ResolvedSchema(
            Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
            Collections.emptyList(), null);
    final CatalogTable catalogTable1 = CatalogTable.of(
            Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
            null, Collections.emptyList(), options1);
    DynamicTableSinkSpec spec1 = new DynamicTableSinkSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable1, resolvedSchema1)),
            null);
    // spec2: filesystem sink with overwrite and static partition p=A
    Map<String, String> options2 = new HashMap<>();
    options2.put("connector", FileSystemTableFactory.IDENTIFIER);
    options2.put("format", TestCsvFormatFactory.IDENTIFIER);
    options2.put("path", "/tmp");
    final ResolvedSchema resolvedSchema2 = new ResolvedSchema(
            Arrays.asList(Column.physical("a", DataTypes.BIGINT()),
                    Column.physical("b", DataTypes.INT()), Column.physical("p", DataTypes.STRING())),
            Collections.emptyList(), null);
    final CatalogTable catalogTable2 = CatalogTable.of(
            Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build(),
            null, Collections.emptyList(), options2);
    DynamicTableSinkSpec spec2 = new DynamicTableSinkSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
            Arrays.asList(new OverwriteSpec(true),
                    new PartitioningSpec(new HashMap<String, String>() {{
                        put("p", "A");
                    }})));
    // spec3: test values sink with a WritingMetadataSpec for metadata column 'm'
    Map<String, String> options3 = new HashMap<>();
    options3.put("connector", TestValuesTableFactory.IDENTIFIER);
    options3.put("writable-metadata", "m:STRING");
    final ResolvedSchema resolvedSchema3 = new ResolvedSchema(
            Arrays.asList(Column.physical("a", DataTypes.BIGINT()),
                    Column.physical("b", DataTypes.INT()),
                    Column.metadata("m", DataTypes.STRING(), null, false)),
            Collections.emptyList(), null);
    final CatalogTable catalogTable3 = CatalogTable.of(
            Schema.newBuilder().fromResolvedSchema(resolvedSchema3).build(),
            null, Collections.emptyList(), options3);
    DynamicTableSinkSpec spec3 = new DynamicTableSinkSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable3, resolvedSchema3)),
            Collections.singletonList(new WritingMetadataSpec(
                    Collections.singletonList("m"), RowType.of(new BigIntType(), new IntType()))));
    return Stream.of(spec1, spec2, spec3);
}
Also used:
WritingMetadataSpec (org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec)
HashMap (java.util.HashMap)
BigIntType (org.apache.flink.table.types.logical.BigIntType)
OverwriteSpec (org.apache.flink.table.planner.plan.abilities.sink.OverwriteSpec)
CatalogTable (org.apache.flink.table.catalog.CatalogTable)
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable)
DynamicTableSinkSpec (org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSinkSpec)
PartitioningSpec (org.apache.flink.table.planner.plan.abilities.sink.PartitioningSpec)
IntType (org.apache.flink.table.types.logical.IntType)
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)
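
For context, spec3's WritingMetadataSpec is the serialized form of the ability recorded in Example 1. The sketch below (an illustration, not part of the test) shows the application step: the spec's apply delegates to the sink's applyWritableMetadata, here with the single key "m" and the consumed row type (BIGINT, INT). The sink parameter is an assumed placeholder for any DynamicTableSink that implements SupportsWritingMetadata.

import java.util.Collections;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.RowType;

// Minimal sketch: rebuild the spec3 ability and apply it to a sink, as the
// planner does after deserializing a DynamicTableSinkSpec.
class WritingMetadataSpecSketch {
    static void applyTo(DynamicTableSink sink) {
        WritingMetadataSpec spec = new WritingMetadataSpec(
                Collections.singletonList("m"),
                RowType.of(new BigIntType(), new IntType()));
        // delegates to SupportsWritingMetadata#applyWritableMetadata on the sink
        spec.apply(sink);
    }
}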

Aggregations

WritingMetadataSpec (org.apache.flink.table.planner.plan.abilities.sink.WritingMetadataSpec): 2 uses
HashMap (java.util.HashMap): 1 use
RelDataType (org.apache.calcite.rel.type.RelDataType): 1 use
ValidationException (org.apache.flink.table.api.ValidationException): 1 use
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 1 use
Column (org.apache.flink.table.catalog.Column): 1 use
MetadataColumn (org.apache.flink.table.catalog.Column.MetadataColumn): 1 use
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 1 use
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 1 use
SupportsWritingMetadata (org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata): 1 use
OverwriteSpec (org.apache.flink.table.planner.plan.abilities.sink.OverwriteSpec): 1 use
PartitioningSpec (org.apache.flink.table.planner.plan.abilities.sink.PartitioningSpec): 1 use
DynamicTableSinkSpec (org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSinkSpec): 1 use
DataType (org.apache.flink.table.types.DataType): 1 use
BigIntType (org.apache.flink.table.types.logical.BigIntType): 1 use
IntType (org.apache.flink.table.types.logical.IntType): 1 use
LogicalType (org.apache.flink.table.types.logical.LogicalType): 1 use