
Example 26 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class DynamicSinkUtils method convertCollectToRel.

/**
 * Converts a {@link TableResult#collect()} sink to a {@link RelNode}.
 */
public static RelNode convertCollectToRel(
        FlinkRelBuilder relBuilder,
        RelNode input,
        CollectModifyOperation collectModifyOperation,
        ReadableConfig configuration,
        ClassLoader classLoader) {
    final DataTypeFactory dataTypeFactory = unwrapContext(relBuilder).getCatalogManager().getDataTypeFactory();
    final ResolvedSchema childSchema = collectModifyOperation.getChild().getResolvedSchema();
    final ResolvedSchema schema = ResolvedSchema.physical(childSchema.getColumnNames(), childSchema.getColumnDataTypes());
    final ResolvedCatalogTable catalogTable = new ResolvedCatalogTable(new ExternalCatalogTable(Schema.newBuilder().fromResolvedSchema(schema).build()), schema);
    final ContextResolvedTable contextResolvedTable = ContextResolvedTable.anonymous("collect", catalogTable);
    final DataType consumedDataType = fixCollectDataType(dataTypeFactory, schema);
    final String zone = configuration.get(TableConfigOptions.LOCAL_TIME_ZONE);
    final ZoneId zoneId = TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone) ? ZoneId.systemDefault() : ZoneId.of(zone);
    final CollectDynamicSink tableSink =
            new CollectDynamicSink(
                    contextResolvedTable.getIdentifier(),
                    consumedDataType,
                    configuration.get(CollectSinkOperatorFactory.MAX_BATCH_SIZE),
                    configuration.get(CollectSinkOperatorFactory.SOCKET_TIMEOUT),
                    classLoader,
                    zoneId,
                    configuration.get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR).isEnabled());
    collectModifyOperation.setSelectResultProvider(tableSink.getSelectResultProvider());
    collectModifyOperation.setConsumedDataType(consumedDataType);
    return convertSinkToRel(
            relBuilder,
            input,
            // dynamicOptions
            Collections.emptyMap(),
            contextResolvedTable,
            // staticPartitions
            Collections.emptyMap(),
            false,
            tableSink);
}
Also used : ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) ZoneId(java.time.ZoneId) ExternalCatalogTable(org.apache.flink.table.catalog.ExternalCatalogTable) DataType(org.apache.flink.table.types.DataType) RelDataType(org.apache.calcite.rel.type.RelDataType) DataTypeFactory(org.apache.flink.table.catalog.DataTypeFactory) ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
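
The key move above is rebuilding the child's schema as a purely physical ResolvedSchema before wrapping it in an anonymous ExternalCatalogTable. Below is a minimal, self-contained sketch of just that step, assuming only the public table API; the class name and sample columns are illustrative and not taken from the Flink sources.

import java.util.Arrays;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.ResolvedSchema;

public class PhysicalSchemaSketch {

    public static void main(String[] args) {
        // Stand-in for the resolved schema of the query feeding collect().
        ResolvedSchema childSchema =
                ResolvedSchema.physical(
                        Arrays.asList("id", "name"),
                        Arrays.asList(DataTypes.BIGINT(), DataTypes.STRING()));

        // Same move as convertCollectToRel: re-wrap every column as a physical column,
        // dropping computed/metadata semantics before handing the schema to the sink.
        ResolvedSchema sinkSchema =
                ResolvedSchema.physical(
                        childSchema.getColumnNames(), childSchema.getColumnDataTypes());

        // The physical schema can then be lifted back into an unresolved Schema,
        // as done for the anonymous ExternalCatalogTable above.
        Schema unresolved = Schema.newBuilder().fromResolvedSchema(sinkSchema).build();

        System.out.println(sinkSchema);
        System.out.println(unresolved);
    }
}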

Example 27 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class CatalogSchemaTable method getRowType.

@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
    final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    // The following block is a workaround to support tables defined via
    // TableEnvironment.connect() whose table sources implement
    // DefinedProctimeAttribute/DefinedRowtimeAttributes. It should be removed once
    // DefinedProctimeAttribute/DefinedRowtimeAttributes are removed.
    Optional<TableSource<?>> sourceOpt = findAndCreateTableSource();
    if (isStreamingMode
            && sourceOpt.isPresent()
            && schema.getColumns().stream().allMatch(Column::isPhysical)
            && schema.getWatermarkSpecs().isEmpty()) {
        TableSchema tableSchema = TableSchema.fromResolvedSchema(schema);
        TableSource<?> source = sourceOpt.get();
        if (TableSourceValidation.hasProctimeAttribute(source) || TableSourceValidation.hasRowtimeAttribute(source)) {
            // If the table is defined via TableEnvironment.connect() and uses the legacy
            // proctime and rowtime descriptors, the TableSchema should fall back to
            // ConnectorCatalogTable#calculateSourceSchema.
            tableSchema = ConnectorCatalogTable.calculateSourceSchema(source, false);
        }
        return TableSourceUtil.getSourceRowType(flinkTypeFactory, tableSchema, scala.Option.empty(), true);
    }
    final List<String> fieldNames = schema.getColumnNames();
    final List<LogicalType> fieldTypes =
            schema.getColumnDataTypes().stream()
                    .map(DataType::getLogicalType)
                    .map(PlannerTypeUtils::removeLegacyTypes)
                    .collect(Collectors.toList());
    return flinkTypeFactory.buildRelNodeRowType(fieldNames, fieldTypes);
}
Also used : TableSource(org.apache.flink.table.sources.TableSource) StreamTableSource(org.apache.flink.table.sources.StreamTableSource) FlinkTypeFactory(org.apache.flink.table.planner.calcite.FlinkTypeFactory) TableSchema(org.apache.flink.table.api.TableSchema) Column(org.apache.flink.table.catalog.Column) LogicalType(org.apache.flink.table.types.logical.LogicalType) DataType(org.apache.flink.table.types.DataType) RelDataType(org.apache.calcite.rel.type.RelDataType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
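
When none of the legacy interfaces are involved, getRowType boils down to pairing column names with the logical types behind each column's DataType. A small sketch of that extraction using only the public table API; the class name and columns are made up for illustration.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;

public class RowTypeSketch {

    public static void main(String[] args) {
        ResolvedSchema schema =
                ResolvedSchema.physical(
                        Arrays.asList("user_id", "ts"),
                        Arrays.asList(DataTypes.BIGINT(), DataTypes.TIMESTAMP(3)));

        // Same extraction as the non-legacy branch of getRowType: column names plus
        // the logical type behind each column's DataType.
        List<String> fieldNames = schema.getColumnNames();
        List<LogicalType> fieldTypes =
                schema.getColumnDataTypes().stream()
                        .map(DataType::getLogicalType)
                        .collect(Collectors.toList());

        // The planner would feed these lists into FlinkTypeFactory#buildRelNodeRowType;
        // here they are just printed.
        System.out.println(fieldNames);
        System.out.println(fieldTypes);
    }
}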

Example 28 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class DynamicSourceUtils method pushGeneratedProjection.

/**
 * Creates a projection that adds computed columns and finalizes the table schema.
 */
private static void pushGeneratedProjection(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
    final ExpressionConverter converter = new ExpressionConverter(relBuilder);
    final List<RexNode> projection = schema.getColumns().stream().map(c -> {
        if (c instanceof ComputedColumn) {
            final ComputedColumn computedColumn = (ComputedColumn) c;
            return computedColumn.getExpression().accept(converter);
        } else {
            return relBuilder.field(c.getName());
        }
    }).collect(Collectors.toList());
    relBuilder.projectNamed(
            projection,
            schema.getColumns().stream().map(Column::getName).collect(Collectors.toList()),
            true);
}
Also used : DataType(org.apache.flink.table.types.DataType) ScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) Column(org.apache.flink.table.catalog.Column) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) ShortcutUtils(org.apache.flink.table.planner.utils.ShortcutUtils) RowType(org.apache.flink.table.types.logical.RowType) ScanTableSource(org.apache.flink.table.connector.source.ScanTableSource) FlinkRelBuilder(org.apache.flink.table.planner.calcite.FlinkRelBuilder) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) ReadableConfig(org.apache.flink.configuration.ReadableConfig) RexNode(org.apache.calcite.rex.RexNode) RowField(org.apache.flink.table.types.logical.RowType.RowField) RelHint(org.apache.calcite.rel.hint.RelHint) Map(java.util.Map) LogicalTypeCasts.supportsExplicitCast(org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsExplicitCast) ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable) RelDataType(org.apache.calcite.rel.type.RelDataType) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) TableConfig(org.apache.flink.table.api.TableConfig) WatermarkSpec(org.apache.flink.table.catalog.WatermarkSpec) RexBuilder(org.apache.calcite.rex.RexBuilder) TableException(org.apache.flink.table.api.TableException) Set(java.util.Set) ExpressionConverter(org.apache.flink.table.planner.expressions.converter.ExpressionConverter) RelNode(org.apache.calcite.rel.RelNode) Collectors(java.util.stream.Collectors) SourceAbilitySpec(org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec) TableSourceTable(org.apache.flink.table.planner.plan.schema.TableSourceTable) ComputedColumn(org.apache.flink.table.catalog.Column.ComputedColumn) DataStream(org.apache.flink.streaming.api.datastream.DataStream) List(java.util.List) Stream(java.util.stream.Stream) LogicalType(org.apache.flink.table.types.logical.LogicalType) FlinkStatistic(org.apache.flink.table.planner.plan.stats.FlinkStatistic) RowKind(org.apache.flink.types.RowKind) ValidationException(org.apache.flink.table.api.ValidationException) SupportsReadingMetadata(org.apache.flink.table.connector.source.abilities.SupportsReadingMetadata) Internal(org.apache.flink.annotation.Internal) TypeConversions(org.apache.flink.table.types.utils.TypeConversions) ExecutionConfigOptions(org.apache.flink.table.api.config.ExecutionConfigOptions) Collections(java.util.Collections) ScanRuntimeProviderContext(org.apache.flink.table.runtime.connector.source.ScanRuntimeProviderContext) LogicalTableScan(org.apache.calcite.rel.logical.LogicalTableScan) Column(org.apache.flink.table.catalog.Column) MetadataColumn(org.apache.flink.table.catalog.Column.MetadataColumn) ComputedColumn(org.apache.flink.table.catalog.Column.ComputedColumn) ExpressionConverter(org.apache.flink.table.planner.expressions.converter.ExpressionConverter) ComputedColumn(org.apache.flink.table.catalog.Column.ComputedColumn) RexNode(org.apache.calcite.rex.RexNode)
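
The projection above hinges on distinguishing computed columns from columns that can simply be referenced by name. A runnable sketch of that branching over a ResolvedSchema follows; the class, method, and sample columns are illustrative, and a schema resolved from Schema.newBuilder().columnByExpression(...) would exercise the ComputedColumn branch.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class ColumnKindSketch {

    // Mirrors the branch in pushGeneratedProjection: computed columns contribute their
    // resolved expression, every other column is referenced by name.
    static void describeColumns(ResolvedSchema schema) {
        for (Column c : schema.getColumns()) {
            if (c instanceof Column.ComputedColumn) {
                Column.ComputedColumn computed = (Column.ComputedColumn) c;
                System.out.println(c.getName() + " <- " + computed.getExpression().asSummaryString());
            } else {
                System.out.println(c.getName() + " (referenced by name)");
            }
        }
    }

    public static void main(String[] args) {
        // Physical-only schema for the sketch.
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("amount", DataTypes.DECIMAL(10, 2)),
                        Column.physical("currency", DataTypes.STRING()));
        describeColumns(schema);
    }
}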

Example 29 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class DynamicSourceUtils method prepareDynamicSource.

/**
 * Prepares the given {@link DynamicTableSource}. It checks whether the source is compatible with
 * the given schema and applies initial parameters.
 */
public static void prepareDynamicSource(String tableDebugName, ResolvedCatalogTable table, DynamicTableSource source, boolean isBatchMode, ReadableConfig config) {
    final ResolvedSchema schema = table.getResolvedSchema();
    validateAndApplyMetadata(tableDebugName, schema, source);
    if (source instanceof ScanTableSource) {
        validateScanSource(tableDebugName, schema, (ScanTableSource) source, isBatchMode, config);
    }
    // lookup table source is validated in LookupJoin node
}
Also used : ScanTableSource(org.apache.flink.table.connector.source.ScanTableSource) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
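
prepareDynamicSource works on a ResolvedCatalogTable, i.e. a raw CatalogTable paired with its ResolvedSchema. A hedged sketch of building such a pairing by hand, outside of any catalog; the connector option and class name here are hypothetical.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;

public class ResolvedCatalogTableSketch {

    public static void main(String[] args) {
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("word", DataTypes.STRING()),
                        Column.physical("cnt", DataTypes.BIGINT()));

        Map<String, String> options = new HashMap<>();
        options.put("connector", "datagen"); // hypothetical connector choice

        CatalogTable rawTable =
                CatalogTable.of(
                        Schema.newBuilder().fromResolvedSchema(schema).build(),
                        "sketch table",
                        Collections.emptyList(),
                        options);

        // This pairing of raw CatalogTable and ResolvedSchema is what prepareDynamicSource
        // receives before validating the created DynamicTableSource against the schema.
        ResolvedCatalogTable table = new ResolvedCatalogTable(rawTable, schema);
        System.out.println(table.getResolvedSchema());
    }
}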

Example 30 with ResolvedSchema

use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

the class RawFormatFactoryTest method testInvalidSchema.

@Test
public void testInvalidSchema() {
    ResolvedSchema invalidSchema =
            ResolvedSchema.of(
                    Column.physical("f0", DataTypes.STRING()),
                    Column.physical("f1", DataTypes.BIGINT()));
    String expectedError =
            "The 'raw' format only supports single physical column. "
                    + "However the defined schema contains multiple physical columns: [`f0` STRING, `f1` BIGINT]";
    try {
        createDeserializationSchema(invalidSchema, getBasicOptions());
        fail();
    } catch (Exception e) {
        assertThat(e, hasMessage(equalTo(expectedError)));
    }
    try {
        createSerializationSchema(invalidSchema, getBasicOptions());
        fail();
    } catch (Exception e) {
        assertThat(e, hasMessage(equalTo(expectedError)));
    }
}
Also used : ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)
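
For contrast with the two-column schema that the test rejects, a schema accepted by the 'raw' format has exactly one physical column. A minimal sketch; the class name and column names are illustrative.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class RawFormatSchemaSketch {

    public static void main(String[] args) {
        // Accepted by the 'raw' format: a single physical column, e.g. STRING or BYTES.
        ResolvedSchema validSchema = ResolvedSchema.of(Column.physical("log", DataTypes.STRING()));

        // Rejected, as asserted in testInvalidSchema above: more than one physical column.
        ResolvedSchema invalidSchema =
                ResolvedSchema.of(
                        Column.physical("f0", DataTypes.STRING()),
                        Column.physical("f1", DataTypes.BIGINT()));

        System.out.println("valid:   " + validSchema);
        System.out.println("invalid: " + invalidSchema);
    }
}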

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 84
Test (org.junit.Test): 50
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 20
DataType (org.apache.flink.table.types.DataType): 20
RowData (org.apache.flink.table.data.RowData): 17
ValidationException (org.apache.flink.table.api.ValidationException): 14
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 14
List (java.util.List): 11
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 11
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 10
Column (org.apache.flink.table.catalog.Column): 9
LogicalType (org.apache.flink.table.types.logical.LogicalType): 9
RowType (org.apache.flink.table.types.logical.RowType): 9
HashMap (java.util.HashMap): 8
Collectors (java.util.stream.Collectors): 8
RelDataType (org.apache.calcite.rel.type.RelDataType): 8
Internal (org.apache.flink.annotation.Internal): 8
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions): 6
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory): 6
Row (org.apache.flink.types.Row): 6