Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project: class DynamicSinkUtils, method convertSinkToRel.
/**
 * Converts the given {@link DynamicTableSink} and query {@code input} into a {@link
 * LogicalSink} relational node.
 *
 * <p>Steps: prepare the sink (collecting and applying {@link SinkAbilitySpec}s), validate the
 * query schema against the sink's table schema (inserting implicit casts where possible), and
 * project metadata columns into the sink's consumed data type if any are persisted.
 *
 * @param relBuilder builder used to assemble the relational tree
 * @param input relational node producing the rows to write
 * @param dynamicOptions dynamic table options; attached as an "OPTIONS" hint when non-empty
 * @param contextResolvedTable the resolved target table
 * @param staticPartitions static partition assignments for the insert
 * @param isOverwrite whether the statement overwrites existing data
 * @param sink the sink instance; mutated by the applied ability specs
 * @return the final {@link LogicalSink} node
 */
private static RelNode convertSinkToRel(FlinkRelBuilder relBuilder, RelNode input, Map<String, String> dynamicOptions, ContextResolvedTable contextResolvedTable, Map<String, String> staticPartitions, boolean isOverwrite, DynamicTableSink sink) {
    final DataTypeFactory dataTypeFactory =
            unwrapContext(relBuilder).getCatalogManager().getDataTypeFactory();
    final FlinkTypeFactory typeFactory = unwrapTypeFactory(relBuilder);
    final ResolvedSchema sinkSchema = contextResolvedTable.getResolvedSchema();
    final String tableDebugName = contextResolvedTable.getIdentifier().asSummaryString();

    // 1. prepare table sink: collect ability specs and apply each one to the sink instance
    final List<SinkAbilitySpec> abilitySpecs = new ArrayList<>();
    prepareDynamicSink(
            tableDebugName,
            staticPartitions,
            isOverwrite,
            sink,
            contextResolvedTable.getResolvedTable(),
            abilitySpecs);
    for (SinkAbilitySpec spec : abilitySpecs) {
        spec.apply(sink);
    }

    // 2. validate the query schema against the sink's table schema; apply implicit casts
    final RelNode castedInput =
            validateSchemaAndApplyImplicitCast(
                    input, sinkSchema, tableDebugName, dataTypeFactory, typeFactory);
    relBuilder.push(castedInput);

    // 3. convert the sink's table schema to the consumed data type of the sink
    // (only needed when persisted metadata columns exist)
    if (!extractPersistedMetadataColumns(sinkSchema).isEmpty()) {
        pushMetadataProjection(relBuilder, typeFactory, sinkSchema, sink);
    }

    final List<RelHint> hints = new ArrayList<>();
    if (!dynamicOptions.isEmpty()) {
        hints.add(RelHint.builder("OPTIONS").hintOptions(dynamicOptions).build());
    }

    return LogicalSink.create(
            relBuilder.build(),
            hints,
            contextResolvedTable,
            sink,
            staticPartitions,
            abilitySpecs.toArray(new SinkAbilitySpec[0]));
}
Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project: class DynamicSinkUtils, method pushMetadataProjection.
/**
 * Creates a projection that reorders physical and metadata columns according to the consumed
 * data type of the sink. It casts metadata columns into the expected data type.
 *
 * @see SupportsWritingMetadata
 */
private static void pushMetadataProjection(FlinkRelBuilder relBuilder, FlinkTypeFactory typeFactory, ResolvedSchema schema, DynamicTableSink sink) {
    final RexBuilder rexBuilder = relBuilder.getRexBuilder();
    final List<Column> columns = schema.getColumns();
    final List<Integer> physicalColumns = extractPhysicalColumns(schema);

    // Index every persisted metadata column by its metadata key, falling back to the column
    // name when no explicit key is declared.
    final Map<String, Integer> keyToMetadataColumn =
            extractPersistedMetadataColumns(schema).stream()
                    .collect(
                            Collectors.toMap(
                                    pos -> {
                                        final MetadataColumn column = (MetadataColumn) columns.get(pos);
                                        return column.getMetadataKey().orElse(column.getName());
                                    },
                                    Function.identity()));

    // Order the metadata column positions by the keys the sink requires.
    final List<Integer> metadataColumns = new ArrayList<>();
    for (String requiredKey : createRequiredMetadataKeys(schema, sink)) {
        metadataColumns.add(keyToMetadataColumn.get(requiredKey));
    }

    // Field names: physical columns first (by column name), then metadata columns (by key).
    final List<String> fieldNames = new ArrayList<>();
    for (Integer pos : physicalColumns) {
        fieldNames.add(columns.get(pos).getName());
    }
    for (Integer pos : metadataColumns) {
        final MetadataColumn column = (MetadataColumn) columns.get(pos);
        fieldNames.add(column.getMetadataKey().orElse(column.getName()));
    }

    final Map<String, DataType> metadataMap = extractMetadataMap(sink);

    // Field expressions in the same order: plain references for physical columns, abstract
    // casts into the sink's expected metadata data type for metadata columns.
    final List<RexNode> fieldNodes = new ArrayList<>();
    for (Integer pos : physicalColumns) {
        fieldNodes.add(relBuilder.field(adjustByVirtualColumns(columns, pos)));
    }
    for (Integer pos : metadataColumns) {
        final MetadataColumn column = (MetadataColumn) columns.get(pos);
        final String metadataKey = column.getMetadataKey().orElse(column.getName());
        final LogicalType expectedType = metadataMap.get(metadataKey).getLogicalType();
        final RelDataType expectedRelDataType =
                typeFactory.createFieldTypeFromLogicalType(expectedType);
        fieldNodes.add(
                rexBuilder.makeAbstractCast(
                        expectedRelDataType,
                        relBuilder.field(adjustByVirtualColumns(columns, pos))));
    }

    relBuilder.projectNamed(fieldNodes, fieldNames, true);
}
Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project: class DynamicSourceUtils, method pushMetadataProjection.
/**
 * Creates a projection that reorders physical and metadata columns according to the given
 * schema. It casts metadata columns into the expected data type to be accessed by computed
 * columns in the next step. Computed columns are ignored here.
 *
 * @see SupportsReadingMetadata
 */
private static void pushMetadataProjection(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
    final RexBuilder rexBuilder = relBuilder.getRexBuilder();

    final List<String> fieldNames = new ArrayList<>();
    final List<RexNode> fieldNodes = new ArrayList<>();
    for (Column column : schema.getColumns()) {
        if (column instanceof ComputedColumn) {
            // computed columns are intentionally skipped in this projection
            continue;
        }
        fieldNames.add(column.getName());
        if (column instanceof MetadataColumn) {
            // cast the metadata field into the data type declared by the schema
            final MetadataColumn metadataColumn = (MetadataColumn) column;
            final String metadataKey =
                    metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
            final RelDataType relDataType =
                    relBuilder
                            .getTypeFactory()
                            .createFieldTypeFromLogicalType(
                                    column.getDataType().getLogicalType());
            fieldNodes.add(rexBuilder.makeAbstractCast(relDataType, relBuilder.field(metadataKey)));
        } else {
            // physical columns are referenced by name without any cast
            fieldNodes.add(relBuilder.field(column.getName()));
        }
    }
    relBuilder.projectNamed(fieldNodes, fieldNames, true);
}
Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project: class DynamicSourceUtils, method convertSourceToRel.
/**
 * Converts a given {@link DynamicTableSource} to a {@link RelNode}. It adds helper projections
 * if necessary.
 */
public static RelNode convertSourceToRel(boolean isBatchMode, ReadableConfig config, FlinkRelBuilder relBuilder, ContextResolvedTable contextResolvedTable, FlinkStatistic statistic, List<RelHint> hints, DynamicTableSource tableSource) {
    final String tableDebugName = contextResolvedTable.getIdentifier().asSummaryString();
    final ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();

    // 1. prepare table source
    prepareDynamicSource(tableDebugName, resolvedCatalogTable, tableSource, isBatchMode, config);

    // 2. push table scan
    pushTableScan(isBatchMode, relBuilder, contextResolvedTable, statistic, hints, tableSource);

    // 3. push projections for non-physical (metadata and computed) columns
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    final boolean onlyPhysicalColumns =
            schema.getColumns().stream().allMatch(Column::isPhysical);
    if (!onlyPhysicalColumns) {
        pushMetadataProjection(relBuilder, schema);
        pushGeneratedProjection(relBuilder, schema);
    }

    // 4. push watermark assigner (streaming mode only, and only if watermarks are declared)
    final boolean hasWatermarkSpecs = !schema.getWatermarkSpecs().isEmpty();
    if (!isBatchMode && hasWatermarkSpecs) {
        pushWatermarkAssigner(relBuilder, schema);
    }

    return relBuilder.build();
}
Aggregations