Use of org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata in project flink by apache.
The class WritingMetadataSpec, method apply:
@Override
public void apply(DynamicTableSink tableSink) {
    if (tableSink instanceof SupportsWritingMetadata) {
        DataType consumedDataType = TypeConversions.fromLogicalToDataType(consumedType);
        ((SupportsWritingMetadata) tableSink)
                .applyWritableMetadata(metadataKeys, consumedDataType);
    } else {
        throw new TableException(
                String.format(
                        "%s does not support SupportsWritingMetadata.",
                        tableSink.getClass().getName()));
    }
}
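For reference, the ability interface targeted by the cast in apply() has only two methods: listWritableMetadata() and applyWritableMetadata(List, DataType). Below is a minimal sketch of a sink that implements it; the class name LoggingSink, the chosen metadata keys, and their types are assumptions made for illustration and are not taken from the Flink repository.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

// Hypothetical sink, for illustration only; not part of the Flink code base.
public class LoggingSink implements DynamicTableSink, SupportsWritingMetadata {

    // Populated by the planner via applyWritableMetadata (see WritingMetadataSpec#apply).
    private List<String> metadataKeys = Collections.emptyList();
    private DataType consumedDataType;

    // Metadata keys this sink can persist; the keys and types here are assumptions.
    @Override
    public Map<String, DataType> listWritableMetadata() {
        final Map<String, DataType> metadata = new LinkedHashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3));
        metadata.put("headers", DataTypes.MAP(DataTypes.STRING(), DataTypes.BYTES()));
        return metadata;
    }

    // Receives the accepted keys and the final consumed row type from the planner.
    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        this.metadataKeys = metadataKeys;
        this.consumedDataType = consumedDataType;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // Runtime wiring is out of scope for this sketch.
        throw new UnsupportedOperationException("sketch only");
    }

    @Override
    public DynamicTableSink copy() {
        // A real implementation would also carry over metadataKeys and consumedDataType.
        return new LoggingSink();
    }

    @Override
    public String asSummaryString() {
        return "LoggingSink";
    }
}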
Use of org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata in project flink by apache.
The class DynamicSinkUtils, method validateAndApplyMetadata:
private static void validateAndApplyMetadata(
        String tableDebugName,
        DynamicTableSink sink,
        ResolvedSchema schema,
        List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }
    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(
                String.format(
                        "Table '%s' declares persistable metadata columns, but the underlying %s "
                                + "doesn't implement the %s interface. If the column should not "
                                + "be persisted, it can be declared with the VIRTUAL keyword.",
                        tableDebugName,
                        DynamicTableSink.class.getSimpleName(),
                        SupportsWritingMetadata.class.getSimpleName()));
    }
    final Map<String, DataType> metadataMap =
            ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(
            pos -> {
                final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
                final String metadataKey =
                        metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
                final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
                final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
                // check that metadata key is valid
                if (expectedMetadataDataType == null) {
                    throw new ValidationException(
                            String.format(
                                    "Invalid metadata key '%s' in column '%s' of table '%s'. "
                                            + "The %s class '%s' supports the following metadata keys for writing:\n%s",
                                    metadataKey, metadataColumn.getName(), tableDebugName,
                                    DynamicTableSink.class.getSimpleName(), sink.getClass().getName(),
                                    String.join("\n", metadataMap.keySet())));
                }
                // check that types are compatible
                if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
                    if (metadataKey.equals(metadataColumn.getName())) {
                        throw new ValidationException(
                                String.format(
                                        "Invalid data type for metadata column '%s' of table '%s'. "
                                                + "The column cannot be declared as '%s' because the type must be "
                                                + "castable to metadata type '%s'.",
                                        metadataColumn.getName(), tableDebugName, metadataType,
                                        expectedMetadataDataType.getLogicalType()));
                    } else {
                        throw new ValidationException(
                                String.format(
                                        "Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. "
                                                + "The column cannot be declared as '%s' because the type must be "
                                                + "castable to metadata type '%s'.",
                                        metadataColumn.getName(), metadataKey, tableDebugName, metadataType,
                                        expectedMetadataDataType.getLogicalType()));
                    }
                }
            });
    sinkAbilitySpecs.add(
            new WritingMetadataSpec(
                    createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
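The checks above are driven by metadata columns declared in DDL: persisted metadata columns must match a key from listWritableMetadata() with an explicitly castable type, while VIRTUAL columns are skipped. A minimal sketch of such a declaration is shown below; the table name, column names, topic, and broker address are placeholders, and it assumes a connector (Kafka here) whose sink lists 'timestamp' and 'headers' as writable metadata keys.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class MetadataColumnDdlExample {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // 'event_time' is persisted into the sink's 'timestamp' metadata, so the sink must
        // implement SupportsWritingMetadata and expose a castable type for that key.
        // 'headers' is declared VIRTUAL and is therefore ignored by validateAndApplyMetadata.
        tEnv.executeSql(
                "CREATE TABLE sink_table (\n"
                        + "  user_id BIGINT,\n"
                        + "  payload STRING,\n"
                        + "  event_time TIMESTAMP_LTZ(3) METADATA FROM 'timestamp',\n"
                        + "  headers MAP<STRING, BYTES> METADATA VIRTUAL\n"
                        + ") WITH (\n"
                        + "  'connector' = 'kafka',\n"
                        + "  'topic' = 'example-topic',\n"
                        + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                        + "  'format' = 'json'\n"
                        + ")");
    }
}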
Use of org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata in project flink by apache.
The class DynamicSinkUtils, method pushMetadataProjection:
/**
* Creates a projection that reorders physical and metadata columns according to the consumed
* data type of the sink. It casts metadata columns into the expected data type.
*
* @see SupportsWritingMetadata
*/
private static void pushMetadataProjection(
        FlinkRelBuilder relBuilder,
        FlinkTypeFactory typeFactory,
        ResolvedSchema schema,
        DynamicTableSink sink) {
    final RexBuilder rexBuilder = relBuilder.getRexBuilder();
    final List<Column> columns = schema.getColumns();
    final List<Integer> physicalColumns = extractPhysicalColumns(schema);
    final Map<String, Integer> keyToMetadataColumn =
            extractPersistedMetadataColumns(schema).stream()
                    .collect(Collectors.toMap(pos -> {
                        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
                        return metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
                    }, Function.identity()));
    final List<Integer> metadataColumns =
            createRequiredMetadataKeys(schema, sink).stream()
                    .map(keyToMetadataColumn::get)
                    .collect(Collectors.toList());
    final List<String> fieldNames =
            Stream.concat(
                            physicalColumns.stream().map(columns::get).map(Column::getName),
                            metadataColumns.stream()
                                    .map(columns::get)
                                    .map(MetadataColumn.class::cast)
                                    .map(c -> c.getMetadataKey().orElse(c.getName())))
                    .collect(Collectors.toList());
    final Map<String, DataType> metadataMap = extractMetadataMap(sink);
    final List<RexNode> fieldNodes =
            Stream.concat(
                            physicalColumns.stream().map(pos -> {
                                final int posAdjusted = adjustByVirtualColumns(columns, pos);
                                return relBuilder.field(posAdjusted);
                            }),
                            metadataColumns.stream().map(pos -> {
                                final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
                                final String metadataKey =
                                        metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
                                final LogicalType expectedType =
                                        metadataMap.get(metadataKey).getLogicalType();
                                final RelDataType expectedRelDataType =
                                        typeFactory.createFieldTypeFromLogicalType(expectedType);
                                final int posAdjusted = adjustByVirtualColumns(columns, pos);
                                return rexBuilder.makeAbstractCast(
                                        expectedRelDataType, relBuilder.field(posAdjusted));
                            }))
                    .collect(Collectors.toList());
    relBuilder.projectNamed(fieldNodes, fieldNames, true);
}
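The projection built here puts physical columns first, in declared order, followed by the persisted metadata columns in the order of the keys returned by createRequiredMetadataKeys, renamed to their metadata keys and cast to the sink's expected metadata types. The standalone sketch below mimics only the ordering and renaming, using plain strings instead of Column and RexNode; it is not Flink code, and all names are made up for illustration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class MetadataProjectionOrderSketch {
    public static void main(String[] args) {
        // Declared schema: two physical columns plus one persisted metadata column.
        List<String> physicalColumns = Arrays.asList("user_id", "payload");
        Map<String, String> keyToMetadataColumn = new LinkedHashMap<>();
        keyToMetadataColumn.put("timestamp", "event_time"); // metadata key -> column name

        // The order the sink expects, i.e. what createRequiredMetadataKeys would return.
        List<String> requiredMetadataKeys = Arrays.asList("timestamp");

        // Physical columns keep their declared order; metadata fields follow in the
        // sink's key order and are renamed to their metadata keys (the real projection
        // additionally casts them to the sink's expected metadata type).
        List<String> projectedFieldNames = new ArrayList<>(physicalColumns);
        for (String key : requiredMetadataKeys) {
            if (keyToMetadataColumn.containsKey(key)) {
                projectedFieldNames.add(key);
            }
        }
        System.out.println(projectedFieldNames); // prints [user_id, payload, timestamp]
    }
}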