Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class DynamicSinkUtils, method createConsumedType.
/**
* Returns the {@link DataType} that a sink should consume as the output from the runtime.
*
* <p>The format looks as follows: {@code PHYSICAL COLUMNS + PERSISTED METADATA COLUMNS}
*/
private static RowType createConsumedType(ResolvedSchema schema, DynamicTableSink sink) {
    final Map<String, DataType> metadataMap = extractMetadataMap(sink);

    final Stream<RowField> physicalFields =
            schema.getColumns().stream()
                    .filter(Column::isPhysical)
                    .map(c -> new RowField(c.getName(), c.getDataType().getLogicalType()));

    final Stream<RowField> metadataFields =
            createRequiredMetadataKeys(schema, sink).stream()
                    .map(k -> new RowField(k, metadataMap.get(k).getLogicalType()));

    final List<RowField> rowFields =
            Stream.concat(physicalFields, metadataFields).collect(Collectors.toList());

    return new RowType(false, rowFields);
}
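For illustration, here is a minimal, self-contained sketch of the kind of consumed type this method produces: physical fields first, then the persisted metadata fields appended at the end. The column names and types and the "timestamp" metadata key are hypothetical; only RowType and RowField come from the Flink API.

import java.util.Arrays;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.RowType.RowField;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.logical.VarCharType;

public class ConsumedTypeSketch {
    public static void main(String[] args) {
        // Hypothetical table: two physical columns plus one persisted metadata column
        // (e.g. a writable "timestamp" metadata key). Metadata fields come last.
        RowType consumedType =
                new RowType(
                        false,
                        Arrays.asList(
                                new RowField("id", new BigIntType()),
                                new RowField("name", new VarCharType(VarCharType.MAX_LENGTH)),
                                new RowField("timestamp", new TimestampType(3))));

        // Prints something like:
        // ROW<`id` BIGINT, `name` VARCHAR(2147483647), `timestamp` TIMESTAMP(3)> NOT NULL
        System.out.println(consumedType.asSerializableString());
    }
}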
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class DynamicSinkUtils, method validateSchemaAndApplyImplicitCast.
/**
* Checks if the given query can be written into the given sink's table schema.
*
* <p>It checks whether field types are compatible (types should be equal including precisions).
* If types are not compatible, but can be implicitly cast, a cast projection will be applied.
* Otherwise, an exception will be thrown.
*/
public static RelNode validateSchemaAndApplyImplicitCast(
        RelNode query,
        ResolvedSchema sinkSchema,
        String tableDebugName,
        DataTypeFactory dataTypeFactory,
        FlinkTypeFactory typeFactory) {
    final RowType queryType = FlinkTypeFactory.toLogicalRowType(query.getRowType());
    final List<RowField> queryFields = queryType.getFields();

    final RowType sinkType =
            (RowType)
                    fixSinkDataType(dataTypeFactory, sinkSchema.toSinkRowDataType())
                            .getLogicalType();
    final List<RowField> sinkFields = sinkType.getFields();

    if (queryFields.size() != sinkFields.size()) {
        throw createSchemaMismatchException(
                "Different number of columns.", tableDebugName, queryFields, sinkFields);
    }

    boolean requiresCasting = false;
    for (int i = 0; i < sinkFields.size(); i++) {
        final LogicalType queryColumnType = queryFields.get(i).getType();
        final LogicalType sinkColumnType = sinkFields.get(i).getType();
        if (!supportsImplicitCast(queryColumnType, sinkColumnType)) {
            throw createSchemaMismatchException(
                    String.format(
                            "Incompatible types for sink column '%s' at position %s.",
                            sinkFields.get(i).getName(), i),
                    tableDebugName,
                    queryFields,
                    sinkFields);
        }
        if (!supportsAvoidingCast(queryColumnType, sinkColumnType)) {
            requiresCasting = true;
        }
    }

    if (requiresCasting) {
        final RelDataType castRelDataType = typeFactory.buildRelNodeRowType(sinkType);
        return RelOptUtil.createCastRel(query, castRelDataType, true);
    }
    return query;
}
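The supportsImplicitCast and supportsAvoidingCast calls above are presumably static imports from org.apache.flink.table.types.logical.utils.LogicalTypeCasts. A small standalone sketch of how those checks drive the three outcomes (accept the query as-is, insert a cast projection, or fail); the concrete types chosen here are illustrative only.

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.logical.utils.LogicalTypeCasts;

public class CastCheckSketch {
    public static void main(String[] args) {
        IntType queryColumn = new IntType();
        BigIntType sinkColumn = new BigIntType();

        // INT can be implicitly cast to BIGINT, so such a sink column is accepted ...
        System.out.println(LogicalTypeCasts.supportsImplicitCast(queryColumn, sinkColumn)); // true

        // ... but the cast cannot be avoided, so a cast projection is added on top of the query.
        System.out.println(LogicalTypeCasts.supportsAvoidingCast(queryColumn, sinkColumn)); // false

        // No implicit cast exists from VARCHAR to INT, which would trigger the
        // schema mismatch exception instead.
        System.out.println(
                LogicalTypeCasts.supportsImplicitCast(
                        new VarCharType(VarCharType.MAX_LENGTH), new IntType())); // false
    }
}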
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class LogicalTypeMerging, method findCommonRowType.
@Nullable
private static LogicalType findCommonRowType(List<LogicalType> normalizedTypes) {
    final List<LogicalType> children = findCommonChildrenTypes(normalizedTypes);
    if (children == null) {
        return null;
    }

    final RowType firstType = (RowType) normalizedTypes.get(0);
    final List<RowType.RowField> newFields =
            IntStream.range(0, children.size())
                    .mapToObj(
                            pos -> {
                                final LogicalType newType = children.get(pos);
                                final RowType.RowField originalField =
                                        firstType.getFields().get(pos);
                                if (originalField.getDescription().isPresent()) {
                                    return new RowType.RowField(
                                            originalField.getName(),
                                            newType,
                                            originalField.getDescription().get());
                                } else {
                                    return new RowType.RowField(
                                            originalField.getName(), newType);
                                }
                            })
                    .collect(Collectors.toList());
    return new RowType(newFields);
}
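findCommonRowType itself is private; it is normally reached through the public LogicalTypeMerging.findCommonType entry point. A hedged sketch, assuming two row types that differ only in one field type; as the method above shows, field names in the result are taken from the first row type.

import java.util.Arrays;
import java.util.Optional;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.utils.LogicalTypeMerging;

public class CommonRowTypeSketch {
    public static void main(String[] args) {
        // Two row types that differ only in the type of the second field.
        LogicalType first = RowType.of(new IntType(), new IntType());
        LogicalType second = RowType.of(new IntType(), new BigIntType());

        Optional<LogicalType> common =
                LogicalTypeMerging.findCommonType(Arrays.asList(first, second));

        // Expected to print something like: ROW<`f0` INT, `f1` BIGINT>
        common.ifPresent(t -> System.out.println(t.asSummaryString()));
    }
}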
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class LogicalTypeUtils, method renameRowFields.
/**
* Renames the fields of the given {@link RowType}.
*/
public static RowType renameRowFields(RowType rowType, List<String> newFieldNames) {
    Preconditions.checkArgument(
            rowType.getFieldCount() == newFieldNames.size(),
            "Row length and new names must match.");
    final List<RowField> newFields =
            IntStream.range(0, rowType.getFieldCount())
                    .mapToObj(
                            pos -> {
                                final RowField oldField = rowType.getFields().get(pos);
                                return new RowField(
                                        newFieldNames.get(pos),
                                        oldField.getType(),
                                        oldField.getDescription().orElse(null));
                            })
                    .collect(Collectors.toList());
    return new RowType(rowType.isNullable(), newFields);
}
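A short usage sketch of renameRowFields. The field names and types are made up for the example; note that a name list of the wrong length trips the precondition above.

import java.util.Arrays;

import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.logical.utils.LogicalTypeUtils;

public class RenameRowFieldsSketch {
    public static void main(String[] args) {
        // RowType.of assigns the default field names f0, f1.
        RowType original = RowType.of(new IntType(), new VarCharType(VarCharType.MAX_LENGTH));

        // The new name list must have exactly as many entries as the row has fields.
        RowType renamed = LogicalTypeUtils.renameRowFields(original, Arrays.asList("id", "name"));

        // Prints something like: ROW<`id` INT, `name` VARCHAR(2147483647)>
        System.out.println(renamed.asSummaryString());
    }
}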
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
Class RandomGeneratorVisitor, method visit.
@Override
public DataGeneratorContainer visit(RowType rowType) {
    List<DataGeneratorContainer> fieldContainers =
            rowType.getFields().stream()
                    .map(
                            field -> {
                                String fieldName = name + "." + field.getName();
                                return field.getType()
                                        .accept(new RandomGeneratorVisitor(fieldName, config));
                            })
                    .collect(Collectors.toList());

    ConfigOption<?>[] options =
            fieldContainers.stream()
                    .flatMap(container -> container.getOptions().stream())
                    .toArray(ConfigOption[]::new);

    DataGenerator[] generators =
            fieldContainers.stream()
                    .map(DataGeneratorContainer::getGenerator)
                    .toArray(DataGenerator[]::new);

    return DataGeneratorContainer.of(
            new RowDataGenerator(generators, rowType.getFieldNames()), options);
}
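The visitor creates one generator per field and prefixes nested field names with the parent name plus a dot. The same recursion pattern can be reproduced outside the datagen connector, for example with a LogicalTypeDefaultVisitor that merely collects the dot-separated field paths of a nested RowType. The FieldPathVisitor class below is a hypothetical sketch, not part of Flink.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.utils.LogicalTypeDefaultVisitor;

/** Hypothetical visitor that collects dot-separated field paths of a (possibly nested) RowType. */
public class FieldPathVisitor extends LogicalTypeDefaultVisitor<List<String>> {

    private final String prefix;

    public FieldPathVisitor(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public List<String> visit(RowType rowType) {
        List<String> paths = new ArrayList<>();
        for (RowType.RowField field : rowType.getFields()) {
            // Same prefixing scheme as RandomGeneratorVisitor: parent name + "." + field name.
            String path = prefix.isEmpty() ? field.getName() : prefix + "." + field.getName();
            paths.addAll(field.getType().accept(new FieldPathVisitor(path)));
        }
        return paths;
    }

    @Override
    protected List<String> defaultMethod(LogicalType logicalType) {
        // Leaf type: the accumulated prefix is a complete path.
        return Collections.singletonList(prefix);
    }

    public static void main(String[] args) {
        RowType nested =
                RowType.of(
                        new LogicalType[] {new IntType(), RowType.of(new BigIntType())},
                        new String[] {"id", "payload"});

        // Prints [id, payload.f0]
        System.out.println(nested.accept(new FieldPathVisitor("")));
    }
}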