Usage of org.apache.flink.table.api.Schema.UnresolvedColumn in the Apache Flink project:
the method createProducingResult of class SchemaTranslator.
/**
 * Converts the given {@link DataType} into the final {@link ProducingResult}.
 *
 * <p>This method serves three types of use cases:
 *
 * <ul>
 *   <li>1. Derive physical columns from the input schema.
 *   <li>2. Derive physical columns from the input schema but enrich with metadata column and
 *       primary key.
 *   <li>3. Entirely use declared schema.
 * </ul>
 */
public static ProducingResult createProducingResult(ResolvedSchema inputSchema, @Nullable Schema declaredSchema) {
    if (declaredSchema == null) {
        // Case 1: no declared schema, so everything is derived from the input.
        // Going through the data type erases time attributes.
        final Schema derivedSchema =
                Schema.newBuilder().fromRowDataType(inputSchema.toSourceRowDataType()).build();
        return new ProducingResult(null, derivedSchema, null);
    }
    final List<UnresolvedColumn> columns = declaredSchema.getColumns();
    final boolean declaresPhysicalColumns =
            columns.stream().anyMatch(SchemaTranslator::isPhysical);
    if (declaresPhysicalColumns) {
        // Case 3: the declared schema is complete and is used as-is.
        return new ProducingResult(null, declaredSchema, null);
    }
    // Case 2: the declared schema contains no physical columns; it only replaces
    // physical columns with a metadata rowtime or adds a primary key.
    // Going through the data type erases time attributes.
    final DataType physicalDataType =
            patchDataTypeWithoutMetadataRowtime(inputSchema.toSourceRowDataType(), columns);
    final Schema enrichedSchema =
            Schema.newBuilder()
                    .fromRowDataType(physicalDataType)
                    .fromSchema(declaredSchema)
                    .build();
    return new ProducingResult(null, enrichedSchema, null);
}
Usage of org.apache.flink.table.api.Schema.UnresolvedColumn in the Apache Flink project:
the method createConsumingResult of class SchemaTranslator.
/**
 * Converts the given {@link DataType} and an optional declared {@link Schema} (possibly
 * incomplete) into the final {@link ConsumingResult}.
 *
 * <p>This method serves three types of use cases:
 *
 * <ul>
 *   <li>1. Derive physical columns from the input data type.
 *   <li>2. Derive physical columns but merge them with declared computed columns and other
 *       schema information.
 *   <li>3. Derive and enrich physical columns and merge other schema information (only if
 *       {@code mergePhysicalSchema} is set to {@code true}).
 * </ul>
 */
public static ConsumingResult createConsumingResult(DataTypeFactory dataTypeFactory, DataType inputDataType, @Nullable Schema declaredSchema, boolean mergePhysicalSchema) {
    final LogicalType inputLogicalType = inputDataType.getLogicalType();
    // The number of columns must not change during enrichment, so we remember whether
    // the original type was qualified as a top-level record or not.
    final boolean isTopLevelRecord = LogicalTypeChecks.isCompositeType(inputLogicalType);
    if (declaredSchema == null) {
        // Case 1: no declared schema; derive everything from the input data type.
        final Schema.Builder derived = Schema.newBuilder();
        addPhysicalSourceDataTypeFields(derived, inputDataType, null);
        return new ConsumingResult(inputDataType, isTopLevelRecord, derived.build(), null);
    }
    final List<UnresolvedColumn> columns = declaredSchema.getColumns();
    final UnresolvedPrimaryKey primaryKey = declaredSchema.getPrimaryKey().orElse(null);
    final boolean declaresPhysicalColumns =
            columns.stream().anyMatch(SchemaTranslator::isPhysical);
    if (!declaresPhysicalColumns) {
        // Case 2: the declared schema only enriches the non-physical column parts.
        final Schema.Builder merged = Schema.newBuilder();
        addPhysicalSourceDataTypeFields(merged, inputDataType, primaryKey);
        merged.fromSchema(declaredSchema);
        return new ConsumingResult(inputDataType, isTopLevelRecord, merged.build(), null);
    }
    if (!mergePhysicalSchema) {
        // Physical columns are declared but merging is disabled; use the schema as-is.
        return new ConsumingResult(inputDataType, isTopLevelRecord, declaredSchema, null);
    }
    // Case 3: the declared schema enriches the physical data type and the derived
    // schema; it possibly projects the result.
    final DataType patchedDataType =
            patchDataTypeFromDeclaredSchema(dataTypeFactory, inputDataType, columns);
    final Schema patchedSchema =
            createPatchedSchema(isTopLevelRecord, patchedDataType, declaredSchema);
    final List<String> projections = extractProjections(patchedSchema, declaredSchema);
    return new ConsumingResult(patchedDataType, isTopLevelRecord, patchedSchema, projections);
}
Aggregations