Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class DescriptorProperties, method getOptionalTableSchema.
/**
* Returns a table schema under the given key if it exists.
*/
public Optional<TableSchema> getOptionalTableSchema(String key) {
    // filter for the number of fields ("key." is the prefix of every column property)
    final int fieldCount = properties.keySet().stream()
            .filter((k) -> k.startsWith(key)
                    && SCHEMA_COLUMN_NAME_SUFFIX.matcher(k.substring(key.length() + 1)).matches())
            .mapToInt((k) -> 1)
            .sum();
    if (fieldCount == 0) {
        return Optional.empty();
    }

    // validate fields and build schema
    final TableSchema.Builder schemaBuilder = TableSchema.builder();
    for (int i = 0; i < fieldCount; i++) {
        final String nameKey = key + '.' + i + '.' + NAME;
        final String legacyTypeKey = key + '.' + i + '.' + TYPE;
        final String typeKey = key + '.' + i + '.' + DATA_TYPE;
        final String exprKey = key + '.' + i + '.' + EXPR;
        final String metadataKey = key + '.' + i + '.' + METADATA;
        final String virtualKey = key + '.' + i + '.' + VIRTUAL;

        final String name = optionalGet(nameKey).orElseThrow(exceptionSupplier(nameKey));

        final DataType type;
        if (containsKey(typeKey)) {
            type = getDataType(typeKey);
        } else if (containsKey(legacyTypeKey)) {
            type = TypeConversions.fromLegacyInfoToDataType(getType(legacyTypeKey));
        } else {
            throw exceptionSupplier(typeKey).get();
        }

        final Optional<String> expr = optionalGet(exprKey);
        final Optional<String> metadata = optionalGet(metadataKey);
        final boolean virtual = getOptionalBoolean(virtualKey).orElse(false);

        if (expr.isPresent()) {
            // computed column
            schemaBuilder.add(TableColumn.computed(name, type, expr.get()));
        } else if (metadata.isPresent()) {
            // metadata column
            final String metadataAlias = metadata.get();
            if (metadataAlias.equals(name)) {
                schemaBuilder.add(TableColumn.metadata(name, type, virtual));
            } else {
                schemaBuilder.add(TableColumn.metadata(name, type, metadataAlias, virtual));
            }
        } else {
            // physical column
            schemaBuilder.add(TableColumn.physical(name, type));
        }
    }

    // extract watermark information (count the watermark entries by their rowtime suffix)
    final String watermarkPrefixKey = key + '.' + WATERMARK;
    final int watermarkCount = properties.keySet().stream()
            .filter((k) -> k.startsWith(watermarkPrefixKey)
                    && k.endsWith('.' + WATERMARK_ROWTIME))
            .mapToInt((k) -> 1)
            .sum();
    if (watermarkCount > 0) {
        for (int i = 0; i < watermarkCount; i++) {
            final String rowtimeKey = watermarkPrefixKey + '.' + i + '.' + WATERMARK_ROWTIME;
            final String exprKey = watermarkPrefixKey + '.' + i + '.' + WATERMARK_STRATEGY_EXPR;
            final String typeKey = watermarkPrefixKey + '.' + i + '.' + WATERMARK_STRATEGY_DATA_TYPE;
            final String rowtime = optionalGet(rowtimeKey).orElseThrow(exceptionSupplier(rowtimeKey));
            final String exprString = optionalGet(exprKey).orElseThrow(exceptionSupplier(exprKey));
            final String typeString = optionalGet(typeKey).orElseThrow(exceptionSupplier(typeKey));
            final DataType exprType =
                    TypeConversions.fromLogicalToDataType(LogicalTypeParser.parse(typeString));
            schemaBuilder.watermark(rowtime, exprString, exprType);
        }
    }

    // extract the primary key constraint
    final String pkConstraintNameKey = key + '.' + PRIMARY_KEY_NAME;
    final Optional<String> pkConstraintNameOpt = optionalGet(pkConstraintNameKey);
    if (pkConstraintNameOpt.isPresent()) {
        final String pkColumnsKey = key + '.' + PRIMARY_KEY_COLUMNS;
        final String columns = optionalGet(pkColumnsKey).orElseThrow(exceptionSupplier(pkColumnsKey));
        schemaBuilder.primaryKey(pkConstraintNameOpt.get(), columns.split(","));
    }

    return Optional.of(schemaBuilder.build());
}
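A minimal usage sketch, not taken from the Flink sources: assuming the NAME and DATA_TYPE suffix constants above resolve to "name" and "data-type", a schema can be stored as indexed string properties and read back with this method. The key layout and values below are illustrative.

DescriptorProperties properties = new DescriptorProperties();
// assumed key layout: "<key>.<index>.name" and "<key>.<index>.data-type"
properties.putString("schema.0.name", "id");
properties.putString("schema.0.data-type", "BIGINT");
properties.putString("schema.1.name", "word");
properties.putString("schema.1.data-type", "VARCHAR(2147483647)");

Optional<TableSchema> schema = properties.getOptionalTableSchema("schema");
// if present, schema.get().getFieldNames() yields ["id", "word"]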
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class DescriptorProperties, method putTableSchema.
/**
* Adds a table schema under the given key.
*/
public void putTableSchema(String key, TableSchema schema) {
    checkNotNull(key);
    checkNotNull(schema);

    final String[] fieldNames = schema.getFieldNames();
    final DataType[] fieldTypes = schema.getFieldDataTypes();
    final String[] fieldExpressions = schema.getTableColumns().stream()
            .map(column -> {
                if (column instanceof ComputedColumn) {
                    return ((ComputedColumn) column).getExpression();
                }
                return null;
            })
            .toArray(String[]::new);
    final String[] fieldMetadata = schema.getTableColumns().stream()
            .map(column -> {
                if (column instanceof MetadataColumn) {
                    return ((MetadataColumn) column).getMetadataAlias().orElse(column.getName());
                }
                return null;
            })
            .toArray(String[]::new);
    final String[] fieldVirtual = schema.getTableColumns().stream()
            .map(column -> {
                if (column instanceof MetadataColumn) {
                    return Boolean.toString(((MetadataColumn) column).isVirtual());
                }
                return null;
            })
            .toArray(String[]::new);

    final List<List<String>> values = new ArrayList<>();
    for (int i = 0; i < schema.getFieldCount(); i++) {
        values.add(
                Arrays.asList(
                        fieldNames[i],
                        fieldTypes[i].getLogicalType().asSerializableString(),
                        fieldExpressions[i],
                        fieldMetadata[i],
                        fieldVirtual[i]));
    }
    putIndexedOptionalProperties(key, Arrays.asList(NAME, DATA_TYPE, EXPR, METADATA, VIRTUAL), values);

    if (!schema.getWatermarkSpecs().isEmpty()) {
        final List<List<String>> watermarkValues = new ArrayList<>();
        for (WatermarkSpec spec : schema.getWatermarkSpecs()) {
            watermarkValues.add(
                    Arrays.asList(
                            spec.getRowtimeAttribute(),
                            spec.getWatermarkExpr(),
                            spec.getWatermarkExprOutputType().getLogicalType().asSerializableString()));
        }
        putIndexedFixedProperties(
                key + '.' + WATERMARK,
                Arrays.asList(WATERMARK_ROWTIME, WATERMARK_STRATEGY_EXPR, WATERMARK_STRATEGY_DATA_TYPE),
                watermarkValues);
    }

    schema.getPrimaryKey().ifPresent(pk -> {
        putString(key + '.' + PRIMARY_KEY_NAME, pk.getName());
        putString(key + '.' + PRIMARY_KEY_COLUMNS, String.join(",", pk.getColumns()));
    });
}
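A round-trip sketch combining putTableSchema with the getOptionalTableSchema method shown earlier; the schema itself (column names, watermark expression, constraint name) is an illustrative assumption.

DescriptorProperties properties = new DescriptorProperties();
TableSchema schema = TableSchema.builder()
        .field("user_id", DataTypes.BIGINT().notNull())
        .field("ts", DataTypes.TIMESTAMP(3))
        .watermark("ts", "ts - INTERVAL '5' SECOND", DataTypes.TIMESTAMP(3))
        .primaryKey("pk", new String[] { "user_id" })
        .build();

// serializes columns, watermark, and primary key under the "schema" prefix
properties.putTableSchema("schema", schema);
TableSchema restored = properties.getOptionalTableSchema("schema")
        .orElseThrow(IllegalStateException::new);
// restored.equals(schema) should hold for this round trip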
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class TableSchemaUtilsTest, method testBuilderWithGivenSchema.
@Test
public void testBuilderWithGivenSchema() {
    TableSchema oriSchema = TableSchema.builder()
            .field("a", DataTypes.INT().notNull())
            .field("b", DataTypes.STRING())
            .field("c", DataTypes.INT(), "a + 1")
            .field("t", DataTypes.TIMESTAMP(3))
            .primaryKey("ct1", new String[] { "a" })
            .watermark("t", "t", DataTypes.TIMESTAMP(3))
            .build();
    TableSchema newSchema = TableSchemaUtils.builderWithGivenSchema(oriSchema).build();
    assertEquals(oriSchema, newSchema);
}
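As the test suggests, builderWithGivenSchema pre-populates a builder with the columns, watermark specs, and primary key of an existing schema, so further fields can be appended before building. A hedged sketch (the extra field is illustrative):

TableSchema extended = TableSchemaUtils.builderWithGivenSchema(oriSchema)
        .field("extraField", DataTypes.DOUBLE())
        .build();
// extended contains every column of oriSchema plus the appended "extraField"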
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class TypeMappingUtilsTest, method testCheckPhysicalLogicalTypeCompatible.
@Test
public void testCheckPhysicalLogicalTypeCompatible() {
    TableSchema tableSchema = TableSchema.builder()
            .field("a", DataTypes.VARCHAR(2))
            .field("b", DataTypes.DECIMAL(20, 2))
            .build();
    TableSink tableSink = new TestTableSink(tableSchema);
    LegacyTypeInformationType legacyDataType =
            (LegacyTypeInformationType) tableSink.getConsumedDataType().getLogicalType();
    TypeInformation legacyTypeInfo = ((TupleTypeInfo) legacyDataType.getTypeInformation()).getTypeAt(1);
    DataType physicalType = TypeConversions.fromLegacyInfoToDataType(legacyTypeInfo);
    ResolvedSchema physicSchema = DataTypeUtils.expandCompositeTypeToSchema(physicalType);
    DataType[] logicalDataTypes = tableSchema.getFieldDataTypes();
    List<DataType> physicalDataTypes = physicSchema.getColumnDataTypes();
    for (int i = 0; i < logicalDataTypes.length; i++) {
        TypeMappingUtils.checkPhysicalLogicalTypeCompatible(
                physicalDataTypes.get(i).getLogicalType(),
                logicalDataTypes[i].getLogicalType(),
                "physicalField",
                "logicalField",
                false);
    }
}
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class TableFormatFactoryBaseTest, method testSchemaDerivationWithRowtime.
@Test
public void testSchemaDerivationWithRowtime() {
    final Map<String, String> properties = new HashMap<>();
    properties.put("schema.0.name", "otherField");
    properties.put("schema.0.type", "VARCHAR");
    properties.put("schema.0.from", "csvField");
    properties.put("schema.1.name", "abcField");
    properties.put("schema.1.type", "VARCHAR");
    properties.put("schema.2.name", "p");
    properties.put("schema.2.type", "TIMESTAMP");
    properties.put("schema.2.proctime", "true");
    properties.put("schema.3.name", "r");
    properties.put("schema.3.type", "TIMESTAMP");
    // from-field strategy
    properties.put("schema.3.rowtime.timestamps.type", "from-field");
    properties.put("schema.3.rowtime.timestamps.from", "myTime");
    properties.put("schema.3.rowtime.watermarks.type", "from-source");

    final TableSchema actualSchema = TableFormatFactoryBase.deriveSchema(properties);

    final TableSchema expectedSchema = TableSchema.builder()
            .field("csvField", Types.STRING) // aliased
            .field("abcField", Types.STRING)
            .field("myTime", Types.SQL_TIMESTAMP)
            .build();
    assertEquals(expectedSchema, actualSchema);
}