Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class TableFormatFactoryBaseTest, method testSchemaDerivation.
@Test
public void testSchemaDerivation() {
    final Map<String, String> properties = new HashMap<>();
    properties.put("schema.0.name", "otherField");
    properties.put("schema.0.type", "VARCHAR");
    properties.put("schema.0.from", "csvField");
    properties.put("schema.1.name", "abcField");
    properties.put("schema.1.type", "VARCHAR");
    properties.put("schema.2.name", "p");
    properties.put("schema.2.type", "TIMESTAMP");
    properties.put("schema.2.proctime", "true");
    properties.put("schema.3.name", "r");
    properties.put("schema.3.type", "TIMESTAMP");
    properties.put("schema.3.rowtime.timestamps.type", "from-source");
    properties.put("schema.3.rowtime.watermarks.type", "from-source");

    final TableSchema actualSchema = TableFormatFactoryBase.deriveSchema(properties);

    final TableSchema expectedSchema =
            TableSchema.builder()
                    .field("csvField", Types.STRING) // aliased
                    .field("abcField", Types.STRING)
                    .build();

    assertEquals(expectedSchema, actualSchema);
}
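For contrast, a rowtime attribute whose timestamps come "from-field" keeps the referenced physical field in the derived schema (see the else-if branch in deriveSchema further down). The following is a rough sketch of that case with made-up field names, not a test taken from the Flink sources:

@Test
public void testSchemaDerivationWithRowtimeFromField() {
    final Map<String, String> properties = new HashMap<>();
    properties.put("schema.0.name", "abcField");
    properties.put("schema.0.type", "VARCHAR");
    properties.put("schema.1.name", "r");
    properties.put("schema.1.type", "TIMESTAMP");
    // the rowtime attribute references a physical field of the format
    properties.put("schema.1.rowtime.timestamps.type", "from-field");
    properties.put("schema.1.rowtime.timestamps.from", "rowtimeField");
    properties.put("schema.1.rowtime.watermarks.type", "periodic-ascending");

    final TableSchema actualSchema = TableFormatFactoryBase.deriveSchema(properties);

    // the referenced field survives under its original name and type
    final TableSchema expectedSchema =
            TableSchema.builder()
                    .field("abcField", Types.STRING)
                    .field("rowtimeField", Types.SQL_TIMESTAMP)
                    .build();

    assertEquals(expectedSchema, actualSchema);
}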
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class CatalogTableImpl, method fromProperties.
/**
* Construct a {@link CatalogTableImpl} from complete properties that contain the table schema.
*/
public static CatalogTableImpl fromProperties(Map<String, String> properties) {
    DescriptorProperties descriptorProperties = new DescriptorProperties(false);
    descriptorProperties.putProperties(properties);
    TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
    List<String> partitionKeys = descriptorProperties.getPartitionKeys();
    return new CatalogTableImpl(
            tableSchema,
            partitionKeys,
            removeRedundant(properties, tableSchema, partitionKeys),
            "");
}
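For orientation, fromProperties is the inverse of CatalogTableImpl#toProperties, which flattens the schema, partition keys, and connector options into string properties. A rough usage sketch (the column names, partition key, and options are made up for illustration):

// build a catalog table, flatten it to properties, and restore it again
TableSchema schema =
        TableSchema.builder()
                .field("id", DataTypes.BIGINT())
                .field("name", DataTypes.STRING())
                .build();
Map<String, String> options = new HashMap<>();
options.put("connector", "filesystem");

CatalogTableImpl original =
        new CatalogTableImpl(schema, Collections.singletonList("name"), options, "a comment");

// toProperties() writes the flattened schema, partition keys, and options;
// fromProperties() reads them back (the comment is dropped, see the "" argument above)
Map<String, String> properties = original.toProperties();
CatalogTableImpl restored = CatalogTableImpl.fromProperties(properties);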
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class TableFormatFactoryBase, method deriveSchema.
// --------------------------------------------------------------------------------------------
/**
* Finds the table schema that can be used for a format schema (without time attributes and
* generated columns).
*/
public static TableSchema deriveSchema(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = new DescriptorProperties();
    descriptorProperties.putProperties(properties);
    final TableSchema.Builder builder = TableSchema.builder();
    final TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        final TableColumn tableColumn = tableSchema.getTableColumns().get(i);
        final String fieldName = tableColumn.getName();
        final DataType dataType = tableColumn.getType();
        if (!tableColumn.isPhysical()) {
            // skip non-physical columns
            continue;
        }
        final boolean isProctime =
                descriptorProperties
                        .getOptionalBoolean(SCHEMA + '.' + i + '.' + SCHEMA_PROCTIME)
                        .orElse(false);
        final String timestampKey = SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_TYPE;
        final boolean isRowtime = descriptorProperties.containsKey(timestampKey);
        if (!isProctime && !isRowtime) {
            // check for aliasing
            final String aliasName =
                    descriptorProperties
                            .getOptionalString(SCHEMA + '.' + i + '.' + SCHEMA_FROM)
                            .orElse(fieldName);
            builder.field(aliasName, dataType);
        } else if (isRowtime
                && descriptorProperties.isValue(timestampKey, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD)) {
            // only use the rowtime attribute if it references a field
            final String aliasName =
                    descriptorProperties.getString(SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_FROM);
            builder.field(aliasName, dataType);
        }
    }
    return builder.build();
}
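The flat schema.#.* keys consumed above are usually not written by hand. In the legacy API they are produced by the org.apache.flink.table.descriptors.Schema and Rowtime descriptors; the following is a rough sketch (assuming that descriptor API, which is not shown in the snippets here) of how the property map from the test above could be generated:

// each field()/proctime()/rowtime() call emits the corresponding schema.#.* properties
Map<String, String> properties =
        new Schema()
                .field("otherField", Types.STRING).from("csvField")  // -> schema.0.from
                .field("abcField", Types.STRING)
                .field("p", Types.SQL_TIMESTAMP).proctime()          // -> schema.2.proctime
                .field("r", Types.SQL_TIMESTAMP)
                .rowtime(new Rowtime().timestampsFromSource().watermarksFromSource())
                .toProperties();

// deriveSchema then keeps only the physical format fields: csvField and abcField
TableSchema formatSchema = TableFormatFactoryBase.deriveSchema(properties);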
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class CatalogSchemaTable, method getRowType.
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
    final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();

    // The following block is a workaround to support tables defined by
    // TableEnvironment.connect() and the actual table sources implement
    // DefinedProctimeAttribute/DefinedRowtimeAttributes.
    // It should be removed after we remove DefinedProctimeAttribute/DefinedRowtimeAttributes.
    Optional<TableSource<?>> sourceOpt = findAndCreateTableSource();
    if (isStreamingMode
            && sourceOpt.isPresent()
            && schema.getColumns().stream().allMatch(Column::isPhysical)
            && schema.getWatermarkSpecs().isEmpty()) {
        TableSchema tableSchema = TableSchema.fromResolvedSchema(schema);
        TableSource<?> source = sourceOpt.get();
        if (TableSourceValidation.hasProctimeAttribute(source)
                || TableSourceValidation.hasRowtimeAttribute(source)) {
            // If the table is defined by TableEnvironment.connect() and uses the legacy
            // proctime and rowtime descriptors, the TableSchema should fall back to
            // ConnectorCatalogTable#calculateSourceSchema.
            tableSchema = ConnectorCatalogTable.calculateSourceSchema(source, false);
        }
        return TableSourceUtil.getSourceRowType(
                flinkTypeFactory, tableSchema, scala.Option.empty(), true);
    }

    final List<String> fieldNames = schema.getColumnNames();
    final List<LogicalType> fieldTypes =
            schema.getColumnDataTypes().stream()
                    .map(DataType::getLogicalType)
                    .map(PlannerTypeUtils::removeLegacyTypes)
                    .collect(Collectors.toList());
    return flinkTypeFactory.buildRelNodeRowType(fieldNames, fieldTypes);
}
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class SqlToOperationConverterTest, method testCreateTableWithMetadataColumn.
@Test
public void testCreateTableWithMetadataColumn() {
    final String sql =
            "CREATE TABLE tbl1 (\n"
                    + " a INT,\n"
                    + " b STRING,\n"
                    + " c INT METADATA,\n"
                    + " d INT METADATA FROM 'other.key',\n"
                    + " e INT METADATA VIRTUAL\n"
                    + ")\n"
                    + " WITH (\n"
                    + " 'connector' = 'kafka',\n"
                    + " 'kafka.topic' = 'log.test'\n"
                    + ")\n";
    final FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    final Operation operation = parse(sql, planner, getParserBySqlDialect(SqlDialect.DEFAULT));
    assertThat(operation).isInstanceOf(CreateTableOperation.class);

    final CreateTableOperation op = (CreateTableOperation) operation;
    final TableSchema actualSchema = op.getCatalogTable().getSchema();
    final TableSchema expectedSchema =
            TableSchema.builder()
                    .add(TableColumn.physical("a", DataTypes.INT()))
                    .add(TableColumn.physical("b", DataTypes.STRING()))
                    .add(TableColumn.metadata("c", DataTypes.INT()))
                    .add(TableColumn.metadata("d", DataTypes.INT(), "other.key"))
                    .add(TableColumn.metadata("e", DataTypes.INT(), true))
                    .build();

    assertThat(actualSchema).isEqualTo(expectedSchema);
}
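TableSchema and TableColumn belong to the deprecated schema stack; the same metadata columns can be declared with the newer org.apache.flink.table.api.Schema builder. A rough equivalent of the expected schema above, shown for comparison (a sketch, not part of the test):

Schema schema =
        Schema.newBuilder()
                .column("a", DataTypes.INT())
                .column("b", DataTypes.STRING())
                .columnByMetadata("c", DataTypes.INT())
                // reads the metadata key 'other.key' instead of the column name
                .columnByMetadata("d", DataTypes.INT(), "other.key")
                // virtual: readable but excluded when writing to a sink
                .columnByMetadata("e", DataTypes.INT(), true)
                .build();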