Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class SqlCreateTableConverter, method createCatalogTable:
private CatalogTable createCatalogTable(SqlCreateTable sqlCreateTable) {
    final TableSchema sourceTableSchema;
    final List<String> sourcePartitionKeys;
    final List<SqlTableLike.SqlTableLikeOption> likeOptions;
    final Map<String, String> sourceProperties;
    if (sqlCreateTable.getTableLike().isPresent()) {
        // CREATE TABLE ... LIKE: resolve the referenced table and use its
        // schema, partition keys, and options as the merge baseline.
        SqlTableLike sqlTableLike = sqlCreateTable.getTableLike().get();
        CatalogTable table = lookupLikeSourceTable(sqlTableLike);
        sourceTableSchema =
                TableSchema.fromResolvedSchema(
                        table.getUnresolvedSchema()
                                .resolve(catalogManager.getSchemaResolver()));
        sourcePartitionKeys = table.getPartitionKeys();
        likeOptions = sqlTableLike.getOptions();
        sourceProperties = table.getOptions();
    } else {
        // Plain CREATE TABLE: start from an empty baseline.
        sourceTableSchema = TableSchema.builder().build();
        sourcePartitionKeys = Collections.emptyList();
        likeOptions = Collections.emptyList();
        sourceProperties = Collections.emptyMap();
    }
    Map<SqlTableLike.FeatureOption, SqlTableLike.MergingStrategy> mergingStrategies =
            mergeTableLikeUtil.computeMergingStrategies(likeOptions);
    Map<String, String> mergedOptions =
            mergeOptions(sqlCreateTable, sourceProperties, mergingStrategies);
    Optional<SqlTableConstraint> primaryKey =
            sqlCreateTable.getFullConstraints().stream()
                    .filter(SqlTableConstraint::isPrimaryKey)
                    .findAny();
    TableSchema mergedSchema =
            mergeTableLikeUtil.mergeTables(
                    mergingStrategies,
                    sourceTableSchema,
                    sqlCreateTable.getColumnList().getList(),
                    sqlCreateTable
                            .getWatermark()
                            .map(Collections::singletonList)
                            .orElseGet(Collections::emptyList),
                    primaryKey.orElse(null));
    List<String> partitionKeys =
            mergePartitions(
                    sourcePartitionKeys,
                    sqlCreateTable.getPartitionKeyList(),
                    mergingStrategies);
    verifyPartitioningColumnsExist(mergedSchema, partitionKeys);
    String tableComment =
            sqlCreateTable
                    .getComment()
                    .map(comment -> comment.getNlsString().getValue())
                    .orElse(null);
    return new CatalogTableImpl(mergedSchema, partitionKeys, mergedOptions, tableComment);
}
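For context, a minimal sketch (not from the source) of the kind of DDL this converter handles: a CREATE TABLE ... LIKE statement whose inherited schema and options are merged by the method above. The connector names and options here are illustrative assumptions.

// Minimal sketch, assuming the built-in datagen connector is on the classpath.
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class CreateTableLikeSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
        // Source table whose schema and options the LIKE clause inherits.
        tEnv.executeSql(
                "CREATE TABLE base (f0 BIGINT, f1 STRING) "
                        + "WITH ('connector' = 'datagen')");
        // Derived table: the converter merges base's schema with the new
        // computed column and applies the OVERWRITING OPTIONS strategy.
        tEnv.executeSql(
                "CREATE TABLE derived ("
                        + "  f2 AS f0 + 1"
                        + ") WITH ('number-of-rows' = '10') "
                        + "LIKE base (OVERWRITING OPTIONS)");
    }
}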
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class DescriptorPropertiesTest, method testLegacyTableSchema:
@Test
public void testLegacyTableSchema() {
    DescriptorProperties properties = new DescriptorProperties();
    Map<String, String> map = new HashMap<>();
    map.put("schema.0.name", "f0");
    map.put("schema.0.type", "VARCHAR");
    map.put("schema.1.name", "f1");
    map.put("schema.1.type", "BIGINT");
    map.put("schema.2.name", "f2");
    map.put("schema.2.type", "DECIMAL");
    map.put("schema.3.name", "f3");
    map.put("schema.3.type", "TIMESTAMP");
    map.put("schema.4.name", "f4");
    map.put("schema.4.type", "MAP<TINYINT, SMALLINT>");
    map.put("schema.5.name", "f5");
    map.put("schema.5.type", "ANY<java.lang.Class>");
    map.put("schema.6.name", "f6");
    map.put("schema.6.type", "PRIMITIVE_ARRAY<DOUBLE>");
    map.put("schema.7.name", "f7");
    map.put("schema.7.type", "OBJECT_ARRAY<TIME>");
    map.put("schema.8.name", "f8");
    map.put("schema.8.type", "ROW<q1 VARCHAR, q2 DATE>");
    map.put("schema.9.name", "f9");
    map.put("schema.9.type", "POJO<org.apache.flink.table.types.LogicalTypeParserTest$MyPojo>");
    properties.putProperties(map);
    TableSchema restored = properties.getTableSchema("schema");
    TableSchema expected =
            TableSchema.builder()
                    .field("f0", Types.STRING)
                    .field("f1", Types.LONG)
                    .field("f2", Types.BIG_DEC)
                    .field("f3", Types.SQL_TIMESTAMP)
                    .field("f4", Types.MAP(Types.BYTE, Types.SHORT))
                    .field("f5", Types.GENERIC(Class.class))
                    .field("f6", Types.PRIMITIVE_ARRAY(Types.DOUBLE))
                    .field("f7", Types.OBJECT_ARRAY(Types.SQL_TIME))
                    .field(
                            "f8",
                            Types.ROW_NAMED(
                                    new String[] {"q1", "q2"}, Types.STRING, Types.SQL_DATE))
                    .field("f9", Types.POJO(LogicalTypeParserTest.MyPojo.class))
                    .build();
    assertEquals(expected, restored);
}
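The legacy "type" keys use the pre-1.9 type-string format and are resolved back to type information when the schema is read. A minimal round-trip sketch (illustrative, not from the source; assumes a Flink version where DescriptorProperties still supports the legacy keys):

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.DescriptorProperties;

public class LegacySchemaRoundTrip {
    public static void main(String[] args) {
        DescriptorProperties props = new DescriptorProperties();
        props.putString("schema.0.name", "id");
        props.putString("schema.0.type", "BIGINT"); // legacy "type" key, not "data-type"
        TableSchema schema = props.getTableSchema("schema");
        // Prints Optional[BIGINT]: the legacy string is resolved to a data type.
        System.out.println(schema.getFieldDataType("id"));
    }
}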
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class DescriptorPropertiesTest, method testTableSchema:
@Test
public void testTableSchema() {
    TableSchema schema =
            TableSchema.builder()
                    .add(TableColumn.physical("f0", DataTypes.BIGINT().notNull()))
                    .add(
                            TableColumn.physical(
                                    "f1",
                                    DataTypes.ROW(
                                            DataTypes.FIELD("q1", DataTypes.STRING()),
                                            DataTypes.FIELD("q2", DataTypes.TIMESTAMP(9)))))
                    .add(TableColumn.physical("f2", DataTypes.STRING().notNull()))
                    .add(TableColumn.computed("f3", DataTypes.BIGINT().notNull(), "f0 + 1"))
                    .add(TableColumn.physical("f4", DataTypes.DECIMAL(10, 3)))
                    .add(TableColumn.metadata("f5", DataTypes.DECIMAL(10, 3)))
                    .add(TableColumn.metadata("f6", DataTypes.INT(), "other.key"))
                    .add(TableColumn.metadata("f7", DataTypes.INT(), true))
                    .add(TableColumn.metadata("f8", DataTypes.INT(), "other.key", true))
                    .watermark("f1.q2", "`f1`.`q2` - INTERVAL '5' SECOND", DataTypes.TIMESTAMP(3))
                    .primaryKey("constraint1", new String[] {"f0", "f2"})
                    .build();
    DescriptorProperties properties = new DescriptorProperties();
    properties.putTableSchema("schema", schema);
    Map<String, String> actual = properties.asMap();
    Map<String, String> expected = new HashMap<>();
    expected.put("schema.0.name", "f0");
    expected.put("schema.0.data-type", "BIGINT NOT NULL");
    expected.put("schema.1.name", "f1");
    expected.put("schema.1.data-type", "ROW<`q1` VARCHAR(2147483647), `q2` TIMESTAMP(9)>");
    expected.put("schema.2.name", "f2");
    expected.put("schema.2.data-type", "VARCHAR(2147483647) NOT NULL");
    expected.put("schema.3.name", "f3");
    expected.put("schema.3.data-type", "BIGINT NOT NULL");
    expected.put("schema.3.expr", "f0 + 1");
    expected.put("schema.4.name", "f4");
    expected.put("schema.4.data-type", "DECIMAL(10, 3)");
    expected.put("schema.5.name", "f5");
    expected.put("schema.5.data-type", "DECIMAL(10, 3)");
    expected.put("schema.5.metadata", "f5");
    expected.put("schema.5.virtual", "false");
    expected.put("schema.6.name", "f6");
    expected.put("schema.6.data-type", "INT");
    expected.put("schema.6.metadata", "other.key");
    expected.put("schema.6.virtual", "false");
    expected.put("schema.7.name", "f7");
    expected.put("schema.7.data-type", "INT");
    expected.put("schema.7.metadata", "f7");
    expected.put("schema.7.virtual", "true");
    expected.put("schema.8.name", "f8");
    expected.put("schema.8.data-type", "INT");
    expected.put("schema.8.metadata", "other.key");
    expected.put("schema.8.virtual", "true");
    expected.put("schema.watermark.0.rowtime", "f1.q2");
    expected.put("schema.watermark.0.strategy.expr", "`f1`.`q2` - INTERVAL '5' SECOND");
    expected.put("schema.watermark.0.strategy.data-type", "TIMESTAMP(3)");
    expected.put("schema.primary-key.name", "constraint1");
    expected.put("schema.primary-key.columns", "f0,f2");
    assertEquals(expected, actual);
    TableSchema restored = properties.getTableSchema("schema");
    assertEquals(schema, restored);
}
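TableSchema is deprecated in newer Flink releases in favor of org.apache.flink.table.api.Schema. A hedged sketch of roughly the same declaration with the newer builder (illustrative only, not from the source; column names mirror the test above):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class ModernSchemaSketch {
    public static void main(String[] args) {
        Schema schema =
                Schema.newBuilder()
                        .column("f0", DataTypes.BIGINT().notNull())
                        .column(
                                "f1",
                                DataTypes.ROW(
                                        DataTypes.FIELD("q1", DataTypes.STRING()),
                                        DataTypes.FIELD("q2", DataTypes.TIMESTAMP(9))))
                        .column("f2", DataTypes.STRING().notNull())
                        .columnByExpression("f3", "f0 + 1")          // computed column
                        .columnByMetadata("f6", DataTypes.INT(), "other.key", false) // metadata column
                        .watermark("f1.q2", "`f1`.`q2` - INTERVAL '5' SECOND")
                        .primaryKeyNamed("constraint1", "f0", "f2")
                        .build();
        System.out.println(schema);
    }
}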
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class CatalogConstraintTest, method testWithoutPrimaryKey:
@Test
public void testWithoutPrimaryKey() throws Exception {
    TableSchema tableSchema =
            TableSchema.builder()
                    .fields(
                            new String[] {"a", "b", "c"},
                            new DataType[] {
                                DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.INT()
                            })
                    .build();
    Map<String, String> properties = buildCatalogTableProperties(tableSchema);
    catalog.createTable(
            new ObjectPath(databaseName, "T1"),
            new CatalogTableImpl(tableSchema, properties, ""),
            false);
    RelNode t1 = TableTestUtil.toRelNode(tEnv.sqlQuery("select * from T1"));
    FlinkRelMetadataQuery mq =
            FlinkRelMetadataQuery.reuseOrCreate(t1.getCluster().getMetadataQuery());
    assertEquals(ImmutableSet.of(), mq.getUniqueKeys(t1));
}
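getUniqueKeys reports each derived unique key as a Calcite ImmutableBitSet of zero-based column ordinals; the empty set asserted here means no key could be derived. A tiny sketch of the encoding (illustrative, assuming Calcite on the classpath), which the next test relies on:

import org.apache.calcite.util.ImmutableBitSet;

public class UniqueKeyBitSets {
    public static void main(String[] args) {
        // Column ordinals are zero-based: {a=0, b=1, c=2}. A unique key on
        // column "b" is therefore encoded as the bit set {1}.
        ImmutableBitSet keyOnB = ImmutableBitSet.of(1);
        System.out.println(keyOnB); // prints {1}
    }
}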
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class CatalogConstraintTest, method testWithPrimaryKey:
@Test
public void testWithPrimaryKey() throws Exception {
    TableSchema tableSchema =
            TableSchema.builder()
                    .fields(
                            new String[] {"a", "b", "c"},
                            new DataType[] {
                                DataTypes.STRING(), DataTypes.BIGINT().notNull(), DataTypes.INT()
                            })
                    .primaryKey("b")
                    .build();
    Map<String, String> properties = buildCatalogTableProperties(tableSchema);
    catalog.createTable(
            new ObjectPath(databaseName, "T1"),
            new CatalogTableImpl(tableSchema, properties, ""),
            false);
    RelNode t1 = TableTestUtil.toRelNode(tEnv.sqlQuery("select * from T1"));
    FlinkRelMetadataQuery mq =
            FlinkRelMetadataQuery.reuseOrCreate(t1.getCluster().getMetadataQuery());
    assertEquals(ImmutableSet.of(ImmutableBitSet.of(1)), mq.getUniqueKeys(t1));
}
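In SQL DDL, the same constraint must be declared NOT ENFORCED, since Flink does not own the data and cannot enforce the key itself. A minimal sketch of the equivalent DDL (not from the source; the connector choice is an assumption):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class PrimaryKeyDdlSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
        // Equivalent DDL to the catalog table built in the test above; the
        // planner can use the declared key when deriving unique keys.
        tEnv.executeSql(
                "CREATE TABLE T1 ("
                        + "  a STRING,"
                        + "  b BIGINT NOT NULL,"
                        + "  c INT,"
                        + "  PRIMARY KEY (b) NOT ENFORCED"
                        + ") WITH ('connector' = 'datagen')");
    }
}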