Usage example of org.apache.flink.table.catalog.CatalogTableImpl from the Apache Flink project: class SqlCreateTableConverter, method createCatalogTable.
/**
 * Builds a {@link CatalogTable} from a parsed {@code CREATE TABLE} statement, merging in the
 * schema, partition keys and options of a source table when the statement carries a
 * {@code LIKE} clause.
 */
private CatalogTable createCatalogTable(SqlCreateTable sqlCreateTable) {
    // Pieces inherited from the LIKE source table; empty placeholders otherwise.
    final TableSchema sourceTableSchema;
    final List<String> sourcePartitionKeys;
    final List<SqlTableLike.SqlTableLikeOption> likeOptions;
    final Map<String, String> sourceProperties;

    Optional<SqlTableLike> maybeLike = sqlCreateTable.getTableLike();
    if (maybeLike.isPresent()) {
        SqlTableLike likeClause = maybeLike.get();
        CatalogTable likeSourceTable = lookupLikeSourceTable(likeClause);
        sourceTableSchema =
                TableSchema.fromResolvedSchema(
                        likeSourceTable
                                .getUnresolvedSchema()
                                .resolve(catalogManager.getSchemaResolver()));
        sourcePartitionKeys = likeSourceTable.getPartitionKeys();
        likeOptions = likeClause.getOptions();
        sourceProperties = likeSourceTable.getOptions();
    } else {
        sourceTableSchema = TableSchema.builder().build();
        sourcePartitionKeys = Collections.emptyList();
        likeOptions = Collections.emptyList();
        sourceProperties = Collections.emptyMap();
    }

    // Per-feature strategy (e.g. INCLUDING/EXCLUDING) controlling how source-table
    // pieces combine with the declarations in the CREATE TABLE statement itself.
    Map<SqlTableLike.FeatureOption, SqlTableLike.MergingStrategy> mergingStrategies =
            mergeTableLikeUtil.computeMergingStrategies(likeOptions);

    Map<String, String> mergedOptions =
            mergeOptions(sqlCreateTable, sourceProperties, mergingStrategies);

    Optional<SqlTableConstraint> primaryKey =
            sqlCreateTable.getFullConstraints().stream()
                    .filter(SqlTableConstraint::isPrimaryKey)
                    .findAny();

    TableSchema mergedSchema =
            mergeTableLikeUtil.mergeTables(
                    mergingStrategies,
                    sourceTableSchema,
                    sqlCreateTable.getColumnList().getList(),
                    sqlCreateTable
                            .getWatermark()
                            .map(Collections::singletonList)
                            .orElseGet(Collections::emptyList),
                    primaryKey.orElse(null));

    List<String> partitionKeys =
            mergePartitions(
                    sourcePartitionKeys,
                    sqlCreateTable.getPartitionKeyList(),
                    mergingStrategies);
    // Fail fast if a partition column does not exist in the merged schema.
    verifyPartitioningColumnsExist(mergedSchema, partitionKeys);

    String tableComment =
            sqlCreateTable
                    .getComment()
                    .map(comment -> comment.getNlsString().getValue())
                    .orElse(null);

    return new CatalogTableImpl(mergedSchema, partitionKeys, mergedOptions, tableComment);
}
Usage example of org.apache.flink.table.catalog.CatalogTableImpl from the Apache Flink project: class CatalogConstraintTest, method testWithoutPrimaryKey.
/** A catalog table declared without a primary key must report no unique keys. */
@Test
public void testWithoutPrimaryKey() throws Exception {
    String[] fieldNames = {"a", "b", "c"};
    DataType[] fieldTypes = {DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.INT()};
    TableSchema schema = TableSchema.builder().fields(fieldNames, fieldTypes).build();

    catalog.createTable(
            new ObjectPath(databaseName, "T1"),
            new CatalogTableImpl(schema, buildCatalogTableProperties(schema), ""),
            false);

    RelNode relNode = TableTestUtil.toRelNode(tEnv.sqlQuery("select * from T1"));
    FlinkRelMetadataQuery metadataQuery =
            FlinkRelMetadataQuery.reuseOrCreate(relNode.getCluster().getMetadataQuery());

    // No primary key declared, so the metadata query must yield an empty key set.
    assertEquals(ImmutableSet.of(), metadataQuery.getUniqueKeys(relNode));
}
Usage example of org.apache.flink.table.catalog.CatalogTableImpl from the Apache Flink project: class CatalogConstraintTest, method testWithPrimaryKey.
/** A catalog table with a primary key on column "b" must report that column as a unique key. */
@Test
public void testWithPrimaryKey() throws Exception {
    String[] fieldNames = {"a", "b", "c"};
    // Primary-key column "b" must be NOT NULL for the constraint to be valid.
    DataType[] fieldTypes = {DataTypes.STRING(), DataTypes.BIGINT().notNull(), DataTypes.INT()};
    TableSchema schema =
            TableSchema.builder().fields(fieldNames, fieldTypes).primaryKey("b").build();

    catalog.createTable(
            new ObjectPath(databaseName, "T1"),
            new CatalogTableImpl(schema, buildCatalogTableProperties(schema), ""),
            false);

    RelNode relNode = TableTestUtil.toRelNode(tEnv.sqlQuery("select * from T1"));
    FlinkRelMetadataQuery metadataQuery =
            FlinkRelMetadataQuery.reuseOrCreate(relNode.getCluster().getMetadataQuery());

    // Column index 1 ("b") is the declared primary key.
    assertEquals(ImmutableSet.of(ImmutableBitSet.of(1)), metadataQuery.getUniqueKeys(relNode));
}
Usage example of org.apache.flink.table.catalog.CatalogTableImpl from the Apache Flink project: class CatalogStatisticsTest, method testGetStatsFromCatalogForCatalogTableImpl.
/**
 * Registers two identical CSV-backed catalog tables, attaches statistics to each (full stats on
 * T1, unknown row count on T2), and checks that the planner retrieves them from the catalog.
 */
@Test
public void testGetStatsFromCatalogForCatalogTableImpl() throws Exception {
    // Legacy descriptor-style properties for a filesystem CSV source.
    String[][] connectorProps = {
        {"connector.type", "filesystem"},
        {"connector.property-version", "1"},
        {"connector.path", "/path/to/csv"},
        {"format.type", "csv"},
        {"format.property-version", "1"},
        {"format.field-delimiter", ";"},
    };
    Map<String, String> properties = new HashMap<>();
    for (String[] entry : connectorProps) {
        properties.put(entry[0], entry[1]);
    }

    for (String tableName : new String[] {"T1", "T2"}) {
        catalog.createTable(
                new ObjectPath(databaseName, tableName),
                new CatalogTableImpl(tableSchema, properties, ""),
                false);
    }

    alterTableStatistics(catalog, "T1");
    assertStatistics(tEnv, "T1");

    alterTableStatisticsWithUnknownRowCount(catalog, "T2");
    assertTableStatisticsWithUnknownRowCount(tEnv, "T2");
}
Aggregations