Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class SqlToOperationConverterTest, method before().
@Before
public void before() throws TableAlreadyExistException, DatabaseNotExistException {
    catalogManager.initSchemaResolver(
            isStreamingMode,
            ExpressionResolverMocks.basicResolver(catalogManager, functionCatalog, parser));
    final ObjectPath path1 = new ObjectPath(catalogManager.getCurrentDatabase(), "t1");
    final ObjectPath path2 = new ObjectPath(catalogManager.getCurrentDatabase(), "t2");
    final TableSchema tableSchema =
            TableSchema.builder()
                    .field("a", DataTypes.BIGINT())
                    .field("b", DataTypes.VARCHAR(Integer.MAX_VALUE))
                    .field("c", DataTypes.INT())
                    .field("d", DataTypes.VARCHAR(Integer.MAX_VALUE))
                    .build();
    Map<String, String> options = new HashMap<>();
    options.put("connector", "COLLECTION");
    final CatalogTable catalogTable = new CatalogTableImpl(tableSchema, options, "");
    catalog.createTable(path1, catalogTable, true);
    catalog.createTable(path2, catalogTable, true);
}
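The setup above uses the older CatalogTableImpl/TableSchema construction path, which is deprecated in recent Flink versions. As a point of comparison, here is a minimal sketch (not part of the test) of the same two-table setup built with the CatalogTable.of(...) factory and the Schema builder that the other snippets on this page use; it assumes the same catalog, database, and connector option.

// Sketch only: equivalent setup with the non-deprecated CatalogTable.of(...) factory.
Schema schema = Schema.newBuilder()
        .column("a", DataTypes.BIGINT())
        .column("b", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .column("c", DataTypes.INT())
        .column("d", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .build();
Map<String, String> options = new HashMap<>();
options.put("connector", "COLLECTION");
// CatalogTable.of(schema, comment, partitionKeys, options)
CatalogTable table = CatalogTable.of(schema, "", Collections.emptyList(), options);
catalog.createTable(new ObjectPath(catalogManager.getCurrentDatabase(), "t1"), table, true);
catalog.createTable(new ObjectPath(catalogManager.getCurrentDatabase(), "t2"), table, true);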
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class SqlToOperationConverterTest, method testCreateTableLikeWithFullPath().
@Test
public void testCreateTableLikeWithFullPath() {
    Map<String, String> sourceProperties = new HashMap<>();
    sourceProperties.put("connector.type", "kafka");
    sourceProperties.put("format.type", "json");
    CatalogTable catalogTable =
            CatalogTable.of(
                    Schema.newBuilder()
                            .column("f0", DataTypes.INT().notNull())
                            .column("f1", DataTypes.TIMESTAMP(3))
                            .build(),
                    null, Collections.emptyList(), sourceProperties);
    catalogManager.createTable(
            catalogTable, ObjectIdentifier.of("builtin", "default", "sourceTable"), false);
    final String sql = "create table mytable like `builtin`.`default`.sourceTable";
    Operation operation = parseAndConvert(sql);
    assertThat(operation)
            .is(new HamcrestCondition<>(isCreateTableOperation(
                    withSchema(Schema.newBuilder()
                            .column("f0", DataTypes.INT().notNull())
                            .column("f1", DataTypes.TIMESTAMP(3))
                            .build()),
                    withOptions(entry("connector.type", "kafka"), entry("format.type", "json")))));
}
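The statement above copies both the schema and the options from sourceTable. As a hedged variation that is not taken from the test, the LIKE clause also accepts merging options; the sketch below assumes the same registered source table and the parseAndConvert helper from this test class.

// Sketch only: keep the schema from the source table but drop its connector options.
final String sqlWithOptions =
        "create table mytable with ('connector' = 'filesystem', 'path' = '/tmp') "
                + "like `builtin`.`default`.sourceTable (EXCLUDING OPTIONS)";
Operation likeOperation = parseAndConvert(sqlWithOptions);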
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class SqlToOperationConverterTest, method testCreateTableWithPrimaryKey().
@Test
public void testCreateTableWithPrimaryKey() {
    final String sql =
            "CREATE TABLE tbl1 (\n"
                    + " a bigint,\n" + " b varchar, \n" + " c int, \n" + " d varchar, \n"
                    + " constraint ct1 primary key(a, b) not enforced\n"
                    + ") with (\n"
                    + " 'connector' = 'kafka', \n" + " 'kafka.topic' = 'log.test'\n"
                    + ")\n";
    FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    final CalciteParser parser = getParserBySqlDialect(SqlDialect.DEFAULT);
    Operation operation = parse(sql, planner, parser);
    assertThat(operation).isInstanceOf(CreateTableOperation.class);
    CreateTableOperation op = (CreateTableOperation) operation;
    CatalogTable catalogTable = op.getCatalogTable();
    TableSchema tableSchema = catalogTable.getSchema();
    assertThat(tableSchema.getPrimaryKey().map(UniqueConstraint::asSummaryString).orElse("fakeVal"))
            .isEqualTo("CONSTRAINT ct1 PRIMARY KEY (a, b)");
    assertThat(tableSchema.getFieldNames()).isEqualTo(new String[] {"a", "b", "c", "d"});
    assertThat(tableSchema.getFieldDataTypes())
            .isEqualTo(new DataType[] {
                    DataTypes.BIGINT().notNull(), DataTypes.STRING().notNull(),
                    DataTypes.INT(), DataTypes.STRING()});
}
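Flink only accepts primary keys declared NOT ENFORCED, which is why the DDL above spells that out. A minimal hedged sketch of the negative case follows; it reuses the planner and parser from the test body, and the exact exception type is an assumption, not taken from this test.

// Sketch only: without NOT ENFORCED the converter is expected to reject the statement.
final String enforcedSql =
        "CREATE TABLE tbl2 (\n"
                + " a bigint,\n"
                + " constraint ct2 primary key(a)\n"
                + ") with (\n"
                + " 'connector' = 'kafka'\n"
                + ")";
assertThatThrownBy(() -> parse(enforcedSql, planner, parser))
        .isInstanceOf(ValidationException.class);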
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class DynamicTableSinkSpecSerdeTest, method testDynamicTableSinkSpecSerde().
static Stream<DynamicTableSinkSpec> testDynamicTableSinkSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", FileSystemTableFactory.IDENTIFIER);
    options1.put("format", TestCsvFormatFactory.IDENTIFIER);
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 = new ResolvedSchema(
            Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
            Collections.emptyList(), null);
    final CatalogTable catalogTable1 = CatalogTable.of(
            Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
            null, Collections.emptyList(), options1);
    DynamicTableSinkSpec spec1 = new DynamicTableSinkSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable1, resolvedSchema1)),
            null);
    Map<String, String> options2 = new HashMap<>();
    options2.put("connector", FileSystemTableFactory.IDENTIFIER);
    options2.put("format", TestCsvFormatFactory.IDENTIFIER);
    options2.put("path", "/tmp");
    final ResolvedSchema resolvedSchema2 = new ResolvedSchema(
            Arrays.asList(
                    Column.physical("a", DataTypes.BIGINT()),
                    Column.physical("b", DataTypes.INT()),
                    Column.physical("p", DataTypes.STRING())),
            Collections.emptyList(), null);
    final CatalogTable catalogTable2 = CatalogTable.of(
            Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build(),
            null, Collections.emptyList(), options2);
    DynamicTableSinkSpec spec2 = new DynamicTableSinkSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
            Arrays.asList(
                    new OverwriteSpec(true),
                    new PartitioningSpec(new HashMap<String, String>() {
                        {
                            put("p", "A");
                        }
                    })));
    Map<String, String> options3 = new HashMap<>();
    options3.put("connector", TestValuesTableFactory.IDENTIFIER);
    options3.put("writable-metadata", "m:STRING");
    final ResolvedSchema resolvedSchema3 = new ResolvedSchema(
            Arrays.asList(
                    Column.physical("a", DataTypes.BIGINT()),
                    Column.physical("b", DataTypes.INT()),
                    Column.metadata("m", DataTypes.STRING(), null, false)),
            Collections.emptyList(), null);
    final CatalogTable catalogTable3 = CatalogTable.of(
            Schema.newBuilder().fromResolvedSchema(resolvedSchema3).build(),
            null, Collections.emptyList(), options3);
    DynamicTableSinkSpec spec3 = new DynamicTableSinkSpec(
            ContextResolvedTable.temporary(
                    ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                    new ResolvedCatalogTable(catalogTable3, resolvedSchema3)),
            Collections.singletonList(new WritingMetadataSpec(
                    Collections.singletonList("m"),
                    RowType.of(new BigIntType(), new IntType()))));
    return Stream.of(spec1, spec2, spec3);
}
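The static method above is the kind of factory that feeds a JUnit 5 @MethodSource: each DynamicTableSinkSpec becomes one test invocation. A hedged sketch of the consuming side is shown below; the test method name and body are assumptions, and the JSON round-trip machinery of the real serde test is not part of this snippet.

// Sketch only: parameterized consumer for the spec stream above.
@ParameterizedTest
@MethodSource("testDynamicTableSinkSpecSerde")
void serde(DynamicTableSinkSpec spec) {
    // Placeholder body: the real test serializes the spec to JSON, deserializes it
    // again, and compares the result with the original spec.
    assertThat(spec).isNotNull();
}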
Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.
From the class SqlToOperationConverterTest, method testCreateTableWithComputedColumn().
@Test
public void testCreateTableWithComputedColumn() {
    final String sql =
            "CREATE TABLE tbl1 (\n"
                    + " a int,\n" + " b varchar, \n" + " c as a - 1, \n"
                    + " d as b || '$$', \n" + " e as my_udf1(a),"
                    + " f as `default`.my_udf2(a) + 1,"
                    + " g as builtin.`default`.my_udf3(a) || '##'\n" + ")\n"
                    + " with (\n" + " 'connector' = 'kafka', \n"
                    + " 'kafka.topic' = 'log.test'\n" + ")\n";
    functionCatalog.registerTempCatalogScalarFunction(
            ObjectIdentifier.of("builtin", "default", "my_udf1"), Func0$.MODULE$);
    functionCatalog.registerTempCatalogScalarFunction(
            ObjectIdentifier.of("builtin", "default", "my_udf2"), Func1$.MODULE$);
    functionCatalog.registerTempCatalogScalarFunction(
            ObjectIdentifier.of("builtin", "default", "my_udf3"), Func8$.MODULE$);
    FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    Operation operation = parse(sql, planner, getParserBySqlDialect(SqlDialect.DEFAULT));
    assertThat(operation).isInstanceOf(CreateTableOperation.class);
    CreateTableOperation op = (CreateTableOperation) operation;
    CatalogTable catalogTable = op.getCatalogTable();
    assertThat(catalogTable.getSchema().getFieldNames())
            .isEqualTo(new String[] {"a", "b", "c", "d", "e", "f", "g"});
    assertThat(catalogTable.getSchema().getFieldDataTypes())
            .isEqualTo(new DataType[] {
                    DataTypes.INT(), DataTypes.STRING(), DataTypes.INT(), DataTypes.STRING(),
                    DataTypes.INT().notNull(), DataTypes.INT(), DataTypes.STRING()});
    String[] columnExpressions =
            catalogTable.getSchema().getTableColumns().stream()
                    .filter(ComputedColumn.class::isInstance)
                    .map(ComputedColumn.class::cast)
                    .map(ComputedColumn::getExpression)
                    .toArray(String[]::new);
    String[] expected = new String[] {
            "`a` - 1", "`b` || '$$'", "`builtin`.`default`.`my_udf1`(`a`)",
            "`builtin`.`default`.`my_udf2`(`a`) + 1", "`builtin`.`default`.`my_udf3`(`a`) || '##'"};
    assertThat(columnExpressions).isEqualTo(expected);
}
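For comparison, computed columns like the ones asserted above can also be declared programmatically. A minimal hedged sketch using the Table API Schema builder follows; it is not part of the test and assumes the same UDF registrations as above.

// Sketch only: computed columns declared via Schema.Builder instead of SQL DDL.
Schema schemaWithComputedColumns = Schema.newBuilder()
        .column("a", DataTypes.INT())
        .column("b", DataTypes.STRING())
        .columnByExpression("c", "a - 1")
        .columnByExpression("d", "b || '$$'")
        .columnByExpression("e", "my_udf1(a)")
        .build();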