
Example 36 with CatalogTable

Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.

From the class SqlToOperationConverterTest, method testCreateTableWithMinusInOptionKey.

@Test
public void testCreateTableWithMinusInOptionKey() {
    final String sql = "create table source_table(\n" + "  a int,\n" + "  b bigint,\n" + "  c varchar\n" + ") with (\n" + "  'a-B-c-d124' = 'Ab',\n" + "  'a.b-c-d.e-f.g' = 'ada',\n" + "  'a.b-c-d.e-f1231.g' = 'ada',\n" + "  'a.b-c-d.*' = 'adad')\n";
    final FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    final CalciteParser parser = getParserBySqlDialect(SqlDialect.DEFAULT);
    SqlNode node = parser.parse(sql);
    assertThat(node).isInstanceOf(SqlCreateTable.class);
    Operation operation = SqlToOperationConverter.convert(planner, catalogManager, node).get();
    assertThat(operation).isInstanceOf(CreateTableOperation.class);
    CreateTableOperation op = (CreateTableOperation) operation;
    CatalogTable catalogTable = op.getCatalogTable();
    Map<String, String> options = catalogTable.getOptions().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    Map<String, String> sortedProperties = new TreeMap<>(options);
    final String expected = "{a-B-c-d124=Ab, " + "a.b-c-d.*=adad, " + "a.b-c-d.e-f.g=ada, " + "a.b-c-d.e-f1231.g=ada}";
    assertThat(sortedProperties.toString()).isEqualTo(expected);
}
Also used: java.util.HashMap, java.util.Map, java.util.TreeMap, org.apache.calcite.sql.SqlNode, org.apache.flink.table.catalog.CatalogTable, org.apache.flink.table.operations.BeginStatementSetOperation, org.apache.flink.table.operations.EndStatementSetOperation, org.apache.flink.table.operations.ExplainOperation, org.apache.flink.table.operations.LoadModuleOperation, org.apache.flink.table.operations.Operation, org.apache.flink.table.operations.QueryOperation, org.apache.flink.table.operations.ShowFunctionsOperation, org.apache.flink.table.operations.ShowModulesOperation, org.apache.flink.table.operations.SinkModifyOperation, org.apache.flink.table.operations.SourceQueryOperation, org.apache.flink.table.operations.StatementSetOperation, org.apache.flink.table.operations.UnloadModuleOperation, org.apache.flink.table.operations.UseCatalogOperation, org.apache.flink.table.operations.UseDatabaseOperation, org.apache.flink.table.operations.UseModulesOperation, org.apache.flink.table.operations.command.AddJarOperation, org.apache.flink.table.operations.command.RemoveJarOperation, org.apache.flink.table.operations.command.ResetOperation, org.apache.flink.table.operations.command.SetOperation, org.apache.flink.table.operations.command.ShowJarsOperation, org.apache.flink.table.operations.ddl.AlterDatabaseOperation, org.apache.flink.table.operations.ddl.AlterTableAddConstraintOperation, org.apache.flink.table.operations.ddl.AlterTableDropConstraintOperation, org.apache.flink.table.operations.ddl.AlterTableOptionsOperation, org.apache.flink.table.operations.ddl.AlterTableRenameOperation, org.apache.flink.table.operations.ddl.CreateDatabaseOperation, org.apache.flink.table.operations.ddl.CreateTableOperation, org.apache.flink.table.operations.ddl.CreateViewOperation, org.apache.flink.table.operations.ddl.DropDatabaseOperation, org.apache.flink.table.planner.calcite.FlinkPlannerImpl, org.apache.flink.table.planner.parse.CalciteParser, org.apache.flink.table.planner.utils.OperationMatchers.isCreateTableOperation, org.junit.Test
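
For comparison, the hyphenated option keys exercised by this test can also be attached to a CatalogTable built programmatically rather than through DDL. A minimal sketch, assuming Flink's Table API is on the classpath; the class name OptionKeySketch is invented for illustration:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;

public class OptionKeySketch {
    public static void main(String[] args) {
        // On the catalog side, option keys are plain map keys, so '-' and '*'
        // need no escaping; the test above only exercises the SQL parser path.
        Map<String, String> options = new HashMap<>();
        options.put("a-B-c-d124", "Ab");
        options.put("a.b-c-d.*", "adad");

        CatalogTable table =
                CatalogTable.of(
                        Schema.newBuilder().column("a", DataTypes.INT()).build(),
                        null, // no comment
                        Collections.emptyList(), // no partition keys
                        options);

        // Prints {a-B-c-d124=Ab, a.b-c-d.*=adad} (iteration order may vary).
        System.out.println(table.getOptions());
    }
}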

Example 37 with CatalogTable

Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.

From the class SqlToOperationConverterTest, method testCreateTableLikeInvalidWatermark.

@Test
public void testCreateTableLikeInvalidWatermark() {
    CatalogTable catalogTable =
            CatalogTable.of(
                    Schema.newBuilder().column("f0", DataTypes.INT().notNull()).build(),
                    null,
                    Collections.emptyList(),
                    Collections.emptyMap());
    catalogManager.createTable(catalogTable, ObjectIdentifier.of("builtin", "default", "sourceTable"), false);
    final String sql = "create table derivedTable(\n" + "  a int,\n" + "  watermark for f1 as `f1` - interval '5' second\n" + ")\n" + "like sourceTable";
    assertThatThrownBy(() -> parseAndConvert(sql)).isInstanceOf(ValidationException.class).hasMessageContaining("The rowtime attribute field 'f1' is not defined in the table schema," + " at line 3, column 17\n" + "Available fields: ['f0', 'a']");
}
Also used: org.apache.flink.table.api.ValidationException, org.apache.flink.table.catalog.CatalogTable, org.junit.Test
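
The failure is expected: the merged schema contains only f0 (from sourceTable) and a (newly declared), so the watermark has no f1 column to attach to. A hedged sketch of a variant that should validate, assuming the same parseAndConvert helper as in the test; fixedSql is an invented name:

// Hypothetical passing variant: f1 is declared explicitly, so the watermark
// references a column that exists in the merged schema.
final String fixedSql =
        "create table derivedTable(\n"
                + "  a int,\n"
                + "  f1 timestamp(3),\n"
                + "  watermark for f1 as `f1` - interval '5' second\n"
                + ")\n"
                + "like sourceTable";
Operation op = parseAndConvert(fixedSql); // expected to yield a CreateTableOperation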

Example 38 with CatalogTable

Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.

From the class SqlToOperationConverterTest, method testBasicCreateTableLike.

@Test
public void testBasicCreateTableLike() {
    Map<String, String> sourceProperties = new HashMap<>();
    sourceProperties.put("format.type", "json");
    CatalogTable catalogTable =
            CatalogTable.of(
                    Schema.newBuilder()
                            .column("f0", DataTypes.INT().notNull())
                            .column("f1", DataTypes.TIMESTAMP(3))
                            .build(),
                    null,
                    Collections.emptyList(),
                    sourceProperties);
    catalogManager.createTable(catalogTable, ObjectIdentifier.of("builtin", "default", "sourceTable"), false);
    final String sql = "create table derivedTable(\n" + "  a int,\n" + "  watermark for f1 as `f1` - interval '5' second\n" + ")\n" + "PARTITIONED BY (a, f0)\n" + "with (\n" + "  'connector.type' = 'kafka'" + ")\n" + "like sourceTable";
    Operation operation = parseAndConvert(sql);
    assertThat(operation)
            .is(
                    new HamcrestCondition<>(
                            isCreateTableOperation(
                                    withSchema(
                                            Schema.newBuilder()
                                                    .column("f0", DataTypes.INT().notNull())
                                                    .column("f1", DataTypes.TIMESTAMP(3))
                                                    .column("a", DataTypes.INT())
                                                    .watermark("f1", "`f1` - INTERVAL '5' SECOND")
                                                    .build()),
                                    withOptions(
                                            entry("connector.type", "kafka"),
                                            entry("format.type", "json")),
                                    partitionedBy("a", "f0"))));
}
Also used: java.util.HashMap, org.apache.flink.table.catalog.CatalogTable, org.apache.flink.table.operations.BeginStatementSetOperation, org.apache.flink.table.operations.EndStatementSetOperation, org.apache.flink.table.operations.ExplainOperation, org.apache.flink.table.operations.LoadModuleOperation, org.apache.flink.table.operations.Operation, org.apache.flink.table.operations.QueryOperation, org.apache.flink.table.operations.ShowFunctionsOperation, org.apache.flink.table.operations.ShowModulesOperation, org.apache.flink.table.operations.SinkModifyOperation, org.apache.flink.table.operations.SourceQueryOperation, org.apache.flink.table.operations.StatementSetOperation, org.apache.flink.table.operations.UnloadModuleOperation, org.apache.flink.table.operations.UseCatalogOperation, org.apache.flink.table.operations.UseDatabaseOperation, org.apache.flink.table.operations.UseModulesOperation, org.apache.flink.table.operations.command.AddJarOperation, org.apache.flink.table.operations.command.RemoveJarOperation, org.apache.flink.table.operations.command.ResetOperation, org.apache.flink.table.operations.command.SetOperation, org.apache.flink.table.operations.command.ShowJarsOperation, org.apache.flink.table.operations.ddl.AlterDatabaseOperation, org.apache.flink.table.operations.ddl.AlterTableAddConstraintOperation, org.apache.flink.table.operations.ddl.AlterTableDropConstraintOperation, org.apache.flink.table.operations.ddl.AlterTableOptionsOperation, org.apache.flink.table.operations.ddl.AlterTableRenameOperation, org.apache.flink.table.operations.ddl.CreateDatabaseOperation, org.apache.flink.table.operations.ddl.CreateTableOperation, org.apache.flink.table.operations.ddl.CreateViewOperation, org.apache.flink.table.operations.ddl.DropDatabaseOperation, org.apache.flink.table.planner.utils.OperationMatchers.isCreateTableOperation, org.junit.Test
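
Note how the options merge: format.type survives from sourceTable while connector.type comes from the derived DDL. Per the Flink documentation, omitting the like-options list is equivalent to INCLUDING ALL OVERWRITING OPTIONS, so the statement above could be spelled out explicitly as in this sketch (treat the equivalence as an assumption to verify against your Flink version):

// Hypothetical explicit form of the implicit defaults used by the test above;
// behavior should be identical.
final String explicitSql =
        "create table derivedTable(\n"
                + "  a int,\n"
                + "  watermark for f1 as `f1` - interval '5' second\n"
                + ")\n"
                + "PARTITIONED BY (a, f0)\n"
                + "with (\n"
                + "  'connector.type' = 'kafka'\n"
                + ")\n"
                + "like sourceTable (\n"
                + "   INCLUDING ALL\n"
                + "   OVERWRITING OPTIONS\n"
                + ")";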

Example 39 with CatalogTable

Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.

From the class DynamicTableSourceSpecSerdeTest, method testDynamicTableSinkSpecSerde.

public static Stream<DynamicTableSourceSpec> testDynamicTableSinkSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", FileSystemTableFactory.IDENTIFIER);
    options1.put("format", TestCsvFormatFactory.IDENTIFIER);
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 =
            new ResolvedSchema(
                    Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable1 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
                    null,
                    Collections.emptyList(),
                    options1);
    DynamicTableSourceSpec spec1 =
            new DynamicTableSourceSpec(
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of(
                                    DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                            new ResolvedCatalogTable(catalogTable1, resolvedSchema1)),
                    null);
    Map<String, String> options2 = new HashMap<>();
    options2.put("connector", TestValuesTableFactory.IDENTIFIER);
    options2.put("disable-lookup", "true");
    options2.put("enable-watermark-push-down", "true");
    options2.put("filterable-fields", "b");
    options2.put("bounded", "false");
    options2.put("readable-metadata", "m1:INT, m2:STRING");
    final ResolvedSchema resolvedSchema2 =
            new ResolvedSchema(
                    Arrays.asList(
                            Column.physical("a", DataTypes.BIGINT()),
                            Column.physical("b", DataTypes.INT()),
                            Column.physical("c", DataTypes.STRING()),
                            Column.physical("p", DataTypes.STRING()),
                            Column.metadata("m1", DataTypes.INT(), null, false),
                            Column.metadata("m2", DataTypes.STRING(), null, false),
                            Column.physical("ts", DataTypes.TIMESTAMP(3))),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable2 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build(),
                    null,
                    Collections.emptyList(),
                    options2);
    FlinkTypeFactory factory = FlinkTypeFactory.INSTANCE();
    RexBuilder rexBuilder = new RexBuilder(factory);
    DynamicTableSourceSpec spec2 =
            new DynamicTableSourceSpec(
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of(
                                    DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                            new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
                    Arrays.asList(
                            new ProjectPushDownSpec(
                                    new int[][] {{0}, {1}, {4}, {6}},
                                    RowType.of(
                                            new LogicalType[] {
                                                new BigIntType(),
                                                new IntType(),
                                                new IntType(),
                                                new TimestampType(3)
                                            },
                                            new String[] {"a", "b", "m1", "ts"})),
                            new ReadingMetadataSpec(
                                    Arrays.asList("m1", "m2"),
                                    RowType.of(
                                            new LogicalType[] {
                                                new BigIntType(),
                                                new IntType(),
                                                new IntType(),
                                                new TimestampType(3)
                                            },
                                            new String[] {"a", "b", "m1", "ts"})),
                            new FilterPushDownSpec(
                                    Collections.singletonList(
                                            // b >= 10
                                            rexBuilder.makeCall(
                                                    SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
                                                    rexBuilder.makeInputRef(
                                                            factory.createSqlType(SqlTypeName.INTEGER), 1),
                                                    rexBuilder.makeExactLiteral(new BigDecimal(10))))),
                            new WatermarkPushDownSpec(
                                    rexBuilder.makeCall(
                                            SqlStdOperatorTable.MINUS,
                                            rexBuilder.makeInputRef(
                                                    factory.createSqlType(SqlTypeName.TIMESTAMP, 3), 3),
                                            rexBuilder.makeIntervalLiteral(
                                                    BigDecimal.valueOf(1000),
                                                    new SqlIntervalQualifier(
                                                            TimeUnit.SECOND,
                                                            2,
                                                            TimeUnit.SECOND,
                                                            6,
                                                            SqlParserPos.ZERO))),
                                    5000,
                                    RowType.of(
                                            new BigIntType(),
                                            new IntType(),
                                            new IntType(),
                                            new TimestampType(false, TimestampKind.ROWTIME, 3))),
                            new SourceWatermarkSpec(
                                    true,
                                    RowType.of(
                                            new BigIntType(),
                                            new IntType(),
                                            new IntType(),
                                            new TimestampType(false, TimestampKind.ROWTIME, 3))),
                            new LimitPushDownSpec(100),
                            new PartitionPushDownSpec(
                                    Arrays.asList(
                                            new HashMap<String, String>() {
                                                {
                                                    put("p", "A");
                                                }
                                            },
                                            new HashMap<String, String>() {
                                                {
                                                    put("p", "B");
                                                }
                                            }))));
    return Stream.of(spec1, spec2);
}
Also used: java.math.BigDecimal, java.util.HashMap, org.apache.calcite.rex.RexBuilder, org.apache.calcite.sql.SqlIntervalQualifier, org.apache.flink.table.catalog.CatalogTable, org.apache.flink.table.catalog.ResolvedCatalogTable, org.apache.flink.table.catalog.ResolvedSchema, org.apache.flink.table.planner.calcite.FlinkTypeFactory, org.apache.flink.table.planner.plan.abilities.source.FilterPushDownSpec, org.apache.flink.table.planner.plan.abilities.source.LimitPushDownSpec, org.apache.flink.table.planner.plan.abilities.source.PartitionPushDownSpec, org.apache.flink.table.planner.plan.abilities.source.ProjectPushDownSpec, org.apache.flink.table.planner.plan.abilities.source.ReadingMetadataSpec, org.apache.flink.table.planner.plan.abilities.source.SourceWatermarkSpec, org.apache.flink.table.planner.plan.abilities.source.WatermarkPushDownSpec, org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSourceSpec, org.apache.flink.table.types.logical.BigIntType, org.apache.flink.table.types.logical.IntType, org.apache.flink.table.types.logical.LogicalType, org.apache.flink.table.types.logical.TimestampType
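
Despite the test-like name, this method asserts nothing itself: it is a factory that supplies parameterized-test input. A minimal sketch of how such a Stream factory is typically wired up, assuming JUnit 5 and AssertJ are on the classpath; the names specs and eachSpecIsWellFormed are invented, and the placeholder assertion stands in for the real JSON round-trip logic:

import java.util.Collections;
import java.util.stream.Stream;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ContextResolvedTable;
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSourceSpec;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import static org.assertj.core.api.Assertions.assertThat;

class DynamicTableSourceSpecSerdeSketch {

    // Trimmed-down stand-in for the factory shown above: one spec, no abilities.
    static Stream<DynamicTableSourceSpec> specs() {
        ResolvedSchema schema =
                new ResolvedSchema(
                        Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                        Collections.emptyList(),
                        null);
        CatalogTable table =
                CatalogTable.of(
                        Schema.newBuilder().fromResolvedSchema(schema).build(),
                        null,
                        Collections.emptyList(),
                        Collections.singletonMap("connector", "values"));
        return Stream.of(
                new DynamicTableSourceSpec(
                        ContextResolvedTable.temporary(
                                ObjectIdentifier.of("cat", "db", "T"),
                                new ResolvedCatalogTable(table, schema)),
                        null));
    }

    // Each element of the stream becomes one test invocation.
    @ParameterizedTest
    @MethodSource("specs")
    void eachSpecIsWellFormed(DynamicTableSourceSpec spec) {
        // Placeholder: the real test serializes the spec to JSON and back,
        // which requires planner serde internals not shown here.
        assertThat(spec).isNotNull();
    }
}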

Example 40 with CatalogTable

Use of org.apache.flink.table.catalog.CatalogTable in project flink by apache.

From the class SqlToOperationConverterTest, method testMergingCreateTableLike.

@Test
public void testMergingCreateTableLike() {
    Map<String, String> sourceProperties = new HashMap<>();
    sourceProperties.put("format.type", "json");
    CatalogTable catalogTable =
            CatalogTable.of(
                    Schema.newBuilder()
                            .column("f0", DataTypes.INT().notNull())
                            .column("f1", DataTypes.TIMESTAMP(3))
                            .columnByExpression("f2", "`f0` + 12345")
                            .watermark("f1", "`f1` - interval '1' second")
                            .build(),
                    null,
                    Arrays.asList("f0", "f1"),
                    sourceProperties);
    catalogManager.createTable(catalogTable, ObjectIdentifier.of("builtin", "default", "sourceTable"), false);
    final String sql = "create table derivedTable(\n" + "  a int,\n" + "  watermark for f1 as `f1` - interval '5' second\n" + ")\n" + "PARTITIONED BY (a, f0)\n" + "with (\n" + "  'connector.type' = 'kafka'" + ")\n" + "like sourceTable (\n" + "   EXCLUDING GENERATED\n" + "   EXCLUDING PARTITIONS\n" + "   OVERWRITING OPTIONS\n" + "   OVERWRITING WATERMARKS" + ")";
    Operation operation = parseAndConvert(sql);
    assertThat(operation)
            .is(
                    new HamcrestCondition<>(
                            isCreateTableOperation(
                                    withSchema(
                                            Schema.newBuilder()
                                                    .column("f0", DataTypes.INT().notNull())
                                                    .column("f1", DataTypes.TIMESTAMP(3))
                                                    .column("a", DataTypes.INT())
                                                    .watermark("f1", "`f1` - INTERVAL '5' SECOND")
                                                    .build()),
                                    withOptions(
                                            entry("connector.type", "kafka"),
                                            entry("format.type", "json")),
                                    partitionedBy("a", "f0"))));
}
Also used: java.util.HashMap, org.apache.flink.table.catalog.CatalogTable, org.apache.flink.table.operations.BeginStatementSetOperation, org.apache.flink.table.operations.EndStatementSetOperation, org.apache.flink.table.operations.ExplainOperation, org.apache.flink.table.operations.LoadModuleOperation, org.apache.flink.table.operations.Operation, org.apache.flink.table.operations.QueryOperation, org.apache.flink.table.operations.ShowFunctionsOperation, org.apache.flink.table.operations.ShowModulesOperation, org.apache.flink.table.operations.SinkModifyOperation, org.apache.flink.table.operations.SourceQueryOperation, org.apache.flink.table.operations.StatementSetOperation, org.apache.flink.table.operations.UnloadModuleOperation, org.apache.flink.table.operations.UseCatalogOperation, org.apache.flink.table.operations.UseDatabaseOperation, org.apache.flink.table.operations.UseModulesOperation, org.apache.flink.table.operations.command.AddJarOperation, org.apache.flink.table.operations.command.RemoveJarOperation, org.apache.flink.table.operations.command.ResetOperation, org.apache.flink.table.operations.command.SetOperation, org.apache.flink.table.operations.command.ShowJarsOperation, org.apache.flink.table.operations.ddl.AlterDatabaseOperation, org.apache.flink.table.operations.ddl.AlterTableAddConstraintOperation, org.apache.flink.table.operations.ddl.AlterTableDropConstraintOperation, org.apache.flink.table.operations.ddl.AlterTableOptionsOperation, org.apache.flink.table.operations.ddl.AlterTableRenameOperation, org.apache.flink.table.operations.ddl.CreateDatabaseOperation, org.apache.flink.table.operations.ddl.CreateTableOperation, org.apache.flink.table.operations.ddl.CreateViewOperation, org.apache.flink.table.operations.ddl.DropDatabaseOperation, org.apache.flink.table.planner.utils.OperationMatchers.isCreateTableOperation, org.junit.Test
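
A commented restatement of the like-options in this test, tying each clause to what the assertion verifies (a sketch for reading, not standalone code; sqlAnnotated is an invented name):

final String sqlAnnotated =
        "create table derivedTable(\n"
                + "  a int,\n"
                + "  watermark for f1 as `f1` - interval '5' second\n"
                + ")\n"
                + "PARTITIONED BY (a, f0)\n"
                + "with (\n"
                + "  'connector.type' = 'kafka'\n"
                + ")\n"
                + "like sourceTable (\n"
                // the computed column f2 is dropped: it is absent from the expected schema
                + "   EXCLUDING GENERATED\n"
                // the source's (f0, f1) partitioning is dropped; the derived PARTITIONED BY (a, f0) wins
                + "   EXCLUDING PARTITIONS\n"
                // derived options override duplicate keys; here the keys are disjoint, so both survive
                + "   OVERWRITING OPTIONS\n"
                // the derived 5-second watermark replaces the source's 1-second watermark on f1
                + "   OVERWRITING WATERMARKS\n"
                + ")";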

Aggregations

CatalogTable (org.apache.flink.table.catalog.CatalogTable): 68
Test (org.junit.Test): 35
HashMap (java.util.HashMap): 30
CatalogTableImpl (org.apache.flink.table.catalog.CatalogTableImpl): 24
TableSchema (org.apache.flink.table.api.TableSchema): 17
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 17
CreateTableOperation (org.apache.flink.table.operations.ddl.CreateTableOperation): 14
ValidationException (org.apache.flink.table.api.ValidationException): 13
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 13
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 12
Operation (org.apache.flink.table.operations.Operation): 12
AlterTableAddConstraintOperation (org.apache.flink.table.operations.ddl.AlterTableAddConstraintOperation): 12
AlterTableDropConstraintOperation (org.apache.flink.table.operations.ddl.AlterTableDropConstraintOperation): 12
AlterTableOptionsOperation (org.apache.flink.table.operations.ddl.AlterTableOptionsOperation): 12
AlterTableRenameOperation (org.apache.flink.table.operations.ddl.AlterTableRenameOperation): 12
ExplainOperation (org.apache.flink.table.operations.ExplainOperation): 11
LoadModuleOperation (org.apache.flink.table.operations.LoadModuleOperation): 11
QueryOperation (org.apache.flink.table.operations.QueryOperation): 11
ShowFunctionsOperation (org.apache.flink.table.operations.ShowFunctionsOperation): 11
ShowModulesOperation (org.apache.flink.table.operations.ShowModulesOperation): 11