Example usage of org.apache.flink.table.operations.Operation in the Apache Flink project, taken from the testMergingCreateTableLike method of the SqlToOperationConverterTest class.
@Test
public void testMergingCreateTableLike() {
    // Register a source table that carries a format option, a computed column,
    // a watermark, and two partition keys — everything the LIKE clause can merge.
    Map<String, String> originOptions = new HashMap<>();
    originOptions.put("format.type", "json");
    Schema originSchema =
            Schema.newBuilder()
                    .column("f0", DataTypes.INT().notNull())
                    .column("f1", DataTypes.TIMESTAMP(3))
                    .columnByExpression("f2", "`f0` + 12345")
                    .watermark("f1", "`f1` - interval '1' second")
                    .build();
    CatalogTable originTable =
            CatalogTable.of(originSchema, null, Arrays.asList("f0", "f1"), originOptions);
    catalogManager.createTable(
            originTable, ObjectIdentifier.of("builtin", "default", "sourceTable"), false);

    // Derived table excludes the generated column and partitions of the source,
    // overwrites its options and watermark, and adds a column plus new partitioning.
    final String sql =
            "create table derivedTable(\n"
                    + " a int,\n"
                    + " watermark for f1 as `f1` - interval '5' second\n"
                    + ")\n"
                    + "PARTITIONED BY (a, f0)\n"
                    + "with (\n"
                    + " 'connector.type' = 'kafka'"
                    + ")\n"
                    + "like sourceTable (\n"
                    + " EXCLUDING GENERATED\n"
                    + " EXCLUDING PARTITIONS\n"
                    + " OVERWRITING OPTIONS\n"
                    + " OVERWRITING WATERMARKS"
                    + ")";

    // Expected merge result: source columns (minus generated f2), the new column a,
    // the overwritten watermark, the union of options, and the derived partitioning.
    Schema expectedSchema =
            Schema.newBuilder()
                    .column("f0", DataTypes.INT().notNull())
                    .column("f1", DataTypes.TIMESTAMP(3))
                    .column("a", DataTypes.INT())
                    .watermark("f1", "`f1` - INTERVAL '5' SECOND")
                    .build();

    Operation operation = parseAndConvert(sql);
    assertThat(operation)
            .is(
                    new HamcrestCondition<>(
                            isCreateTableOperation(
                                    withSchema(expectedSchema),
                                    withOptions(
                                            entry("connector.type", "kafka"),
                                            entry("format.type", "json")),
                                    partitionedBy("a", "f0"))));
}
Example usage of org.apache.flink.table.operations.Operation in the Apache Flink project, taken from the testEnd method of the SqlToOperationConverterTest class.
@Test
public void testEnd() {
    // An END statement must be converted into an EndStatementSetOperation
    // whose summary string echoes the statement itself.
    Operation converted = parse("END", SqlDialect.DEFAULT);
    assertThat(converted).isInstanceOf(EndStatementSetOperation.class);
    EndStatementSetOperation endOperation = (EndStatementSetOperation) converted;
    assertThat(endOperation.asSummaryString()).isEqualTo("END");
}
Example usage of org.apache.flink.table.operations.Operation in the Apache Flink project, taken from the assertShowFunctions helper method of the SqlToOperationConverterTest class.
/**
 * Parses the given SHOW FUNCTIONS statement and verifies it converts to a
 * {@link ShowFunctionsOperation} with the expected summary string and scope.
 */
private void assertShowFunctions(String sql, String expectedSummary, FunctionScope expectedScope) {
    Operation converted = parse(sql, SqlDialect.DEFAULT);
    assertThat(converted).isInstanceOf(ShowFunctionsOperation.class);
    ShowFunctionsOperation showFunctions = (ShowFunctionsOperation) converted;
    // Check the summary first, then the resolved scope.
    assertThat(showFunctions.asSummaryString()).isEqualTo(expectedSummary);
    assertThat(showFunctions.getFunctionScope()).isEqualTo(expectedScope);
}
Example usage of org.apache.flink.table.operations.Operation in the Apache Flink project, taken from the testSqlRichExplainWithStatementSet method of the SqlToOperationConverterTest class.
@Test
public void testSqlRichExplainWithStatementSet() {
    // EXPLAIN PLAN FOR wrapping a multi-insert statement set must yield an ExplainOperation.
    final String statementSetSql =
            "explain plan for statement set begin "
                    + "insert into t1 select a, b, c, d from t2 where a > 1;"
                    + "insert into t1 select a, b, c, d from t2 where a > 2;"
                    + "end";
    final FlinkPlannerImpl flinkPlanner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    final CalciteParser calciteParser = getParserBySqlDialect(SqlDialect.DEFAULT);
    Operation converted = parse(statementSetSql, flinkPlanner, calciteParser);
    assertThat(converted).isInstanceOf(ExplainOperation.class);
}
Example usage of org.apache.flink.table.operations.Operation in the Apache Flink project, taken from the testDropDatabase method of the SqlToOperationConverterTest class.
@Test
public void testDropDatabase() {
    // Each statement is paired index-wise with the expected catalog name,
    // IF EXISTS flag, and cascade flag; the database name is always db1.
    final String[] statements = {
        "drop database db1",
        "drop database if exists db1",
        "drop database if exists cat1.db1 CASCADE",
        "drop database if exists cat1.db1 RESTRICT"
    };
    final String[] catalogs = {"builtin", "builtin", "cat1", "cat1"};
    final boolean[] ifExistsFlags = {false, true, true, true};
    final boolean[] cascadeFlags = {false, false, true, false};
    for (int idx = 0; idx < statements.length; idx++) {
        Operation converted = parse(statements[idx], SqlDialect.DEFAULT);
        assertThat(converted).isInstanceOf(DropDatabaseOperation.class);
        DropDatabaseOperation dropDatabase = (DropDatabaseOperation) converted;
        assertThat(dropDatabase.getCatalogName()).isEqualTo(catalogs[idx]);
        assertThat(dropDatabase.getDatabaseName()).isEqualTo("db1");
        assertThat(dropDatabase.isIfExists()).isEqualTo(ifExistsFlags[idx]);
        assertThat(dropDatabase.isCascade()).isEqualTo(cascadeFlags[idx]);
    }
}
Aggregations