Use of org.apache.flink.table.operations.Operation in project flink by apache.
The class SqlToOperationConverterTest, method testBeginStatementSet.
@Test
public void testBeginStatementSet() {
    final String sql = "BEGIN STATEMENT SET";
    Operation operation = parse(sql, SqlDialect.DEFAULT);
    assertThat(operation).isInstanceOf(BeginStatementSetOperation.class);
    final BeginStatementSetOperation beginStatementSetOperation =
            (BeginStatementSetOperation) operation;
    assertThat(beginStatementSetOperation.asSummaryString()).isEqualTo("BEGIN STATEMENT SET");
}
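The closing statement parses the same way; a minimal sketch, assuming the same parse helper and imports as above (in Flink the END statement maps to EndStatementSetOperation):

@Test
public void testEndStatementSet() {
    final String sql = "END";
    Operation operation = parse(sql, SqlDialect.DEFAULT);
    assertThat(operation).isInstanceOf(EndStatementSetOperation.class);
    final EndStatementSetOperation endStatementSetOperation =
            (EndStatementSetOperation) operation;
    assertThat(endStatementSetOperation.asSummaryString()).isEqualTo("END");
}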
Use of org.apache.flink.table.operations.Operation in project flink by apache.
The class SqlToOperationConverterTest, method testSet.
@Test
public void testSet() {
    // Bare SET lists all session options, so neither key nor value is present.
    Operation operation1 = parse("SET", SqlDialect.DEFAULT);
    assertThat(operation1).isInstanceOf(SetOperation.class);
    SetOperation setOperation1 = (SetOperation) operation1;
    assertThat(setOperation1.getKey()).isNotPresent();
    assertThat(setOperation1.getValue()).isNotPresent();
    Operation operation2 = parse("SET 'test-key' = 'test-value'", SqlDialect.DEFAULT);
    assertThat(operation2).isInstanceOf(SetOperation.class);
    SetOperation setOperation2 = (SetOperation) operation2;
    assertThat(setOperation2.getKey()).hasValue("test-key");
    assertThat(setOperation2.getValue()).hasValue("test-value");
}
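RESET follows the same key pattern; a minimal sketch under the same assumptions (ResetOperation, like SetOperation, exposes an Optional key):

@Test
public void testReset() {
    // Bare RESET clears all session options, so no key is present.
    Operation operation1 = parse("RESET", SqlDialect.DEFAULT);
    assertThat(operation1).isInstanceOf(ResetOperation.class);
    assertThat(((ResetOperation) operation1).getKey()).isNotPresent();
    Operation operation2 = parse("RESET 'test-key'", SqlDialect.DEFAULT);
    assertThat(operation2).isInstanceOf(ResetOperation.class);
    assertThat(((ResetOperation) operation2).getKey()).hasValue("test-key");
}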
Use of org.apache.flink.table.operations.Operation in project flink by apache.
The class SqlToOperationConverterTest, method testBasicCreateTableLike.
@Test
public void testBasicCreateTableLike() {
    Map<String, String> sourceProperties = new HashMap<>();
    sourceProperties.put("format.type", "json");
    CatalogTable catalogTable =
            CatalogTable.of(
                    Schema.newBuilder()
                            .column("f0", DataTypes.INT().notNull())
                            .column("f1", DataTypes.TIMESTAMP(3))
                            .build(),
                    null,
                    Collections.emptyList(),
                    sourceProperties);
    catalogManager.createTable(
            catalogTable, ObjectIdentifier.of("builtin", "default", "sourceTable"), false);
    final String sql =
            "create table derivedTable(\n"
                    + " a int,\n"
                    + " watermark for f1 as `f1` - interval '5' second\n"
                    + ")\n"
                    + "PARTITIONED BY (a, f0)\n"
                    + "with (\n"
                    + " 'connector.type' = 'kafka'"
                    + ")\n"
                    + "like sourceTable";
    Operation operation = parseAndConvert(sql);
    // The derived table merges the source schema and options with its own additions.
    assertThat(operation)
            .is(new HamcrestCondition<>(
                    isCreateTableOperation(
                            withSchema(
                                    Schema.newBuilder()
                                            .column("f0", DataTypes.INT().notNull())
                                            .column("f1", DataTypes.TIMESTAMP(3))
                                            .column("a", DataTypes.INT())
                                            .watermark("f1", "`f1` - INTERVAL '5' SECOND")
                                            .build()),
                            withOptions(
                                    entry("connector.type", "kafka"),
                                    entry("format.type", "json")),
                            partitionedBy("a", "f0"))));
}
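The LIKE clause also accepts merge options; a minimal sketch reusing the sourceTable registered above, on the assumption that the matcher helper accepts any subset of conditions (EXCLUDING OPTIONS drops the inherited 'format.type' property):

@Test
public void testCreateTableLikeExcludingOptions() {
    final String sql =
            "create table derivedTable\n"
                    + "with (\n"
                    + " 'connector.type' = 'kafka'\n"
                    + ")\n"
                    + "like sourceTable (EXCLUDING OPTIONS)";
    Operation operation = parseAndConvert(sql);
    // Only the explicitly declared option should survive; 'format.type' = 'json'
    // from sourceTable is excluded.
    assertThat(operation)
            .is(new HamcrestCondition<>(
                    isCreateTableOperation(
                            withOptions(entry("connector.type", "kafka")))));
}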
Use of org.apache.flink.table.operations.Operation in project flink by apache.
The class SqlToOperationConverterTest, method testUnloadModule.
@Test
public void testUnloadModule() {
    final String sql = "UNLOAD MODULE dummy";
    final String expectedModuleName = "dummy";
    Operation operation = parse(sql, SqlDialect.DEFAULT);
    assertThat(operation).isInstanceOf(UnloadModuleOperation.class);
    final UnloadModuleOperation unloadModuleOperation = (UnloadModuleOperation) operation;
    assertThat(unloadModuleOperation.getModuleName()).isEqualTo(expectedModuleName);
}
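LOAD MODULE is the mirror image; a minimal sketch assuming the same parse helper, where LoadModuleOperation exposes the module name and any WITH options:

@Test
public void testLoadModule() {
    final String sql = "LOAD MODULE dummy WITH ('k1' = 'v1')";
    Operation operation = parse(sql, SqlDialect.DEFAULT);
    assertThat(operation).isInstanceOf(LoadModuleOperation.class);
    final LoadModuleOperation loadModuleOperation = (LoadModuleOperation) operation;
    assertThat(loadModuleOperation.getModuleName()).isEqualTo("dummy");
    assertThat(loadModuleOperation.getOptions())
            .isEqualTo(Collections.singletonMap("k1", "v1"));
}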
Use of org.apache.flink.table.operations.Operation in project flink by apache.
The class SqlToOperationConverterTest, method checkExplainSql.
private void checkExplainSql(String sql) {
    FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    CalciteParser parser = getParserBySqlDialect(SqlDialect.DEFAULT);
    SqlNode node = parser.parse(sql);
    assertThat(node).isInstanceOf(SqlRichExplain.class);
    Operation operation =
            SqlToOperationConverter.convert(planner, catalogManager, node).get();
    assertThat(operation).isInstanceOf(ExplainOperation.class);
}
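A typical caller passes an EXPLAIN statement straight through; an illustrative invocation, assuming a table t2 is registered in the test catalog:

@Test
public void testSqlRichExplainWithSelect() {
    checkExplainSql("explain plan for select a, b, c, d from t2");
}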