
Example 26 with StreamTableEnvironment

Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

From class TransformationsTest, method testLegacyStreamSource:

@Test
public void testLegacyStreamSource() {
    final JavaStreamTableTestUtil util = javaStreamTestUtil();
    final StreamTableEnvironment env = util.tableEnv();
    // An unbounded "values" source must translate into a CONTINUOUS_UNBOUNDED,
    // watermark-emitting legacy source transformation
    final Table table = env.from(
            TableDescriptor.forConnector("values")
                    .option("bounded", "false")
                    .schema(dummySchema())
                    .build());
    final LegacySourceTransformation<?> sourceTransform = toLegacySourceTransformation(env, table);
    assertBoundedness(Boundedness.CONTINUOUS_UNBOUNDED, sourceTransform);
    assertTrue(sourceTransform.getOperator().emitsProgressiveWatermarks());
}
Also used: Table (org.apache.flink.table.api.Table), JavaStreamTableTestUtil (org.apache.flink.table.planner.utils.JavaStreamTableTestUtil), StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment), Test (org.junit.Test)
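
For readers outside the Flink codebase: javaStreamTestUtil(), dummySchema(), and toLegacySourceTransformation() are planner test helpers. A minimal standalone sketch of the same descriptor pattern, assuming the flink-table-planner test jar (which registers the "values" testing connector) is on the classpath and substituting a hypothetical one-column schema for dummySchema():

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public final class UnboundedValuesSourceSketch {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // "bounded" = "false" is what makes the source CONTINUOUS_UNBOUNDED,
        // the boundedness the test above asserts on the transformation
        Table table = tableEnv.from(
                TableDescriptor.forConnector("values")
                        .option("bounded", "false")
                        .schema(Schema.newBuilder().column("i", DataTypes.INT()).build())
                        .build());
        table.printSchema();
    }
}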

Example 27 with StreamTableEnvironment

Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

From class CommonExecSinkITCase, method testUnifiedSinksAreUsableWithDataStreamSinkProvider:

@Test
public void testUnifiedSinksAreUsableWithDataStreamSinkProvider()
        throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<RowData>> fetched = sharedObjects.add(new ArrayList<>());
    final List<Row> rows = Arrays.asList(Row.of(1), Row.of(2));
    // Harness table reading from an in-memory source and writing through a
    // DataStreamSinkProvider into the shared "fetched" list
    final TableDescriptor sourceDescriptor = TableFactoryHarness.newBuilder()
            .schema(Schema.newBuilder().column("a", INT()).build())
            .source(new TestSource(rows))
            .sink(buildDataStreamSinkProvider(fetched))
            .build();
    tableEnv.createTable("T1", sourceDescriptor);
    final String sqlStmt = "INSERT INTO T1 SELECT * FROM T1";
    tableEnv.executeSql(sqlStmt).await();
    final List<Integer> fetchedRows = fetched.get().stream()
            .map(r -> r.getInt(0))
            .sorted()
            .collect(Collectors.toList());
    assertEquals(1, fetchedRows.get(0).intValue());
    assertEquals(2, fetchedRows.get(1).intValue());
}
Also used: List (java.util.List), ArrayList (java.util.ArrayList), StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment), Row (org.apache.flink.types.Row), TableDescriptor (org.apache.flink.table.api.TableDescriptor), Test (org.junit.Test)
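
The TableFactoryHarness pieces (TestSource, buildDataStreamSinkProvider, sharedObjects) are internal to the test class. The reusable part is the executeSql(...).await() pattern on an INSERT statement; a self-contained sketch using the built-in datagen and print connectors in place of the harness:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public final class InsertIntoAwaitSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        tableEnv.executeSql(
                "CREATE TABLE src (a INT) WITH ('connector' = 'datagen', 'number-of-rows' = '2')");
        tableEnv.executeSql("CREATE TABLE snk (a INT) WITH ('connector' = 'print')");
        // await() blocks until the bounded insert job completes, as in the test
        tableEnv.executeSql("INSERT INTO snk SELECT * FROM src").await();
    }
}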

Example 28 with StreamTableEnvironment

Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

From class CommonExecSinkITCase, method testNullEnforcer:

@Test
public void testNullEnforcer() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final List<Row> rows = Arrays.asList(
            Row.of(1, "Apache", 11),
            Row.of(2, null, 22),
            Row.of(null, "Flink", 33),
            Row.of(null, null, 44));
    final SharedReference<List<RowData>> results = sharedObjects.add(new ArrayList<>());
    tableEnv.createTable("T1", TableFactoryHarness.newBuilder()
            .schema(schemaForNotNullEnforcer())
            .source(new TestSource(rows))
            .sink(buildRuntimeSinkProvider(new RecordWriter(results)))
            .build());
    // Default config (ERROR): writing a null into a NOT NULL column fails the job
    final ExecutionException ee = assertThrows(
            ExecutionException.class,
            () -> tableEnv.executeSql("INSERT INTO T1 SELECT * FROM T1").await());
    assertThat(ExceptionUtils.findThrowableWithMessage(
                    ee,
                    "Column 'b' is NOT NULL, however, a null value is being written into it. "
                            + "You can set job configuration 'table.exec.sink.not-null-enforcer'='DROP' "
                            + "to suppress this exception and drop such records silently.")
            .isPresent())
            .isTrue();
    // Omitting a NOT NULL column from the insert column list fails validation
    results.get().clear();
    final ValidationException ve = assertThrows(
            ValidationException.class,
            () -> tableEnv.executeSql("INSERT INTO T1(a, b) SELECT (a, b) FROM T1").await());
    assertThat(ve.getMessage()).isEqualTo(
            "SQL validation failed. At line 0, column 0: Column 'c' has no default "
                    + "value and does not allow NULLs");
    // Switch the enforcer to DROP: rows violating NOT NULL are silently discarded
    try {
        tableEnv.getConfig().set(
                TABLE_EXEC_SINK_NOT_NULL_ENFORCER.key(),
                ExecutionConfigOptions.NotNullEnforcer.DROP.name());
        results.get().clear();
        tableEnv.executeSql("INSERT INTO T1 SELECT * FROM T1").await();
        // Only the two rows whose NOT NULL column 'b' is populated survive
        assertThat(results.get().size()).isEqualTo(2);
        assertThat(results.get().get(0).getInt(0)).isEqualTo(1);
        assertThat(results.get().get(0).getString(1).toString()).isEqualTo("Apache");
        assertThat(results.get().get(0).getInt(2)).isEqualTo(11);
        assertThat(results.get().get(1).isNullAt(0)).isTrue();
        assertThat(results.get().get(1).getString(1).toString()).isEqualTo("Flink");
        assertThat(results.get().get(1).getInt(2)).isEqualTo(33);
    } finally {
        tableEnv.getConfig().set(
                TABLE_EXEC_SINK_NOT_NULL_ENFORCER.key(),
                ExecutionConfigOptions.NotNullEnforcer.ERROR.name());
    }
}
Also used: ValidationException (org.apache.flink.table.api.ValidationException), List (java.util.List), ArrayList (java.util.ArrayList), StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment), Row (org.apache.flink.types.Row), ExecutionException (java.util.concurrent.ExecutionException), Test (org.junit.Test)
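
Stripped of the harness, the behavior under test is driven entirely by the table.exec.sink.not-null-enforcer option. A hedged sketch of the same toggle against plain built-in connectors (src and snk are placeholder names; datagen does not actually emit nulls, so this demonstrates the wiring rather than the dropping itself):

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public final class NotNullEnforcerSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // Default is ERROR: a null written into a NOT NULL sink column fails the
        // job with the message asserted above; DROP discards such rows silently
        tableEnv.getConfig().set(
                ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER.key(),
                ExecutionConfigOptions.NotNullEnforcer.DROP.name());
        tableEnv.executeSql(
                "CREATE TABLE src (a INT, b STRING) WITH ('connector' = 'datagen', 'number-of-rows' = '4')");
        tableEnv.executeSql(
                "CREATE TABLE snk (a INT, b STRING NOT NULL) WITH ('connector' = 'print')");
        tableEnv.executeSql("INSERT INTO snk SELECT * FROM src").await();
    }
}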

Example 29 with StreamTableEnvironment

Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

From class CommonExecSinkITCase, method testCharLengthEnforcer:

@Test
public void testCharLengthEnforcer() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final List<Row> rows = Arrays.asList(
            Row.of(1, "Apache Flink", "SQL RuleZ", 11, 111, "SQL"),
            Row.of(2, "Apache", "SQL", 22, 222, "Flink"),
            Row.of(3, "Apache", "Flink", 33, 333, "Apache Flink SQL"),
            Row.of(4, "Flink Project", "SQL or SeQueL?", 44, 444, "Apache Flink SQL"));
    final TableDescriptor sourceDescriptor = TableFactoryHarness.newBuilder()
            .schema(schemaForCharLengthEnforcer())
            .source(new TestSource(rows))
            .build();
    tableEnv.createTable("T1", sourceDescriptor);
    // Default config (IGNORE): values pass through untouched, no trim or pad
    TableResult result = tableEnv.executeSql("SELECT * FROM T1");
    result.await();
    final List<Row> results = new ArrayList<>();
    result.collect().forEachRemaining(results::add);
    assertThat(results).containsExactlyInAnyOrderElementsOf(rows);
    // Change config to TRIM_PAD: trim or pad CHAR/VARCHAR values to their declared length
    try {
        tableEnv.getConfig().set(
                TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                ExecutionConfigOptions.TypeLengthEnforcer.TRIM_PAD.name());
        result = tableEnv.executeSql("SELECT * FROM T1");
        result.await();
        final List<Row> expected = Arrays.asList(
                Row.of(1, "Apache F", "SQL Ru", 11, 111, "SQL"),
                Row.of(2, "Apache  ", "SQL   ", 22, 222, "Flink"),
                Row.of(3, "Apache  ", "Flink ", 33, 333, "Apache"),
                Row.of(4, "Flink Pr", "SQL or", 44, 444, "Apache"));
        final List<Row> resultsTrimmed = new ArrayList<>();
        result.collect().forEachRemaining(resultsTrimmed::add);
        assertThat(resultsTrimmed).containsExactlyInAnyOrderElementsOf(expected);
    } finally {
        tableEnv.getConfig().set(
                TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                ExecutionConfigOptions.TypeLengthEnforcer.IGNORE.name());
    }
}
Also used: TableResult (org.apache.flink.table.api.TableResult), ArrayList (java.util.ArrayList), StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment), Row (org.apache.flink.types.Row), TableDescriptor (org.apache.flink.table.api.TableDescriptor), Test (org.junit.Test)
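
schemaForCharLengthEnforcer() is test-internal; judging from the expected rows, the second column appears to be CHAR(8), the third CHAR(6), and the last VARCHAR(6). The transferable piece is the table.exec.sink.type-length-enforcer option, sketched here against a print sink under those assumptions:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public final class TypeLengthEnforcerSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // TRIM_PAD trims CHAR/VARCHAR values that exceed the declared length and
        // pads CHAR values that fall short; the default IGNORE leaves values as-is
        tableEnv.getConfig().set(
                ExecutionConfigOptions.TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                ExecutionConfigOptions.TypeLengthEnforcer.TRIM_PAD.name());
        tableEnv.executeSql(
                "CREATE TABLE snk (a CHAR(8), b VARCHAR(6)) WITH ('connector' = 'print')");
        // Mirroring the expected rows above, 'Apache Flink' should be trimmed
        // to 'Apache F' and 'SQL or SeQueL?' to 'SQL or'
        tableEnv.executeSql("INSERT INTO snk VALUES ('Apache Flink', 'SQL or SeQueL?')").await();
    }
}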

Example 30 with StreamTableEnvironment

Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

From class DataStreamJavaITCase, method testFromAndToDataStreamWithPojo:

@Test
public void testFromAndToDataStreamWithPojo() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final ComplexPojo[] pojos = {
        ComplexPojo.of(42, "hello", new ImmutablePojo(42.0, null)),
        ComplexPojo.of(42, null, null)
    };
    final DataStream<ComplexPojo> dataStream = env.fromElements(pojos);
    // The explicit schema reorders the POJO fields and maps the immutable
    // nested type to a structured data type
    final Table table = tableEnv.fromDataStream(
            dataStream,
            Schema.newBuilder()
                    .column("c", INT())
                    .column("a", STRING())
                    .column("p", DataTypes.of(ImmutablePojo.class))
                    .build());
    testSchema(table,
            Column.physical("c", INT()),
            Column.physical("a", STRING()),
            Column.physical("p", STRUCTURED(ImmutablePojo.class, FIELD("d", DOUBLE()), FIELD("b", BOOLEAN()))));
    tableEnv.createTemporaryView("t", table);
    final TableResult result = tableEnv.executeSql("SELECT p, p.d, p.b FROM t");
    testResult(result, Row.of(new ImmutablePojo(42.0, null), 42.0, null), Row.of(null, null, null));
    testResult(tableEnv.toDataStream(table, ComplexPojo.class), pojos);
}
Also used: Table (org.apache.flink.table.api.Table), TableResult (org.apache.flink.table.api.TableResult), StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment), Test (org.junit.Test)
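
ComplexPojo and ImmutablePojo are fixtures of DataStreamJavaITCase. A minimal round-trip sketch with a hypothetical public POJO, relying on reflective schema derivation instead of an explicit Schema:

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public final class PojoRoundTripSketch {

    // Hypothetical POJO: public fields and a no-arg constructor make it
    // analyzable by Flink's reflective type extraction
    public static class User {
        public String name;
        public Integer score;

        public User() {}

        public User(String name, Integer score) {
            this.name = name;
            this.score = score;
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        DataStream<User> input = env.fromElements(new User("Alice", 4), new User("Bob", 6));
        Table table = tableEnv.fromDataStream(input); // columns "name" and "score" derived
        DataStream<User> output = tableEnv.toDataStream(table, User.class);
        output.print();
        env.execute();
    }
}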

Aggregations

Classes that co-occur with StreamTableEnvironment across the examples, with their usage counts:

StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment): 64
Test (org.junit.Test): 53
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 41
Row (org.apache.flink.types.Row): 38
Table (org.apache.flink.table.api.Table): 36
ArrayList (java.util.ArrayList): 19
TableResult (org.apache.flink.table.api.TableResult): 18
List (java.util.List): 10
TableDescriptor (org.apache.flink.table.api.TableDescriptor): 10
Arrays (java.util.Arrays): 6
Collections (java.util.Collections): 6
AbstractTestBase (org.apache.flink.test.util.AbstractTestBase): 6
IOException (java.io.IOException): 5
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 5
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 5
Either (org.apache.flink.types.Either): 5
LocalDateTime (java.time.LocalDateTime): 4
ZoneId (java.time.ZoneId): 4
TypeHint (org.apache.flink.api.common.typeinfo.TypeHint): 4
TypeInformation (org.apache.flink.api.common.typeinfo.TypeInformation): 4
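
The counts above confirm the canonical pairing: almost every example combines StreamTableEnvironment with a StreamExecutionEnvironment, a Table, and Row values. A minimal sketch wiring those four most frequent classes together:

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

import static org.apache.flink.table.api.Expressions.row;

public final class CommonPatternSketch {
    public static void main(String[] args) throws Exception {
        // The usual bootstrap: wrap a StreamExecutionEnvironment in a
        // StreamTableEnvironment, then build a Table from row expressions
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        Table table = tableEnv.fromValues(row(1, "a"), row(2, "b"));
        // toDataStream without a target class yields a DataStream of Row
        DataStream<Row> stream = tableEnv.toDataStream(table);
        stream.print();
        env.execute();
    }
}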