Search in sources :

Example 21 with TableDescriptor

Use of org.apache.flink.table.api.TableDescriptor in the Apache Flink project.

From the class CommonExecSinkITCase, method testBinaryLengthEnforcer.

@Test
public void testBinaryLengthEnforcer() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    // Source rows carrying BINARY/VARBINARY payloads of assorted lengths, plus int
    // columns that the length enforcer must leave untouched.
    final List<Row> rows =
            Arrays.asList(
                    Row.of(
                            1,
                            new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
                            new byte[] {1, 2, 3, 4, 5, 6, 7, 8},
                            11,
                            111,
                            new byte[] {1, 2, 3}),
                    Row.of(
                            2,
                            new byte[] {1, 2, 3, 4, 5},
                            new byte[] {1, 2, 3},
                            22,
                            222,
                            new byte[] {1, 2, 3, 4, 5, 6}),
                    Row.of(
                            3,
                            new byte[] {1, 2, 3, 4, 5, 6},
                            new byte[] {1, 2, 3, 4, 5},
                            33,
                            333,
                            new byte[] {1, 2, 3, 4, 5, 6, 7, 8}),
                    Row.of(
                            4,
                            new byte[] {1, 2, 3, 4, 5, 6, 7, 8},
                            new byte[] {1, 2, 3, 4, 5, 6},
                            44,
                            444,
                            new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}));
    final TableDescriptor sourceDescriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaForBinaryLengthEnforcer())
                    .source(new TestSource(rows))
                    .build();
    tableEnv.createTable("T1", sourceDescriptor);

    // With the default enforcer config (IGNORE) the rows must come back exactly as produced.
    TableResult ignoredResult = tableEnv.executeSql("SELECT * FROM T1");
    ignoredResult.await();
    final List<Row> untouched = new ArrayList<>();
    ignoredResult.collect().forEachRemaining(untouched::add);
    assertThat(untouched).containsExactlyInAnyOrderElementsOf(rows);

    // Switch to TRIM_PAD: the expected rows below show oversized binary values cut down
    // and undersized ones zero-padded, according to their declared type length. The
    // finally block restores IGNORE so other tests sharing the config are unaffected.
    try {
        tableEnv.getConfig()
                .set(
                        TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                        ExecutionConfigOptions.TypeLengthEnforcer.TRIM_PAD.name());
        TableResult enforcedResult = tableEnv.executeSql("SELECT * FROM T1");
        enforcedResult.await();
        final List<Row> expected =
                Arrays.asList(
                        Row.of(
                                1,
                                new byte[] {1, 2, 3, 4, 5, 6, 7, 8},
                                new byte[] {1, 2, 3, 4, 5, 6},
                                11,
                                111,
                                new byte[] {1, 2, 3}),
                        Row.of(
                                2,
                                new byte[] {1, 2, 3, 4, 5, 0, 0, 0},
                                new byte[] {1, 2, 3, 0, 0, 0},
                                22,
                                222,
                                new byte[] {1, 2, 3, 4, 5, 6}),
                        Row.of(
                                3,
                                new byte[] {1, 2, 3, 4, 5, 6, 0, 0},
                                new byte[] {1, 2, 3, 4, 5, 0},
                                33,
                                333,
                                new byte[] {1, 2, 3, 4, 5, 6}),
                        Row.of(
                                4,
                                new byte[] {1, 2, 3, 4, 5, 6, 7, 8},
                                new byte[] {1, 2, 3, 4, 5, 6},
                                44,
                                444,
                                new byte[] {1, 2, 3, 4, 5, 6}));
        final List<Row> trimmedOrPadded = new ArrayList<>();
        enforcedResult.collect().forEachRemaining(trimmedOrPadded::add);
        assertThat(trimmedOrPadded).containsExactlyInAnyOrderElementsOf(expected);
    } finally {
        tableEnv.getConfig()
                .set(
                        TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                        ExecutionConfigOptions.TypeLengthEnforcer.IGNORE.name());
    }
}
Also used : TableResult(org.apache.flink.table.api.TableResult) ArrayList(java.util.ArrayList) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Row(org.apache.flink.types.Row) TableDescriptor(org.apache.flink.table.api.TableDescriptor) Test(org.junit.Test)

Example 22 with TableDescriptor

Use of org.apache.flink.table.api.TableDescriptor in the Apache Flink project.

From the class CommonExecSinkITCase, method testStreamRecordTimestampInserterSinkRuntimeProvider.

@Test
public void testStreamRecordTimestampInserterSinkRuntimeProvider()
        throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    // Shared list so the sink, running on task threads, can report the StreamRecord
    // timestamps it observed back to the test thread.
    final SharedReference<List<Long>> timestamps = sharedObjects.add(new ArrayList<>());
    final List<Row> rows =
            Arrays.asList(
                    Row.of(1, "foo", Instant.parse("2020-11-10T12:34:56.123Z")),
                    Row.of(2, "foo", Instant.parse("2020-11-10T11:34:56.789Z")),
                    Row.of(3, "foo", Instant.parse("2020-11-11T10:11:22.777Z")),
                    Row.of(4, "foo", Instant.parse("2020-11-11T10:11:23.888Z")));
    // Harness table acting as both source (fixed rows) and sink (timestamp-capturing writer).
    final TableDescriptor descriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaStreamRecordTimestampInserter(true))
                    .source(new TestSource(rows))
                    .sink(buildRuntimeSinkProvider(new TestTimestampWriter(timestamps)))
                    .build();
    tableEnv.createTable("T1", descriptor);
    final String insertStatement = "INSERT INTO T1 SELECT * FROM T1";
    // NOTE(review): the `true` flag presumably asserts the timestamp-inserter node appears
    // in the plan — confirm against the assertPlan helper.
    assertPlan(tableEnv, insertStatement, true);
    tableEnv.executeSql(insertStatement).await();
    // The captured timestamps must match the rowtime values of the source rows.
    assertTimestampResults(timestamps, rows);
}
Also used : List(java.util.List) ArrayList(java.util.ArrayList) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Row(org.apache.flink.types.Row) TableDescriptor(org.apache.flink.table.api.TableDescriptor) Test(org.junit.Test)

Aggregations

TableDescriptor (org.apache.flink.table.api.TableDescriptor)22 Test (org.junit.Test)20 ArrayList (java.util.ArrayList)11 List (java.util.List)9 StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment)7 Row (org.apache.flink.types.Row)7 TableResult (org.apache.flink.table.api.TableResult)4 Instant (java.time.Instant)2 Arrays (java.util.Arrays)2 Collection (java.util.Collection)2 Collections (java.util.Collections)2 ExecutionException (java.util.concurrent.ExecutionException)2 AtomicReference (java.util.concurrent.atomic.AtomicReference)2 Collectors (java.util.stream.Collectors)2 StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)2 SinkFunction (org.apache.flink.streaming.api.functions.sink.SinkFunction)2 SourceFunction (org.apache.flink.streaming.api.functions.source.SourceFunction)2 SinkV1Adapter (org.apache.flink.streaming.api.transformations.SinkV1Adapter)2 Watermark (org.apache.flink.streaming.api.watermark.Watermark)2 TestSink (org.apache.flink.streaming.runtime.operators.sink.TestSink)2