Use of org.apache.flink.table.api.TableDescriptor in the project flink by apache.
From the class CommonExecSinkITCase, method testBinaryLengthEnforcer:
@Test
public void testBinaryLengthEnforcer() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final List<Row> rows =
            Arrays.asList(
                    Row.of(1, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, 11, 111, new byte[] {1, 2, 3}),
                    Row.of(2, new byte[] {1, 2, 3, 4, 5}, new byte[] {1, 2, 3}, 22, 222, new byte[] {1, 2, 3, 4, 5, 6}),
                    Row.of(3, new byte[] {1, 2, 3, 4, 5, 6}, new byte[] {1, 2, 3, 4, 5}, 33, 333, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}),
                    Row.of(4, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, new byte[] {1, 2, 3, 4, 5, 6}, 44, 444, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}));
    final TableDescriptor sourceDescriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaForBinaryLengthEnforcer())
                    .source(new TestSource(rows))
                    .build();
    tableEnv.createTable("T1", sourceDescriptor);

    // Default config - ignore (no trim)
    TableResult result = tableEnv.executeSql("SELECT * FROM T1");
    result.await();
    final List<Row> results = new ArrayList<>();
    result.collect().forEachRemaining(results::add);
    assertThat(results).containsExactlyInAnyOrderElementsOf(rows);

    // Change the config option to "trim_pad", to trim or pad the byte arrays
    // accordingly, based on their type length
    try {
        tableEnv.getConfig()
                .set(
                        TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                        ExecutionConfigOptions.TypeLengthEnforcer.TRIM_PAD.name());
        result = tableEnv.executeSql("SELECT * FROM T1");
        result.await();
        final List<Row> expected =
                Arrays.asList(
                        Row.of(1, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, new byte[] {1, 2, 3, 4, 5, 6}, 11, 111, new byte[] {1, 2, 3}),
                        Row.of(2, new byte[] {1, 2, 3, 4, 5, 0, 0, 0}, new byte[] {1, 2, 3, 0, 0, 0}, 22, 222, new byte[] {1, 2, 3, 4, 5, 6}),
                        Row.of(3, new byte[] {1, 2, 3, 4, 5, 6, 0, 0}, new byte[] {1, 2, 3, 4, 5, 0}, 33, 333, new byte[] {1, 2, 3, 4, 5, 6}),
                        Row.of(4, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, new byte[] {1, 2, 3, 4, 5, 6}, 44, 444, new byte[] {1, 2, 3, 4, 5, 6}));
        final List<Row> resultsTrimmed = new ArrayList<>();
        result.collect().forEachRemaining(resultsTrimmed::add);
        assertThat(resultsTrimmed).containsExactlyInAnyOrderElementsOf(expected);
    } finally {
        tableEnv.getConfig()
                .set(
                        TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                        ExecutionConfigOptions.TypeLengthEnforcer.IGNORE.name());
    }
}
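
The helper schemaForBinaryLengthEnforcer() is defined elsewhere in CommonExecSinkITCase and is not shown on this page. A minimal sketch that is consistent with the trimmed and padded results above, assuming hypothetical column names, two fixed-length BINARY columns (both trimmed and padded) and one VARBINARY column (only trimmed), could look like this:

// Hypothetical sketch (uses org.apache.flink.table.api.Schema); the column names
// are assumptions, but the lengths follow from the expected results above.
private static Schema schemaForBinaryLengthEnforcer() {
    return Schema.newBuilder()
            .column("a", "INT")
            .column("b", "BINARY(8)") // 12 bytes trimmed to 8; 5 bytes padded to 8
            .column("c", "BINARY(6)") // 8 bytes trimmed to 6; 3 bytes padded to 6
            .column("d", "INT")
            .column("e", "INT")
            .column("f", "VARBINARY(6)") // trimmed to at most 6 bytes, never padded
            .build();
}

The option set through TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER corresponds to the key table.exec.sink.type-length-enforcer, so the same behavior can also be enabled from SQL with SET 'table.exec.sink.type-length-enforcer' = 'TRIM_PAD';.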

Use of org.apache.flink.table.api.TableDescriptor in the project flink by apache.
From the class CommonExecSinkITCase, method testStreamRecordTimestampInserterSinkRuntimeProvider:
@Test
public void testStreamRecordTimestampInserterSinkRuntimeProvider()
        throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> timestamps = sharedObjects.add(new ArrayList<>());
    final List<Row> rows =
            Arrays.asList(
                    Row.of(1, "foo", Instant.parse("2020-11-10T12:34:56.123Z")),
                    Row.of(2, "foo", Instant.parse("2020-11-10T11:34:56.789Z")),
                    Row.of(3, "foo", Instant.parse("2020-11-11T10:11:22.777Z")),
                    Row.of(4, "foo", Instant.parse("2020-11-11T10:11:23.888Z")));
    final TableDescriptor sourceDescriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaStreamRecordTimestampInserter(true))
                    .source(new TestSource(rows))
                    .sink(buildRuntimeSinkProvider(new TestTimestampWriter(timestamps)))
                    .build();
    tableEnv.createTable("T1", sourceDescriptor);
    final String sqlStmt = "INSERT INTO T1 SELECT * FROM T1";
    assertPlan(tableEnv, sqlStmt, true);
    tableEnv.executeSql(sqlStmt).await();
    assertTimestampResults(timestamps, rows);
}
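
The helpers referenced here (schemaStreamRecordTimestampInserter, buildRuntimeSinkProvider, TestTimestampWriter, assertPlan, assertTimestampResults) are defined elsewhere in the test class. For assertPlan to expect a StreamRecordTimestampInserter node, the schema must expose a rowtime attribute, i.e. a timestamp column with a watermark; a plausible sketch, with assumed column names and types, is:

// Hypothetical sketch: the boolean toggles the watermark declaration, which turns
// "ts" into a rowtime attribute so the planner adds a StreamRecordTimestampInserter
// that copies the ts column into each emitted StreamRecord's timestamp.
private static Schema schemaStreamRecordTimestampInserter(boolean withWatermark) {
    final Schema.Builder builder =
            Schema.newBuilder()
                    .column("a", "INT")
                    .column("b", "STRING")
                    .column("ts", "TIMESTAMP_LTZ(3)"); // rows carry java.time.Instant values
    if (withWatermark) {
        builder.watermark("ts", "ts"); // WATERMARK FOR ts AS ts
    }
    return builder.build();
}

assertTimestampResults(timestamps, rows) then presumably verifies that TestTimestampWriter collected one StreamRecord timestamp per input row and that each matches the corresponding row's Instant in epoch milliseconds.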