Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class CommonExecSinkITCase, method testBinaryLengthEnforcer:
@Test
public void testBinaryLengthEnforcer() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final List<Row> rows = Arrays.asList(
            Row.of(1, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, 11, 111, new byte[] {1, 2, 3}),
            Row.of(2, new byte[] {1, 2, 3, 4, 5}, new byte[] {1, 2, 3}, 22, 222, new byte[] {1, 2, 3, 4, 5, 6}),
            Row.of(3, new byte[] {1, 2, 3, 4, 5, 6}, new byte[] {1, 2, 3, 4, 5}, 33, 333, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}),
            Row.of(4, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, new byte[] {1, 2, 3, 4, 5, 6}, 44, 444, new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}));
    final TableDescriptor sourceDescriptor = TableFactoryHarness.newBuilder()
            .schema(schemaForBinaryLengthEnforcer())
            .source(new TestSource(rows))
            .build();
    tableEnv.createTable("T1", sourceDescriptor);

    // Default config - ignore (no trim)
    TableResult result = tableEnv.executeSql("SELECT * FROM T1");
    result.await();
    final List<Row> results = new ArrayList<>();
    result.collect().forEachRemaining(results::add);
    assertThat(results).containsExactlyInAnyOrderElementsOf(rows);

    // Change the config option to TRIM_PAD, to trim or pad the binary values
    // accordingly, based on their type length
    try {
        tableEnv.getConfig().set(
                TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                ExecutionConfigOptions.TypeLengthEnforcer.TRIM_PAD.name());
        result = tableEnv.executeSql("SELECT * FROM T1");
        result.await();
        final List<Row> expected = Arrays.asList(
                Row.of(1, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, new byte[] {1, 2, 3, 4, 5, 6}, 11, 111, new byte[] {1, 2, 3}),
                Row.of(2, new byte[] {1, 2, 3, 4, 5, 0, 0, 0}, new byte[] {1, 2, 3, 0, 0, 0}, 22, 222, new byte[] {1, 2, 3, 4, 5, 6}),
                Row.of(3, new byte[] {1, 2, 3, 4, 5, 6, 0, 0}, new byte[] {1, 2, 3, 4, 5, 0}, 33, 333, new byte[] {1, 2, 3, 4, 5, 6}),
                Row.of(4, new byte[] {1, 2, 3, 4, 5, 6, 7, 8}, new byte[] {1, 2, 3, 4, 5, 6}, 44, 444, new byte[] {1, 2, 3, 4, 5, 6}));
        final List<Row> resultsTrimmed = new ArrayList<>();
        result.collect().forEachRemaining(resultsTrimmed::add);
        assertThat(resultsTrimmed).containsExactlyInAnyOrderElementsOf(expected);
    } finally {
        tableEnv.getConfig().set(
                TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                ExecutionConfigOptions.TypeLengthEnforcer.IGNORE.name());
    }
}
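The helper schemaForBinaryLengthEnforcer() is not shown in the snippet. A minimal sketch of what it could look like, with the column lengths inferred from the expected trim/pad results above (the exact column names and lengths are assumptions, not taken verbatim from the Flink sources):

// Hypothetical reconstruction, assuming org.apache.flink.table.api.Schema.
// The second column pads/trims to 8 bytes and the third to 6 bytes
// (fixed-length BINARY pads with 0x00), while the last column is only
// trimmed, never padded, which matches a variable-length VARBINARY.
private static Schema schemaForBinaryLengthEnforcer() {
    return Schema.newBuilder()
            .column("a", "INT")
            .column("b", "BINARY(8)")
            .column("c", "BINARY(6)")
            .column("d", "INT")
            .column("e", "INT")
            .column("f", "VARBINARY(6)")
            .build();
}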
Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class CommonExecSinkITCase, method testStreamRecordTimestampInserterSinkRuntimeProvider:
@Test
public void testStreamRecordTimestampInserterSinkRuntimeProvider()
        throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> timestamps = sharedObjects.add(new ArrayList<>());
    final List<Row> rows = Arrays.asList(
            Row.of(1, "foo", Instant.parse("2020-11-10T12:34:56.123Z")),
            Row.of(2, "foo", Instant.parse("2020-11-10T11:34:56.789Z")),
            Row.of(3, "foo", Instant.parse("2020-11-11T10:11:22.777Z")),
            Row.of(4, "foo", Instant.parse("2020-11-11T10:11:23.888Z")));
    final TableDescriptor sourceDescriptor = TableFactoryHarness.newBuilder()
            .schema(schemaStreamRecordTimestampInserter(true))
            .source(new TestSource(rows))
            .sink(buildRuntimeSinkProvider(new TestTimestampWriter(timestamps)))
            .build();
    tableEnv.createTable("T1", sourceDescriptor);
    final String sqlStmt = "INSERT INTO T1 SELECT * FROM T1";
    assertPlan(tableEnv, sqlStmt, true);
    tableEnv.executeSql(sqlStmt).await();
    assertTimestampResults(timestamps, rows);
}
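The schemaStreamRecordTimestampInserter(true) helper is likewise not shown. Judging from the rows and the plan assertion, it must declare an event-time timestamp column with a watermark, since the watermark declaration is what makes the planner insert the StreamRecordTimestampInserter before the sink. A plausible sketch (the column names and the TIMESTAMP_LTZ(3) type are assumptions):

// Hypothetical sketch, assuming org.apache.flink.table.api.Schema.
private static Schema schemaStreamRecordTimestampInserter(boolean withWatermark) {
    final Schema.Builder builder = Schema.newBuilder()
            .column("a", "INT")
            .column("b", "STRING")
            .column("ts", "TIMESTAMP_LTZ(3)"); // matches the Instant values in the rows
    if (withWatermark) {
        // Declaring a watermark marks "ts" as the event-time column.
        builder.watermark("ts", "ts");
    }
    return builder.build();
}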
Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class TransformationsTest, method testLegacyBatchSource:
@Test
public void testLegacyBatchSource() {
    final JavaBatchTableTestUtil util = javaBatchTestUtil();
    final StreamTableEnvironment env = util.tableEnv();
    final Table table = env.from(
            TableDescriptor.forConnector("values")
                    .option("bounded", "true")
                    .schema(dummySchema())
                    .build());
    final LegacySourceTransformation<?> sourceTransform = toLegacySourceTransformation(env, table);
    assertBoundedness(Boundedness.BOUNDED, sourceTransform);
    assertFalse(sourceTransform.getOperator().emitsProgressiveWatermarks());
}
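dummySchema() is another helper not included here; any trivial schema would do, because the test only checks the boundedness and watermark behavior of the source transformation. A hypothetical placeholder:

// Hypothetical single-column schema for the "values" connector.
private static Schema dummySchema() {
    return Schema.newBuilder().column("i", "INT").build();
}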
Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class EnvironmentTest, method testPassingExecutionParameters:
@Test
public void testPassingExecutionParameters() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    tEnv.getConfig().addConfiguration(
            new Configuration()
                    .set(CoreOptions.DEFAULT_PARALLELISM, 128)
                    .set(PipelineOptions.AUTO_WATERMARK_INTERVAL, Duration.ofMillis(800))
                    .set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(30)));
    tEnv.createTemporaryView("test", env.fromElements(1, 2, 3));

    // trigger translation
    Table table = tEnv.sqlQuery("SELECT * FROM test");
    tEnv.toAppendStream(table, Row.class);

    assertEquals(128, env.getParallelism());
    assertEquals(800, env.getConfig().getAutoWatermarkInterval());
    assertEquals(30000, env.getCheckpointConfig().getCheckpointInterval());
}
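The assertions confirm that options added to the TableConfig are propagated to the underlying StreamExecutionEnvironment once a translation is triggered. For comparison, a short sketch of setting the same values directly on the StreamExecutionEnvironment, bypassing the Table API configuration:

// Equivalent direct configuration of the StreamExecutionEnvironment.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(128);                       // CoreOptions.DEFAULT_PARALLELISM
env.getConfig().setAutoWatermarkInterval(800); // PipelineOptions.AUTO_WATERMARK_INTERVAL, in ms
env.enableCheckpointing(30_000);               // checkpointing interval, in ms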