Use of org.apache.flink.table.api.TableResult in project flink by apache: class DependencyTest, method testTableFactoryDiscovery.
@Test
public void testTableFactoryDiscovery() throws Exception {
    final LocalExecutor executor = createLocalExecutor();
    try {
        final TableResult tableResult =
                executeSql(executor, SESSION_ID, "DESCRIBE TableNumber1");
        assertEquals(
                tableResult.getResolvedSchema(),
                ResolvedSchema.physical(
                        new String[] {"name", "type", "null", "key", "extras", "watermark"},
                        new DataType[] {
                            DataTypes.STRING(), DataTypes.STRING(), DataTypes.BOOLEAN(),
                            DataTypes.STRING(), DataTypes.STRING(), DataTypes.STRING()
                        }));
        List<Row> schemaData =
                Arrays.asList(
                        Row.of("IntegerField1", "INT", true, null, null, null),
                        Row.of("StringField1", "STRING", true, null, null, null),
                        Row.of("rowtimeField", "TIMESTAMP(3) *ROWTIME*", true, null, null, "`rowtimeField`"));
        assertEquals(schemaData, CollectionUtil.iteratorToList(tableResult.collect()));
    } finally {
        executor.closeSession(SESSION_ID);
    }
}
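Outside of the SQL Client test harness, the same DESCRIBE round trip can be reproduced directly on a TableEnvironment. The following is a minimal sketch and not taken from the Flink code base; the datagen table definition exists only so the DESCRIBE statement has something to describe.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;

public class DescribeExample {
    public static void main(String[] args) throws Exception {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Hypothetical table so DESCRIBE has a target; the datagen connector ships with Flink.
        tableEnv.executeSql(
                "CREATE TEMPORARY TABLE TableNumber1 (IntegerField1 INT, StringField1 STRING) "
                        + "WITH ('connector' = 'datagen')");
        TableResult tableResult = tableEnv.executeSql("DESCRIBE TableNumber1");
        // Schema of the DESCRIBE result itself: name, type, null, key, extras, watermark.
        System.out.println(tableResult.getResolvedSchema());
        // Rows are consumed through a CloseableIterator; try-with-resources releases resources.
        try (CloseableIterator<Row> it = tableResult.collect()) {
            while (it.hasNext()) {
                System.out.println(it.next());
            }
        }
    }
}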
Use of org.apache.flink.table.api.TableResult in project flink by apache: class CliClient, method callInserts.
private void callInserts(List<ModifyOperation> operations) {
    printInfo(CliStrings.MESSAGE_SUBMITTING_STATEMENT);
    boolean sync = executor.getSessionConfig(sessionId).get(TABLE_DML_SYNC);
    if (sync) {
        printInfo(MESSAGE_WAIT_EXECUTE);
    }
    TableResult tableResult = executor.executeModifyOperations(sessionId, operations);
    checkState(tableResult.getJobClient().isPresent());
    if (sync) {
        terminal.writer().println(CliStrings.messageInfo(MESSAGE_FINISH_STATEMENT).toAnsi());
    } else {
        terminal.writer().println(CliStrings.messageInfo(MESSAGE_STATEMENT_SUBMITTED).toAnsi());
        terminal.writer()
                .println(String.format("Job ID: %s\n", tableResult.getJobClient().get().getJobID().toString()));
    }
    terminal.flush();
}
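The CLI logic above boils down to: a DML statement returns a TableResult whose JobClient carries the job ID, and synchronous mode simply waits for that job to finish. Below is a hedged sketch of the same pattern in application code; the table names src and snk are made up for illustration, and it assumes the datagen and blackhole connectors that ship with the Flink distribution.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;

public class InsertSubmissionExample {
    public static void main(String[] args) throws Exception {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Hypothetical bounded source and sink so the INSERT has something to run against.
        tableEnv.executeSql(
                "CREATE TEMPORARY TABLE src (v INT) WITH ('connector' = 'datagen', 'number-of-rows' = '10')");
        tableEnv.executeSql("CREATE TEMPORARY TABLE snk (v INT) WITH ('connector' = 'blackhole')");

        // The INSERT submits a Flink job; the returned TableResult exposes its JobClient.
        TableResult tableResult = tableEnv.executeSql("INSERT INTO snk SELECT v FROM src");
        tableResult.getJobClient()
                .ifPresent(client -> System.out.println("Job ID: " + client.getJobID()));
        // Block until the job finishes, which is what TABLE_DML_SYNC effectively does in the CLI.
        tableResult.await();
    }
}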
Use of org.apache.flink.table.api.TableResult in project flink by apache: class CommonExecSinkITCase, method testCharLengthEnforcer.
@Test
public void testCharLengthEnforcer() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final List<Row> rows =
            Arrays.asList(
                    Row.of(1, "Apache Flink", "SQL RuleZ", 11, 111, "SQL"),
                    Row.of(2, "Apache", "SQL", 22, 222, "Flink"),
                    Row.of(3, "Apache", "Flink", 33, 333, "Apache Flink SQL"),
                    Row.of(4, "Flink Project", "SQL or SeQueL?", 44, 444, "Apache Flink SQL"));
    final TableDescriptor sourceDescriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaForCharLengthEnforcer())
                    .source(new TestSource(rows))
                    .build();
    tableEnv.createTable("T1", sourceDescriptor);

    // Default config - ignore (no trim)
    TableResult result = tableEnv.executeSql("SELECT * FROM T1");
    result.await();
    final List<Row> results = new ArrayList<>();
    result.collect().forEachRemaining(results::add);
    assertThat(results).containsExactlyInAnyOrderElementsOf(rows);

    // Change the config option to TRIM_PAD, so that CHAR/VARCHAR values are trimmed or padded
    // accordingly, based on their type length
    try {
        tableEnv.getConfig()
                .set(
                        TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                        ExecutionConfigOptions.TypeLengthEnforcer.TRIM_PAD.name());
        result = tableEnv.executeSql("SELECT * FROM T1");
        result.await();
        final List<Row> expected =
                Arrays.asList(
                        Row.of(1, "Apache F", "SQL Ru", 11, 111, "SQL"),
                        Row.of(2, "Apache ", "SQL ", 22, 222, "Flink"),
                        Row.of(3, "Apache ", "Flink ", 33, 333, "Apache"),
                        Row.of(4, "Flink Pr", "SQL or", 44, 444, "Apache"));
        final List<Row> resultsTrimmed = new ArrayList<>();
        result.collect().forEachRemaining(resultsTrimmed::add);
        assertThat(resultsTrimmed).containsExactlyInAnyOrderElementsOf(expected);
    } finally {
        tableEnv.getConfig()
                .set(
                        TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                        ExecutionConfigOptions.TypeLengthEnforcer.IGNORE.name());
    }
}
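The test toggles the type-length enforcer through TableConfig. As a rough illustration (not from the Flink repository), the same option can be set in an application before any pipeline is planned; the snippet below mirrors the set(key, value) call used in the test and only demonstrates switching the option on.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public class TypeLengthEnforcerExample {
    public static void main(String[] args) {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Trim or pad character values written to a sink to their declared type length;
        // the default is IGNORE, i.e. values pass through unchanged (as in the first half of the test).
        tableEnv.getConfig()
                .set(
                        ExecutionConfigOptions.TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER.key(),
                        ExecutionConfigOptions.TypeLengthEnforcer.TRIM_PAD.name());
        // Any INSERT INTO / SELECT pipeline planned by this environment from now on will
        // enforce CHAR/VARCHAR lengths at the sink, as exercised by the test above.
        System.out.println(tableEnv.getConfig().getConfiguration());
    }
}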
Use of org.apache.flink.table.api.TableResult in project flink by apache: class DataStreamJavaITCase, method testFromAndToDataStreamWithPojo.
@Test
public void testFromAndToDataStreamWithPojo() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final ComplexPojo[] pojos = {
        ComplexPojo.of(42, "hello", new ImmutablePojo(42.0, null)), ComplexPojo.of(42, null, null)
    };
    final DataStream<ComplexPojo> dataStream = env.fromElements(pojos);
    // reorders columns and enriches the immutable type
    final Table table =
            tableEnv.fromDataStream(
                    dataStream,
                    Schema.newBuilder()
                            .column("c", INT())
                            .column("a", STRING())
                            .column("p", DataTypes.of(ImmutablePojo.class))
                            .build());
    testSchema(
            table,
            Column.physical("c", INT()),
            Column.physical("a", STRING()),
            Column.physical("p", STRUCTURED(ImmutablePojo.class, FIELD("d", DOUBLE()), FIELD("b", BOOLEAN()))));
    tableEnv.createTemporaryView("t", table);
    final TableResult result = tableEnv.executeSql("SELECT p, p.d, p.b FROM t");
    testResult(result, Row.of(new ImmutablePojo(42.0, null), 42.0, null), Row.of(null, null, null));
    testResult(tableEnv.toDataStream(table, ComplexPojo.class), pojos);
}
Use of org.apache.flink.table.api.TableResult in project flink by apache: class DataStreamJavaITCase, method testFromAndToDataStreamEventTime.
@Test
public void testFromAndToDataStreamEventTime() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final DataStream<Tuple3<Long, Integer, String>> dataStream = getWatermarkedDataStream();
    final Table table = tableEnv.fromDataStream(
            dataStream,
            Schema.newBuilder()
                    .columnByMetadata("rowtime", "TIMESTAMP_LTZ(3)")
                    .watermark("rowtime", "SOURCE_WATERMARK()")
                    .build());
    testSchema(
            table,
            new ResolvedSchema(
                    Arrays.asList(
                            Column.physical("f0", BIGINT().notNull()),
                            Column.physical("f1", INT().notNull()),
                            Column.physical("f2", STRING()),
                            Column.metadata("rowtime", TIMESTAMP_LTZ(3), null, false)),
                    Collections.singletonList(WatermarkSpec.of(
                            "rowtime", ResolvedExpressionMock.of(TIMESTAMP_LTZ(3), "`SOURCE_WATERMARK`()"))),
                    null));
    tableEnv.createTemporaryView("t", table);
    final TableResult result = tableEnv.executeSql(
            "SELECT f2, SUM(f1) FROM t GROUP BY f2, TUMBLE(rowtime, INTERVAL '0.005' SECOND)");
    testResult(result, Row.of("a", 47), Row.of("c", 1000), Row.of("c", 1000));
    testResult(
            tableEnv.toDataStream(table)
                    .keyBy(k -> k.getField("f2"))
                    .window(TumblingEventTimeWindows.of(Time.milliseconds(5)))
                    .<Row>apply((key, window, input, out) -> {
                        int sum = 0;
                        for (Row row : input) {
                            sum += row.<Integer>getFieldAs("f1");
                        }
                        out.collect(Row.of(key, sum));
                    })
                    .returns(Types.ROW(Types.STRING, Types.INT)),
            Row.of("a", 47), Row.of("c", 1000), Row.of("c", 1000));
}
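getWatermarkedDataStream() is a test helper, so the sketch below substitutes a small bounded stream with an explicit WatermarkStrategy; the element values are invented for illustration. Everything else mirrors the pattern above: expose the stream's timestamps as a rowtime metadata column, declare SOURCE_WATERMARK(), and aggregate over tumbling event-time windows.

import java.time.Duration;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class EventTimeRoundTripExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // Bounded input with ascending timestamps in field f0, standing in for the test helper.
        DataStream<Tuple3<Long, Integer, String>> dataStream =
                env.fromElements(
                                Tuple3.of(1L, 42, "a"),
                                Tuple3.of(2L, 5, "a"),
                                Tuple3.of(7L, 1000, "c"),
                                Tuple3.of(12L, 1000, "c"))
                        .assignTimestampsAndWatermarks(
                                WatermarkStrategy.<Tuple3<Long, Integer, String>>forMonotonousTimestamps()
                                        .withTimestampAssigner((event, ts) -> event.f0));
        // Expose the stream's timestamps and watermarks as a metadata rowtime column.
        Table table = tableEnv.fromDataStream(
                dataStream,
                Schema.newBuilder()
                        .columnByMetadata("rowtime", "TIMESTAMP_LTZ(3)")
                        .watermark("rowtime", "SOURCE_WATERMARK()")
                        .build());
        tableEnv.createTemporaryView("t", table);
        // 5 ms tumbling windows keyed by f2, mirroring the query in the test above.
        TableResult result = tableEnv.executeSql(
                "SELECT f2, SUM(f1) FROM t GROUP BY f2, TUMBLE(rowtime, INTERVAL '0.005' SECOND)");
        result.print();
    }
}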