Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class PythonScalarFunctionOperatorTestBase, method testPythonScalarFunctionOperatorIsChainedByDefault.
@Test
public void testPythonScalarFunctionOperatorIsChainedByDefault() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    StreamTableEnvironment tEnv = createTableEnvironment(env);
    tEnv.getConfig().getConfiguration().setString(TaskManagerOptions.TASK_OFF_HEAP_MEMORY.key(), "80mb");
    tEnv.registerFunction("pyFunc", new PythonScalarFunction("pyFunc"));
    DataStream<Tuple2<Integer, Integer>> ds = env.fromElements(new Tuple2<>(1, 2));
    Table t = tEnv.fromDataStream(ds, $("a"), $("b")).select(call("pyFunc", $("a"), $("b")));
    // force generating the physical plan for the given table
    tEnv.toAppendStream(t, BasicTypeInfo.INT_TYPE_INFO);
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    List<JobVertex> vertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    // the Python operator is chained with its upstream operators, so a single vertex remains
    Assert.assertEquals(1, vertices.size());
}
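For contrast, a minimal sketch (assuming the same imports as the test above; not part of the Flink test base) of how disabling chaining changes the vertex count:

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    // disable chaining: each operator now compiles to its own JobVertex
    env.disableOperatorChaining();
    env.fromElements(new Tuple2<>(1, 2))
            .map(t -> t.f0 + t.f1)
            .print();
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    // source, map and sink stay separate, so the count is greater than 1
    Assert.assertTrue(jobGraph.getVerticesSortedTopologicallyFromSources().size() > 1);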
Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class HBaseConnectorITCase, method verifyHBaseLookupJoin.
private void verifyHBaseLookupJoin(boolean async) {
    StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
    tEnv.executeSql(
            "CREATE TABLE " + TEST_TABLE_1 + " ("
                    + " family1 ROW<col1 INT>,"
                    + " family2 ROW<col1 STRING, col2 BIGINT>,"
                    + " family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>,"
                    + " rowkey INT,"
                    + " family4 ROW<col1 TIMESTAMP(3), col2 DATE, col3 TIME(3), col4 DECIMAL(12, 4)>,"
                    + " PRIMARY KEY (rowkey) NOT ENFORCED"
                    + ") WITH ("
                    + " 'connector' = 'hbase-2.2',"
                    + " 'lookup.async' = '" + async + "',"
                    + " 'table-name' = '" + TEST_TABLE_1 + "',"
                    + " 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'"
                    + ")");
    // prepare a source table
    String srcTableName = "src";
    DataStream<Row> srcDs = execEnv.fromCollection(testData).returns(testTypeInfo);
    Table in = tEnv.fromDataStream(srcDs, $("a"), $("b"), $("c"), $("proc").proctime());
    tEnv.createTemporaryView(srcTableName, in);
    // perform a temporal table join query
    String dimJoinQuery =
            "SELECT"
                    + " a,"
                    + " b,"
                    + " h.family1.col1,"
                    + " h.family2.col1,"
                    + " h.family2.col2,"
                    + " h.family3.col1,"
                    + " h.family3.col2,"
                    + " h.family3.col3,"
                    + " h.family4.col1,"
                    + " h.family4.col2,"
                    + " h.family4.col3,"
                    + " h.family4.col4 "
                    + " FROM src JOIN " + TEST_TABLE_1
                    + " FOR SYSTEM_TIME AS OF src.proc as h ON src.a = h.rowkey";
    Iterator<Row> collected = tEnv.executeSql(dimJoinQuery).collect();
    List<String> result =
            Lists.newArrayList(collected).stream()
                    .map(Row::toString)
                    .sorted()
                    .collect(Collectors.toList());
    List<String> expected = new ArrayList<>();
    expected.add("+I[1, 1, 10, Hello-1, 100, 1.01, false, Welt-1, 2019-08-18T19:00, 2019-08-18, 19:00, 12345678.0001]");
    expected.add("+I[2, 2, 20, Hello-2, 200, 2.02, true, Welt-2, 2019-08-18T19:01, 2019-08-18, 19:01, 12345678.0002]");
    expected.add("+I[3, 2, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
    expected.add("+I[3, 3, 30, Hello-3, 300, 3.03, false, Welt-3, 2019-08-18T19:02, 2019-08-18, 19:02, 12345678.0003]");
    assertEquals(expected, result);
}
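A note on tuning: a synchronous lookup goes to HBase once per probe row. The connector also exposes a lookup cache; a minimal DDL sketch, assuming the documented 'lookup.cache.max-rows' and 'lookup.cache.ttl' options of the hbase-2.2 connector (the schema and values here are illustrative):

    tEnv.executeSql(
            "CREATE TABLE hTable ("
                    + " rowkey INT,"
                    + " family1 ROW<col1 INT>,"
                    + " PRIMARY KEY (rowkey) NOT ENFORCED"
                    + ") WITH ("
                    + " 'connector' = 'hbase-2.2',"
                    + " 'table-name' = 'hTable',"
                    + " 'zookeeper.quorum' = 'localhost:2181',"
                    // cache up to 1000 rows for at most 10 minutes between HBase round trips
                    + " 'lookup.cache.max-rows' = '1000',"
                    + " 'lookup.cache.ttl' = '10min'"
                    + ")");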
Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class HBaseConnectorITCase, method testTableSink.
@Test
public void testTableSink() throws Exception {
    StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
    // register HBase table testTable1 which contains test data
    String table1DDL = createHBaseTableDDL(TEST_TABLE_1, false);
    tEnv.executeSql(table1DDL);
    String table2DDL = createHBaseTableDDL(TEST_TABLE_2, false);
    tEnv.executeSql(table2DDL);
    String query =
            "INSERT INTO " + TEST_TABLE_2
                    + " SELECT"
                    + " rowkey,"
                    + " family1,"
                    + " family2,"
                    + " family3"
                    + " FROM " + TEST_TABLE_1;
    TableResult tableResult = tEnv.executeSql(query);
    // wait for the insert job to finish
    tableResult.await();
    assertEquals("Expected INSERT rowKind", RowKind.INSERT, tableResult.collect().next().getKind());
    // start a batch scan job to verify contents in HBase table
    TableEnvironment batchEnv = TableEnvironment.create(batchSettings);
    batchEnv.executeSql(table2DDL);
    List<String> expected = new ArrayList<>();
    expected.add("+I[1, 10, Hello-1, 100, 1.01, false, Welt-1]\n");
    expected.add("+I[2, 20, Hello-2, 200, 2.02, true, Welt-2]\n");
    expected.add("+I[3, 30, Hello-3, 300, 3.03, false, Welt-3]\n");
    expected.add("+I[4, 40, null, 400, 4.04, true, Welt-4]\n");
    expected.add("+I[5, 50, Hello-5, 500, 5.05, false, Welt-5]\n");
    expected.add("+I[6, 60, Hello-6, 600, 6.06, true, Welt-6]\n");
    expected.add("+I[7, 70, Hello-7, 700, 7.07, false, Welt-7]\n");
    expected.add("+I[8, 80, null, 800, 8.08, true, Welt-8]\n");
    Table countTable = batchEnv.sqlQuery("SELECT COUNT(h.rowkey) FROM " + TEST_TABLE_2 + " AS h");
    assertEquals(Long.valueOf(expected.size()), countTable.execute().collect().next().getField(0));
    Table table =
            batchEnv.sqlQuery(
                    "SELECT "
                            + " h.rowkey, "
                            + " h.family1.col1, "
                            + " h.family2.col1, "
                            + " h.family2.col2, "
                            + " h.family3.col1, "
                            + " h.family3.col2, "
                            + " h.family3.col3 "
                            + "FROM " + TEST_TABLE_2 + " AS h");
    TableResult tableResult2 = table.execute();
    List<Row> results = CollectionUtil.iteratorToList(tableResult2.collect());
    TestBaseUtils.compareResultAsText(results, String.join("", expected));
}
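The INSERT-then-await pattern above generalizes beyond HBase: executeSql submits the statement job asynchronously and TableResult.await() blocks until it finishes. A self-contained sketch using the built-in datagen and print connectors (table and field names are illustrative):

    TableEnvironment env = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    // a bounded generator source, so await() actually returns
    env.executeSql(
            "CREATE TABLE src (id BIGINT) WITH ("
                    + " 'connector' = 'datagen',"
                    + " 'number-of-rows' = '10'"
                    + ")");
    env.executeSql("CREATE TABLE snk (id BIGINT) WITH ('connector' = 'print')");
    // the job is submitted asynchronously; await() blocks until completion
    TableResult result = env.executeSql("INSERT INTO snk SELECT id FROM src");
    result.await();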
Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class JdbcDataTypeTest, method testDataTypeValidate.
@Test
public void testDataTypeValidate() {
    String sqlDDL = String.format(DDL_FORMAT, testItem.dataTypeExpr, testItem.dialect);
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    tEnv.executeSql(sqlDDL);
    if (testItem.expectError != null) {
        try {
            tEnv.sqlQuery("SELECT * FROM T");
            fail();
        } catch (ValidationException ex) {
            Assert.assertEquals(testItem.expectError, ex.getCause().getMessage());
        } catch (UnsupportedOperationException ex) {
            Assert.assertEquals(testItem.expectError, ex.getMessage());
        } catch (Exception e) {
            fail(e.getMessage());
        }
    } else {
        tEnv.sqlQuery("SELECT * FROM T");
    }
}
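The test leans on a DDL_FORMAT template and parameterized testItem fields defined elsewhere in JdbcDataTypeTest. A plausible reconstruction of that scaffolding (the template text and field names are assumptions, not the actual Flink source):

    // hypothetical reconstruction of the parameterized-test scaffolding
    private static final String DDL_FORMAT =
            "CREATE TABLE T (f0 %s) WITH ("
                    + " 'connector' = 'jdbc',"
                    + " 'url' = 'jdbc:%s://localhost:3306/test',"
                    + " 'table-name' = 't'"
                    + ")";

    private static class TestItem {
        final String dialect;      // e.g. "mysql", "postgres"
        final String dataTypeExpr; // e.g. "DECIMAL(38, 18)"
        final String expectError;  // null when the type is expected to validate

        TestItem(String dialect, String dataTypeExpr, String expectError) {
            this.dialect = dialect;
            this.dataTypeExpr = dataTypeExpr;
            this.expectError = expectError;
        }
    }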
Use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.
From the class StreamSQLExample, method main.
// *************************************************************************
// PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
    // set up the Java DataStream API
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // set up the Java Table API
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final DataStream<Order> orderA =
            env.fromCollection(
                    Arrays.asList(
                            new Order(1L, "beer", 3),
                            new Order(1L, "diaper", 4),
                            new Order(3L, "rubber", 2)));
    final DataStream<Order> orderB =
            env.fromCollection(
                    Arrays.asList(
                            new Order(2L, "pen", 3),
                            new Order(2L, "rubber", 3),
                            new Order(4L, "beer", 1)));
    // convert the first DataStream to a Table object
    // it will be used "inline" and is not registered in a catalog
    final Table tableA = tableEnv.fromDataStream(orderA);
    // convert the second DataStream and register it as a view
    // it will be accessible under a name
    tableEnv.createTemporaryView("TableB", orderB);
    // union the two tables
    final Table result =
            tableEnv.sqlQuery(
                    "SELECT * FROM " + tableA + " WHERE amount > 2 "
                            + "UNION ALL "
                            + "SELECT * FROM TableB WHERE amount < 2");
    // convert the Table back to an insert-only DataStream of type `Order`
    tableEnv.toDataStream(result, Order.class).print();
    // after the table program is converted to a DataStream program,
    // we must use `env.execute()` to submit the job
    env.execute();
}
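The example assumes an Order POJO nested in StreamSQLExample; it looks roughly like the following (a sketch, reconstructed from the fields the queries use):

    /** Simple POJO. Public fields plus a public no-arg constructor make it a valid Flink POJO. */
    public static class Order {
        public Long user;
        public String product;
        public int amount;

        // a Flink POJO needs a public default constructor
        public Order() {}

        public Order(Long user, String product, int amount) {
            this.user = user;
            this.product = product;
            this.amount = amount;
        }

        @Override
        public String toString() {
            return "Order{user=" + user + ", product='" + product + "', amount=" + amount + "}";
        }
    }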