Search in sources :

Example 21 with StreamTableEnvironment

use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

In the class JdbcDynamicTableSinkITCase, method testUpsert:

@Test
public void testUpsert() throws Exception {
    final StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    execEnv.getConfig().enableObjectReuse();
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(execEnv);
    // Attach ascending event-time timestamps (taken from field f0) to the tuple
    // stream before exposing it to the Table API.
    final Table source = tableEnv.fromDataStream(
            get4TupleDataStream(execEnv).assignTimestampsAndWatermarks(
                    new AscendingTimestampExtractor<Tuple4<Integer, Long, String, Timestamp>>() {

                        @Override
                        public long extractAscendingTimestamp(Tuple4<Integer, Long, String, Timestamp> element) {
                            return element.f0;
                        }
                    }),
            $("id"), $("num"), $("text"), $("ts"));
    tableEnv.createTemporaryView("T", source);
    // DDL for the JDBC upsert sink; a tiny flush buffer ('max-rows' = 2,
    // 'interval' = 0) forces frequent writes during the job.
    final String sinkDdl = "CREATE TABLE upsertSink ("
            + "  cnt BIGINT,"
            + "  lencnt BIGINT,"
            + "  cTag INT,"
            + "  ts TIMESTAMP(3),"
            + "  PRIMARY KEY (cnt, cTag) NOT ENFORCED"
            + ") WITH ("
            + "  'connector'='jdbc',"
            + "  'url'='" + DB_URL + "',"
            + "  'table-name'='" + OUTPUT_TABLE1 + "',"
            + "  'sink.buffer-flush.max-rows' = '2',"
            + "  'sink.buffer-flush.interval' = '0',"
            + "  'sink.max-retries' = '0'"
            + ")";
    tableEnv.executeSql(sinkDdl);
    // Two-level aggregation; the outer GROUP BY over an aggregate result
    // exercises the upsert (update/retract) path of the sink.
    final String insertDml = "INSERT INTO upsertSink \n"
            + "SELECT cnt, COUNT(len) AS lencnt, cTag, MAX(ts) AS ts\n"
            + "FROM (\n"
            + "  SELECT len, COUNT(id) as cnt, cTag, MAX(ts) AS ts\n"
            + "  FROM (SELECT id, CHAR_LENGTH(text) AS len, (CASE WHEN id > 0 THEN 1 ELSE 0 END) cTag, ts FROM T)\n"
            + "  GROUP BY len, cTag\n"
            + ")\n"
            + "GROUP BY cnt, cTag";
    tableEnv.executeSql(insertDml).await();
    // Verify the final contents of the database table once the job finished.
    check(
            new Row[] {
                Row.of(1, 5, 1, Timestamp.valueOf("1970-01-01 00:00:00.006")),
                Row.of(7, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.021")),
                Row.of(9, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.015"))
            },
            DB_URL,
            OUTPUT_TABLE1,
            new String[] { "cnt", "lencnt", "cTag", "ts" });
}
Also used : Tuple4(org.apache.flink.api.java.tuple.Tuple4) Table(org.apache.flink.table.api.Table) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Timestamp(java.sql.Timestamp) AscendingTimestampExtractor(org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor) Test(org.junit.Test)

Example 22 with StreamTableEnvironment

use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

In the class OracleTableSinkITCase, method testUpsert:

@Test
public void testUpsert() throws Exception {
    final StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    streamEnv.getConfig().enableObjectReuse();
    final StreamTableEnvironment tEnv = StreamTableEnvironment.create(streamEnv);
    // Timestamps are extracted from tuple field f0 in ascending order before
    // the stream is converted into a Table.
    final AscendingTimestampExtractor<Tuple4<Integer, Long, String, Timestamp>> extractor =
            new AscendingTimestampExtractor<Tuple4<Integer, Long, String, Timestamp>>() {

                @Override
                public long extractAscendingTimestamp(Tuple4<Integer, Long, String, Timestamp> element) {
                    return element.f0;
                }
            };
    final Table input = tEnv.fromDataStream(
            get4TupleDataStream(streamEnv).assignTimestampsAndWatermarks(extractor),
            $("id"), $("num"), $("text"), $("ts"));
    tEnv.createTemporaryView("T", input);
    // Upsert sink backed by the Oracle test container; DECIMAL key columns and
    // an aggressive flush configuration ('max-rows' = 2, 'interval' = 0).
    final String createSink = "CREATE TABLE upsertSink ("
            + "  cnt DECIMAL(18,2),"
            + "  lencnt DECIMAL(18,2),"
            + "  cTag INT,"
            + "  ts TIMESTAMP(3),"
            + "  PRIMARY KEY (cnt, cTag) NOT ENFORCED"
            + ") WITH ("
            + "  'connector'='jdbc',"
            + "  'url'='" + containerUrl + "',"
            + "  'table-name'='" + OUTPUT_TABLE1 + "',"
            + "  'sink.buffer-flush.max-rows' = '2',"
            + "  'sink.buffer-flush.interval' = '0',"
            + "  'sink.max-retries' = '0'"
            + ")";
    tEnv.executeSql(createSink);
    // Nested aggregation: grouping on an aggregated column drives updates
    // through the upsert path of the sink.
    final String insertSql = "INSERT INTO upsertSink \n"
            + "SELECT cnt, COUNT(len) AS lencnt, cTag, MAX(ts) AS ts\n"
            + "FROM (\n"
            + "  SELECT len, COUNT(id) as cnt, cTag, MAX(ts) AS ts\n"
            + "  FROM (SELECT id, CHAR_LENGTH(text) AS len, (CASE WHEN id > 0 THEN 1 ELSE 0 END) cTag, ts FROM T)\n"
            + "  GROUP BY len, cTag\n"
            + ")\n"
            + "GROUP BY cnt, cTag";
    tEnv.executeSql(insertSql).await();
    // Compare the settled database contents with the expected rows.
    final Row[] expected = new Row[] {
        Row.of(1, 5, 1, Timestamp.valueOf("1970-01-01 00:00:00.006")),
        Row.of(7, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.021")),
        Row.of(9, 1, 1, Timestamp.valueOf("1970-01-01 00:00:00.015"))
    };
    check(expected, containerUrl, OUTPUT_TABLE1, new String[] { "cnt", "lencnt", "cTag", "ts" });
}
Also used : Tuple4(org.apache.flink.api.java.tuple.Tuple4) Table(org.apache.flink.table.api.Table) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Timestamp(java.sql.Timestamp) AscendingTimestampExtractor(org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor) Test(org.junit.Test)

Example 23 with StreamTableEnvironment

use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

In the class OracleTableSinkITCase, method testReal:

@Test
public void testReal() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().enableObjectReuse();
    final StreamTableEnvironment tableEnv =
            StreamTableEnvironment.create(env, EnvironmentSettings.inStreamingMode());
    // Sink with a single FLOAT column, mapped onto the Oracle REAL test table.
    final String ddl = "CREATE TABLE upsertSink ("
            + "  real_data float"
            + ") WITH ("
            + "  'connector'='jdbc',"
            + "  'url'='" + containerUrl + "',"
            + "  'table-name'='" + OUTPUT_TABLE4 + "'"
            + ")";
    tableEnv.executeSql(ddl);
    tableEnv.executeSql("INSERT INTO upsertSink SELECT CAST(1.1 as FLOAT)").await();
    // The float literal must round-trip through the REAL column unchanged.
    check(new Row[] { Row.of(1.1f) }, containerUrl, "REAL_TABLE", new String[] { "real_data" });
}
Also used : StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Test(org.junit.Test)

Example 24 with StreamTableEnvironment

use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

In the class OracleTableSinkITCase, method testAppend:

@Test
public void testAppend() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().enableObjectReuse();
    // Parallelism 1 keeps the writes deterministic for the result check below.
    env.getConfig().setParallelism(1);
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    Table t = tEnv.fromDataStream(get4TupleDataStream(env), $("id"), $("num"), $("text"), $("ts"));
    // Fixed: registerTable(...) is deprecated; use createTemporaryView for
    // consistency with the other tests in this class (e.g. testUpsert).
    tEnv.createTemporaryView("T", t);
    // Append-only sink (no PRIMARY KEY declared) targeting the Oracle container.
    tEnv.executeSql("CREATE TABLE upsertSink (" + "  id INT," + "  num BIGINT," + "  ts TIMESTAMP(3)" + ") WITH (" + "  'connector'='jdbc'," + "  'url'='" + containerUrl + "'," + "  'table-name'='" + OUTPUT_TABLE2 + "'" + ")");
    // Plain filter + projection produces insert-only changes.
    tEnv.executeSql("INSERT INTO upsertSink SELECT id, num, ts FROM T WHERE id IN (2, 10, 20)").await();
    // Verify exactly the three filtered rows landed in the database table.
    check(new Row[] { Row.of(2, 2, Timestamp.valueOf("1970-01-01 00:00:00.002")), Row.of(10, 4, Timestamp.valueOf("1970-01-01 00:00:00.01")), Row.of(20, 6, Timestamp.valueOf("1970-01-01 00:00:00.02")) }, containerUrl, OUTPUT_TABLE2, new String[] { "id", "num", "ts" });
}
Also used : Table(org.apache.flink.table.api.Table) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Test(org.junit.Test)

Example 25 with StreamTableEnvironment

use of org.apache.flink.table.api.bridge.java.StreamTableEnvironment in project flink by apache.

In the class TransformationsTest, method testLegacyBatchValues:

@Test
public void testLegacyBatchValues() {
    final JavaBatchTableTestUtil testUtil = javaBatchTestUtil();
    final StreamTableEnvironment tableEnv = testUtil.tableEnv();
    // A values table in the batch test utility should translate to a bounded
    // legacy source transformation.
    final Table valuesTable = tableEnv.fromValues(1, 2, 3);
    final LegacySourceTransformation<?> legacySource =
            toLegacySourceTransformation(tableEnv, valuesTable);
    assertBoundedness(Boundedness.BOUNDED, legacySource);
}
Also used : Table(org.apache.flink.table.api.Table) JavaBatchTableTestUtil(org.apache.flink.table.planner.utils.JavaBatchTableTestUtil) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Test(org.junit.Test)

Aggregations

StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment)64 Test (org.junit.Test)53 StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)41 Row (org.apache.flink.types.Row)38 Table (org.apache.flink.table.api.Table)36 ArrayList (java.util.ArrayList)19 TableResult (org.apache.flink.table.api.TableResult)18 List (java.util.List)10 TableDescriptor (org.apache.flink.table.api.TableDescriptor)10 Arrays (java.util.Arrays)6 Collections (java.util.Collections)6 AbstractTestBase (org.apache.flink.test.util.AbstractTestBase)6 IOException (java.io.IOException)5 Tuple2 (org.apache.flink.api.java.tuple.Tuple2)5 ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)5 Either (org.apache.flink.types.Either)5 LocalDateTime (java.time.LocalDateTime)4 ZoneId (java.time.ZoneId)4 TypeHint (org.apache.flink.api.common.typeinfo.TypeHint)4 TypeInformation (org.apache.flink.api.common.typeinfo.TypeInformation)4