
Example 26 with TableResult

use of org.apache.flink.table.api.TableResult in project flink by apache.

the class UpsertKafkaTableITCase method wordFreqToUpsertKafka.

private void wordFreqToUpsertKafka(String wordCountTable) throws Exception {
    // ------------- test data ---------------
    final List<String> expectedData = Arrays.asList("3,1", "2,1");
    // ------------- create table ---------------
    final String createSinkTable =
            "CREATE TABLE sink_" + format + "(\n"
                    + "  `count` BIGINT,\n"
                    + "  `freq` BIGINT,\n"
                    + "  PRIMARY KEY (`count`) NOT ENFORCED\n"
                    + ") WITH (\n"
                    + "  'connector' = 'values',\n"
                    + "  'sink-insert-only' = 'false'\n"
                    + ")";
    tEnv.executeSql(createSinkTable);
    final TableResult query =
            tEnv.executeSql(
                    "INSERT INTO sink_" + format + "\n"
                            + "SELECT `count`, count(*) AS `freq`\n"
                            + "FROM " + wordCountTable + "\n"
                            + "GROUP BY `count`\n"
                            + "HAVING count(*) < 2");
    // ---------- consume stream from sink -------------------
    waitingExpectedResults("sink_" + format, expectedData, Duration.ofSeconds(10));
    query.getJobClient().get().cancel();
}
Also used : TableResult(org.apache.flink.table.api.TableResult)
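
The cancel at the end is the standard way to stop an unbounded INSERT job once the sink has produced the expected rows. A minimal standalone sketch of the same pattern, assuming placeholder tables src and snk are already registered (they are not part of the test above):

import org.apache.flink.core.execution.JobClient;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;

void cancelStreamingInsert(TableEnvironment tEnv) {
    // In streaming mode, INSERT INTO submits a detached, long-running job
    TableResult result = tEnv.executeSql("INSERT INTO snk SELECT * FROM src");
    // getJobClient() is an Optional: it is empty for statements that did not
    // submit a job (e.g. DDL), so guard the access rather than calling get()
    result.getJobClient().ifPresent(JobClient::cancel);
}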

Example 27 with TableResult

use of org.apache.flink.table.api.TableResult in project flink by apache.

the class HiveTableSourceITCase method testStreamPartitionReadByPartitionName.

@Test(timeout = 120000)
public void testStreamPartitionReadByPartitionName() throws Exception {
    final String catalogName = "hive";
    final String dbName = "source_db";
    final String tblName = "stream_partition_name_test";
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(100);
    StreamTableEnvironment tEnv = HiveTestUtils.createTableEnvInStreamingMode(env, SqlDialect.HIVE);
    tEnv.registerCatalog(catalogName, hiveCatalog);
    tEnv.useCatalog(catalogName);
    tEnv.executeSql("CREATE TABLE source_db.stream_partition_name_test (x int, y string, z int)" + " PARTITIONED BY (" + " pt_year int, pt_mon string, pt_day string) TBLPROPERTIES(" + "'streaming-source.enable'='true'," + "'streaming-source.monitor-interval'='1s'," + "'streaming-source.consume-start-offset'='pt_year=2019/pt_month=09/pt_day=02'" + ")");
    HiveTestUtils.createTextTableInserter(hiveCatalog, dbName, tblName)
            .addRow(new Object[] { 0, "a", 11 })
            .commit("pt_year='2019',pt_mon='09',pt_day='01'");
    HiveTestUtils.createTextTableInserter(hiveCatalog, dbName, tblName)
            .addRow(new Object[] { 1, "b", 12 })
            .commit("pt_year='2020',pt_mon='09',pt_day='03'");
    TableResult result = tEnv.executeSql("select * from hive.source_db.stream_partition_name_test");
    CloseableIterator<Row> iter = result.collect();
    Assert.assertEquals(Row.of(1, "b", "12", "2020", "09", "03").toString(), fetchRows(iter, 1).get(0));
    for (int i = 2; i < 6; i++) {
        try {
            Thread.sleep(1_000);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        HiveTestUtils.createTextTableInserter(hiveCatalog, dbName, tblName)
                .addRow(new Object[] { i, "new_add", 11 + i })
                .addRow(new Object[] { i, "new_add_1", 11 + i })
                .commit("pt_year='2020',pt_mon='10',pt_day='0" + i + "'");
        Assert.assertEquals(
                Arrays.asList(
                        Row.of(i, "new_add", 11 + i, "2020", "10", "0" + i).toString(),
                        Row.of(i, "new_add_1", 11 + i, "2020", "10", "0" + i).toString()),
                fetchRows(iter, 2));
    }
    result.getJobClient().get().cancel();
}
Also used : TableResult(org.apache.flink.table.api.TableResult) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Row(org.apache.flink.types.Row) Test(org.junit.Test)
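
Note that fetchRows is a private helper of the test class, not part of the Flink API. A plausible sketch of such a helper, under the assumption that it drains a fixed number of rows and normalizes their order so the assertions above are deterministic (the real implementation may differ):

import java.util.ArrayList;
import java.util.List;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;

private static List<String> fetchRows(CloseableIterator<Row> iter, int n) {
    List<String> rows = new ArrayList<>(n);
    for (int i = 0; i < n; i++) {
        // next() blocks on an unbounded streaming result until a row arrives
        rows.add(iter.next().toString());
    }
    // stable ordering: rows committed to the same partition may arrive interleaved
    rows.sort(String::compareTo);
    return rows;
}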

Example 28 with TableResult

use of org.apache.flink.table.api.TableResult in project flink by apache.

the class ParquetFileSystemITCase method testLimitableBulkFormat.

@Test
public void testLimitableBulkFormat() throws ExecutionException, InterruptedException {
    super.tableEnv()
            .executeSql("insert into parquetLimitTable select x, y, 1 as a from originalT")
            .await();
    TableResult tableResult1 = super.tableEnv().executeSql("SELECT * FROM parquetLimitTable limit 5");
    List<Row> rows1 = CollectionUtil.iteratorToList(tableResult1.collect());
    assertEquals(5, rows1.size());
    check("select a from parquetLimitTable limit 5", Arrays.asList(Row.of(1), Row.of(1), Row.of(1), Row.of(1), Row.of(1)));
}
Also used : TableResult(org.apache.flink.table.api.TableResult) Row(org.apache.flink.types.Row) Test(org.junit.Test)
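
CollectionUtil.iteratorToList drains the collect() iterator eagerly, so this pattern is only safe when the query is known to be bounded, which the LIMIT clause guarantees here. A minimal sketch, assuming a TableEnvironment with parquetLimitTable already registered:

import java.util.List;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;
import org.apache.flink.util.CollectionUtil;

void collectBoundedResult(TableEnvironment tEnv) throws Exception {
    TableResult result = tEnv.executeSql("SELECT * FROM parquetLimitTable LIMIT 5");
    // try-with-resources frees the cluster resources behind the iterator
    try (CloseableIterator<Row> it = result.collect()) {
        List<Row> rows = CollectionUtil.iteratorToList(it);
        System.out.println(rows.size()); // 5, capped by the LIMIT
    }
}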

Example 29 with TableResult

use of org.apache.flink.table.api.TableResult in project flink by apache.

the class HBaseConnectorITCase method testTableSink.

@Test
public void testTableSink() throws Exception {
    StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
    // register HBase table testTable1 which contains test data
    String table1DDL = createHBaseTableDDL(TEST_TABLE_1, false);
    tEnv.executeSql(table1DDL);
    String table2DDL = createHBaseTableDDL(TEST_TABLE_2, false);
    tEnv.executeSql(table2DDL);
    String query =
            "INSERT INTO " + TEST_TABLE_2
                    + " SELECT rowkey, family1, family2, family3"
                    + " FROM " + TEST_TABLE_1;
    TableResult tableResult = tEnv.executeSql(query);
    // wait to finish
    tableResult.await();
    assertEquals("Expected INSERT rowKind", RowKind.INSERT, tableResult.collect().next().getKind());
    // start a batch scan job to verify contents in HBase table
    TableEnvironment batchEnv = TableEnvironment.create(batchSettings);
    batchEnv.executeSql(table2DDL);
    List<String> expected = new ArrayList<>();
    expected.add("+I[1, 10, Hello-1, 100, 1.01, false, Welt-1]\n");
    expected.add("+I[2, 20, Hello-2, 200, 2.02, true, Welt-2]\n");
    expected.add("+I[3, 30, Hello-3, 300, 3.03, false, Welt-3]\n");
    expected.add("+I[4, 40, null, 400, 4.04, true, Welt-4]\n");
    expected.add("+I[5, 50, Hello-5, 500, 5.05, false, Welt-5]\n");
    expected.add("+I[6, 60, Hello-6, 600, 6.06, true, Welt-6]\n");
    expected.add("+I[7, 70, Hello-7, 700, 7.07, false, Welt-7]\n");
    expected.add("+I[8, 80, null, 800, 8.08, true, Welt-8]\n");
    Table countTable = batchEnv.sqlQuery("SELECT COUNT(h.rowkey) FROM " + TEST_TABLE_2 + " AS h");
    assertEquals(Long.valueOf(expected.size()), countTable.execute().collect().next().getField(0));
    Table table =
            batchEnv.sqlQuery(
                    "SELECT h.rowkey,"
                            + " h.family1.col1,"
                            + " h.family2.col1,"
                            + " h.family2.col2,"
                            + " h.family3.col1,"
                            + " h.family3.col2,"
                            + " h.family3.col3"
                            + " FROM " + TEST_TABLE_2 + " AS h");
    TableResult tableResult2 = table.execute();
    List<Row> results = CollectionUtil.iteratorToList(tableResult2.collect());
    TestBaseUtils.compareResultAsText(results, String.join("", expected));
}
Also used : TableResult(org.apache.flink.table.api.TableResult) Table(org.apache.flink.table.api.Table) ArrayList(java.util.ArrayList) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) TableEnvironment(org.apache.flink.table.api.TableEnvironment) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Row(org.apache.flink.types.Row) Test(org.junit.Test)
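
The tableResult.await() call is what makes the asynchronous INSERT submission synchronous; without it, the batch scan below it could read the HBase table before the sink has flushed. A minimal sketch of the same synchronization with a timeout as a safety net (the table names are placeholders, not from the test above):

import java.util.concurrent.TimeUnit;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;

void insertAndWait(TableEnvironment tEnv) throws Exception {
    TableResult insertResult =
            tEnv.executeSql("INSERT INTO sinkTable SELECT * FROM sourceTable");
    // blocks until the job reaches a terminal state; the timeout overload
    // avoids hanging forever if the pipeline is accidentally unbounded
    insertResult.await(2, TimeUnit.MINUTES);
}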

Example 30 with TableResult

use of org.apache.flink.table.api.TableResult in project flink by apache.

the class OracleTableSinkITCase method testBatchSink.

@Test
public void testBatchSink() throws Exception {
    TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
    tEnv.executeSql("CREATE TABLE USER_RESULT(" + "NAME VARCHAR," + "SCORE BIGINT" + ") WITH ( " + "'connector' = 'jdbc'," + "'url'='" + containerUrl + "'," + "'table-name' = '" + OUTPUT_TABLE3 + "'," + "'sink.buffer-flush.max-rows' = '2'," + "'sink.buffer-flush.interval' = '300ms'," + "'sink.max-retries' = '4'" + ")");
    TableResult tableResult = tEnv.executeSql("INSERT INTO USER_RESULT\n" + "SELECT user_name, score " + "FROM (VALUES (1, 'Bob'), (22, 'Tom'), (42, 'Kim'), " + "(42, 'Kim'), (1, 'Bob')) " + "AS UserCountTable(score, user_name)");
    tableResult.await();
    check(
            new Row[] {
                Row.of("Bob", 1),
                Row.of("Tom", 22),
                Row.of("Kim", 42),
                Row.of("Kim", 42),
                Row.of("Bob", 1)
            },
            containerUrl,
            OUTPUT_TABLE3,
            new String[] { "NAME", "SCORE" });
}
Also used : TableResult(org.apache.flink.table.api.TableResult) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) TableEnvironment(org.apache.flink.table.api.TableEnvironment) Test(org.junit.Test)
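
The same JDBC sink can also be registered programmatically through TableDescriptor (available since Flink 1.14) instead of a DDL string, which keeps the flush and retry settings as plain key-value options. A sketch with a placeholder URL; only the option keys are taken from the test above:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.TableEnvironment;

void registerJdbcSink(TableEnvironment tEnv) {
    TableDescriptor jdbcSink =
            TableDescriptor.forConnector("jdbc")
                    .schema(
                            Schema.newBuilder()
                                    .column("NAME", DataTypes.STRING())
                                    .column("SCORE", DataTypes.BIGINT())
                                    .build())
                    // placeholder connection settings
                    .option("url", "jdbc:oracle:thin:@//localhost:1521/ORCLCDB")
                    .option("table-name", "USER_RESULT")
                    .option("sink.buffer-flush.max-rows", "2")
                    .option("sink.buffer-flush.interval", "300ms")
                    .option("sink.max-retries", "4")
                    .build();
    tEnv.createTemporaryTable("USER_RESULT", jdbcSink);
}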

Aggregations

TableResult (org.apache.flink.table.api.TableResult): 39
Test (org.junit.Test): 26
Row (org.apache.flink.types.Row): 20
StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment): 15
ArrayList (java.util.ArrayList): 7
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 7
Table (org.apache.flink.table.api.Table): 7
TableEnvironment (org.apache.flink.table.api.TableEnvironment): 7
JobClient (org.apache.flink.core.execution.JobClient): 4
Configuration (org.apache.flink.configuration.Configuration): 3
ParameterTool (org.apache.flink.api.java.utils.ParameterTool): 2
TableDescriptor (org.apache.flink.table.api.TableDescriptor): 2
TableEnvironmentInternal (org.apache.flink.table.api.internal.TableEnvironmentInternal): 2
CsvTableSink (org.apache.flink.table.sinks.CsvTableSink): 2
DataType (org.apache.flink.table.types.DataType): 2
File (java.io.File): 1
IOException (java.io.IOException): 1
BigDecimal (java.math.BigDecimal): 1
Timestamp (java.sql.Timestamp): 1
Random (java.util.Random): 1