Example 21 with TableEnvironment

use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From class TableEnvHiveConnectorITCase, method testMultiInputBroadcast.

@Test
public void testMultiInputBroadcast() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.useDatabase("db1");
        tableEnv.executeSql("create table src1(key string, val string)");
        tableEnv.executeSql("create table src2(key string, val string)");
        tableEnv.executeSql("create table dest(key string, val string)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src1").addRow(new Object[] { "1", "val1" }).addRow(new Object[] { "2", "val2" }).addRow(new Object[] { "3", "val3" }).commit();
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src2").addRow(new Object[] { "3", "val4" }).addRow(new Object[] { "4", "val4" }).commit();
        tableEnv.executeSql("INSERT OVERWRITE dest\n" + "SELECT j.*\n" + "FROM (SELECT t1.key, p1.val\n" + "      FROM src2 t1\n" + "      LEFT OUTER JOIN src1 p1\n" + "      ON (t1.key = p1.key)\n" + "      UNION ALL\n" + "      SELECT t2.key, p2.val\n" + "      FROM src2 t2\n" + "      LEFT OUTER JOIN src1 p2\n" + "      ON (t2.key = p2.key)) j").await();
        List<Row> results = CollectionUtil.iteratorToList(tableEnv.executeSql("select * from dest order by key").collect());
        assertEquals("[+I[3, val3], +I[3, val3], +I[4, null], +I[4, null]]", results.toString());
    } finally {
        tableEnv.useDatabase("default");
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Also used : TableEnvironment (org.apache.flink.table.api.TableEnvironment), Row (org.apache.flink.types.Row), Test (org.junit.Test)
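
The getTableEnvWithHiveCatalog() helper is defined elsewhere in the test class and not shown in this excerpt. A minimal sketch of what such a helper typically does, assuming batch execution mode and a pre-built HiveCatalog; the Hive-dialect switch is an assumption, included because Hive-style DML like "INSERT OVERWRITE dest" must parse:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

// Hypothetical reconstruction of the helper these tests call; hiveCatalog is the
// test class's HiveCatalog field. Register the catalog, make it current, and
// switch to the Hive SQL dialect so Hive DDL/DML statements parse as written.
private static TableEnvironment getTableEnvWithHiveCatalog() {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
    return tableEnv;
}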

Example 22 with TableEnvironment

use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From class TableEnvHiveConnectorITCase, method testDefaultPartitionName.

@Test
public void testDefaultPartitionName() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    tableEnv.executeSql("create table db1.src (x int, y int)");
    tableEnv.executeSql("create table db1.part (x int) partitioned by (y int)");
    HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src").addRow(new Object[] { 1, 1 }).addRow(new Object[] { 2, null }).commit();
    // test generating partitions with default name
    tableEnv.executeSql("insert into db1.part select * from db1.src").await();
    HiveConf hiveConf = hiveCatalog.getHiveConf();
    String defaultPartName = hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
    Table hiveTable = hmsClient.getTable("db1", "part");
    Path defaultPartPath = new Path(hiveTable.getSd().getLocation(), "y=" + defaultPartName);
    FileSystem fs = defaultPartPath.getFileSystem(hiveConf);
    assertTrue(fs.exists(defaultPartPath));
    TableImpl flinkTable = (TableImpl) tableEnv.sqlQuery("select y, x from db1.part order by x");
    List<Row> rows = CollectionUtil.iteratorToList(flinkTable.execute().collect());
    assertEquals("[+I[1, 1], +I[null, 2]]", rows.toString());
    tableEnv.executeSql("drop database db1 cascade");
}
Also used : ObjectPath (org.apache.flink.table.catalog.ObjectPath), Path (org.apache.hadoop.fs.Path), CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable), Table (org.apache.hadoop.hive.metastore.api.Table), FileSystem (org.apache.hadoop.fs.FileSystem), TableImpl (org.apache.flink.table.api.internal.TableImpl), TableEnvironment (org.apache.flink.table.api.TableEnvironment), HiveConf (org.apache.hadoop.hive.conf.HiveConf), Row (org.apache.flink.types.Row), Test (org.junit.Test)
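
The test works because Hive substitutes a sentinel directory name for null dynamic-partition values; that sentinel is what the test resolves from the configuration before checking the filesystem. A minimal standalone check of where the value comes from (the printed default holds unless the deployment overrides hive.exec.default.partition.name):

import org.apache.hadoop.hive.conf.HiveConf;

// HiveConf.ConfVars.DEFAULTPARTITIONNAME maps to hive.exec.default.partition.name;
// rows whose dynamic partition value is null land under y=<this name> on disk.
HiveConf hiveConf = new HiveConf();
String defaultPartName = hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
System.out.println(defaultPartName); // "__HIVE_DEFAULT_PARTITION__" unless overridden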

Example 23 with TableEnvironment

use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From class TableEnvHiveConnectorITCase, method testOverwriteWithEmptySource.

@Test
public void testOverwriteWithEmptySource() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.useDatabase("db1");
        tableEnv.executeSql("create table src (x int,p int)");
        // non-partitioned table
        tableEnv.executeSql("create table dest (x int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "dest").addRow(new Object[] { 1 }).addRow(new Object[] { 2 }).commit();
        tableEnv.executeSql("insert overwrite table dest select x from src").await();
        List<Row> results = CollectionUtil.iteratorToList(tableEnv.executeSql("select * from dest").collect());
        assertEquals(0, results.size());
        // dynamic partitioned table
        tableEnv.executeSql("create table destp (x int) partitioned by (p int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "destp").addRow(new Object[] { 1 }).commit("p=1");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "destp").addRow(new Object[] { 2 }).commit("p=2");
        tableEnv.executeSql("insert overwrite table destp partition (p) select * from src").await();
        results = CollectionUtil.iteratorToList(tableEnv.executeSql("select * from destp order by x").collect());
        assertEquals("[+I[1, 1], +I[2, 2]]", results.toString());
        // static partitioned table
        tableEnv.executeSql("insert overwrite table destp partition(p=1) select x from src").await();
        results = CollectionUtil.iteratorToList(tableEnv.executeSql("select * from destp order by x").collect());
        assertEquals("[+I[1, 1], +I[2, 2]]", results.toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Also used : TableEnvironment (org.apache.flink.table.api.TableEnvironment), Row (org.apache.flink.types.Row), Test (org.junit.Test)
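
Read against its assertions, this test pins down three distinct empty-source OVERWRITE behaviors. The statements below are copied from the test; the comments only restate what each assertion verifies:

// 1) Non-partitioned target: OVERWRITE from an empty source leaves dest empty
//    (the test asserts results.size() == 0).
tableEnv.executeSql("insert overwrite table dest select x from src").await();
// 2) Dynamic-partition OVERWRITE: partitions that receive no rows are left
//    untouched, so the pre-inserted p=1 and p=2 data survives the empty insert.
tableEnv.executeSql("insert overwrite table destp partition (p) select * from src").await();
// 3) Static-partition OVERWRITE of p=1 with an empty source: the test asserts the
//    existing rows remain here as well.
tableEnv.executeSql("insert overwrite table destp partition(p=1) select x from src").await();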

Example 24 with TableEnvironment

use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From class HiveCatalogITCase, method testTableApiWithProctime.

private void testTableApiWithProctime(boolean isStreaming) {
    TableEnvironment tableEnv = prepareTable(isStreaming);
    List<Row> rows =
            CollectionUtil.iteratorToList(
                    tableEnv.from("proctime_src")
                            .select($("price"), $("ts"), $("l_proctime"))
                            .execute()
                            .collect());
    assertThat(rows).hasSize(5);
    tableEnv.executeSql("DROP TABLE proctime_src");
}
Also used : TableEnvironment (org.apache.flink.table.api.TableEnvironment), Row (org.apache.flink.types.Row)
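
The prepareTable(boolean) helper lives elsewhere in HiveCatalogITCase; presumably isStreaming selects streaming versus batch environment settings. A hedged sketch of DDL that would yield a table shaped like proctime_src: the column names come from the select(...) call above, while the types, the connector, and its options are assumptions for illustration only:

// Hypothetical stand-in for prepareTable(): a generic table whose
// processing-time attribute is declared as a computed column.
tableEnv.executeSql(
        "create table proctime_src ("
                + " price double,"
                + " ts timestamp(3),"
                + " l_proctime as PROCTIME()"
                + ") with ("
                + " 'connector' = 'datagen',"
                + " 'number-of-rows' = '5')");

With number-of-rows set to 5, reading the table back yields the five rows the assertion expects.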

Example 25 with TableEnvironment

use of org.apache.flink.table.api.TableEnvironment in project flink by apache.

From class HiveCatalogITCase, method testTemporaryGenericTable.

@Test
public void testTemporaryGenericTable() throws Exception {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    TestCollectionTableFactory.reset();
    TestCollectionTableFactory.initData(Arrays.asList(Row.of(1), Row.of(2)));
    tableEnv.executeSql("create temporary table src(x int) with ('connector'='COLLECTION','is-bounded' = 'false')");
    File tempDir = Files.createTempDirectory("dest-").toFile();
    Runtime.getRuntime()
            .addShutdownHook(
                    new Thread(() -> org.apache.commons.io.FileUtils.deleteQuietly(tempDir)));
    tableEnv.executeSql(
            "create temporary table dest(x int) with ("
                    + "'connector' = 'filesystem',"
                    + String.format("'path' = 'file://%s/1.csv',", tempDir.getAbsolutePath())
                    + "'format' = 'csv')");
    tableEnv.executeSql("insert into dest select * from src").await();
    tableEnv.executeSql(
            "create temporary table datagen(i int) with ("
                    + "'connector'='datagen',"
                    + "'rows-per-second'='5',"
                    + "'fields.i.kind'='sequence',"
                    + "'fields.i.start'='1',"
                    + "'fields.i.end'='10')");
    tableEnv.executeSql("create temporary table blackhole(i int) with ('connector'='blackhole')");
    tableEnv.executeSql("insert into blackhole select * from datagen").await();
}
Also used : TableEnvironment (org.apache.flink.table.api.TableEnvironment), File (java.io.File), Test (org.junit.Test)
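
Temporary tables created this way live only in the session, not in the Hive Metastore, which is why they can use generic connectors such as COLLECTION, datagen, and blackhole inside a Hive catalog without being persisted. One way to observe this from the session; listTemporaryTables() is a real TableEnvironment method, and the expected names follow from the DDL above:

// The four temporary tables are visible to this session only; they are never
// written to the metastore and disappear when the session ends.
for (String name : tableEnv.listTemporaryTables()) {
    System.out.println(name); // src, dest, datagen, blackhole
}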

Aggregations

TableEnvironment (org.apache.flink.table.api.TableEnvironment): 137
Test (org.junit.Test): 95
Row (org.apache.flink.types.Row): 58
StreamTableEnvironment (org.apache.flink.table.api.bridge.java.StreamTableEnvironment): 38
Table (org.apache.flink.table.api.Table): 27
ObjectPath (org.apache.flink.table.catalog.ObjectPath): 19
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 14
ArrayList (java.util.ArrayList): 13
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 12
HashMap (java.util.HashMap): 11
EnvironmentSettings (org.apache.flink.table.api.EnvironmentSettings): 10
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 10
TableResult (org.apache.flink.table.api.TableResult): 8
File (java.io.File): 7
Constructor (java.lang.reflect.Constructor): 7
TableImpl (org.apache.flink.table.api.internal.TableImpl): 7
TableException (org.apache.flink.table.api.TableException): 5
List (java.util.List): 4
Configuration (org.apache.flink.configuration.Configuration): 4
TableSchema (org.apache.flink.table.api.TableSchema): 4