Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class TableEnvHiveConnectorITCase, method testMultiInputBroadcast:
@Test
public void testMultiInputBroadcast() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.useDatabase("db1");
        tableEnv.executeSql("create table src1(key string, val string)");
        tableEnv.executeSql("create table src2(key string, val string)");
        tableEnv.executeSql("create table dest(key string, val string)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src1")
                .addRow(new Object[] {"1", "val1"})
                .addRow(new Object[] {"2", "val2"})
                .addRow(new Object[] {"3", "val3"})
                .commit();
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src2")
                .addRow(new Object[] {"3", "val4"})
                .addRow(new Object[] {"4", "val4"})
                .commit();
        // Both UNION ALL branches join src2 against the same small table src1,
        // exercising a multi-input operator fed by a broadcast edge.
        tableEnv.executeSql(
                        "INSERT OVERWRITE dest\n"
                                + "SELECT j.*\n"
                                + "FROM (SELECT t1.key, p1.val\n"
                                + "  FROM src2 t1\n"
                                + "  LEFT OUTER JOIN src1 p1 ON (t1.key = p1.key)\n"
                                + "  UNION ALL\n"
                                + "  SELECT t2.key, p2.val\n"
                                + "  FROM src2 t2\n"
                                + "  LEFT OUTER JOIN src1 p2 ON (t2.key = p2.key)) j")
                .await();
        List<Row> results = CollectionUtil.iteratorToList(
                tableEnv.executeSql("select * from dest order by key").collect());
        assertEquals("[+I[3, val3], +I[3, val3], +I[4, null], +I[4, null]]", results.toString());
    } finally {
        tableEnv.useDatabase("default");
        tableEnv.executeSql("drop database db1 cascade");
    }
}
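The helper getTableEnvWithHiveCatalog() is not part of this excerpt. Below is a minimal sketch of what such a helper could look like, assuming a hiveCatalog test fixture and that the Hive SQL dialect is wanted for the Hive-style DDL; the actual helper in Flink's test code may differ.

// Hypothetical sketch only; not the original Flink helper.
private TableEnvironment getTableEnvWithHiveCatalog() {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
    // hiveCatalog is assumed to be the HiveCatalog test fixture used above.
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    // Hive-style DDL such as "partitioned by" needs the Hive dialect.
    tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
    return tableEnv;
}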
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class TableEnvHiveConnectorITCase, method testDefaultPartitionName:
@Test
public void testDefaultPartitionName() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    tableEnv.executeSql("create table db1.src (x int, y int)");
    tableEnv.executeSql("create table db1.part (x int) partitioned by (y int)");
    HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
            .addRow(new Object[] {1, 1})
            .addRow(new Object[] {2, null})
            .commit();
    // A NULL partition value should end up in Hive's default partition.
    tableEnv.executeSql("insert into db1.part select * from db1.src").await();
    HiveConf hiveConf = hiveCatalog.getHiveConf();
    String defaultPartName = hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
    Table hiveTable = hmsClient.getTable("db1", "part");
    Path defaultPartPath = new Path(hiveTable.getSd().getLocation(), "y=" + defaultPartName);
    FileSystem fs = defaultPartPath.getFileSystem(hiveConf);
    assertTrue(fs.exists(defaultPartPath));
    // The default partition is read back as NULL.
    TableImpl flinkTable = (TableImpl) tableEnv.sqlQuery("select y, x from db1.part order by x");
    List<Row> rows = CollectionUtil.iteratorToList(flinkTable.execute().collect());
    assertEquals("[+I[1, 1], +I[null, 2]]", rows.toString());
    tableEnv.executeSql("drop database db1 cascade");
}
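A NULL partition value is materialized under a directory named after hive.exec.default.partition.name, which defaults to __HIVE_DEFAULT_PARTITION__. As an illustration only, here is a sketch that lists the partition directories on disk, reusing the hiveConf and hmsClient fixtures from the test above:

// Sketch, not part of the original test: list the partition directories.
Table hiveTable = hmsClient.getTable("db1", "part");
Path tableLocation = new Path(hiveTable.getSd().getLocation());
FileSystem fs = tableLocation.getFileSystem(hiveConf);
// Expect "y=1" plus "y=__HIVE_DEFAULT_PARTITION__" for the row whose y was NULL.
for (FileStatus status : fs.listStatus(tableLocation)) {
    System.out.println(status.getPath().getName());
}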
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class TableEnvHiveConnectorITCase, method testOverwriteWithEmptySource:
@Test
public void testOverwriteWithEmptySource() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.useDatabase("db1");
        tableEnv.executeSql("create table src (x int, p int)");
        // Non-partitioned table: overwriting from an empty source empties it.
        tableEnv.executeSql("create table dest (x int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "dest")
                .addRow(new Object[] {1})
                .addRow(new Object[] {2})
                .commit();
        tableEnv.executeSql("insert overwrite table dest select x from src").await();
        List<Row> results = CollectionUtil.iteratorToList(
                tableEnv.executeSql("select * from dest").collect());
        assertEquals(0, results.size());
        // Dynamic partitions: an empty source matches no partitions, so the
        // existing partitions are left untouched.
        tableEnv.executeSql("create table destp (x int) partitioned by (p int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "destp")
                .addRow(new Object[] {1})
                .commit("p=1");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "destp")
                .addRow(new Object[] {2})
                .commit("p=2");
        tableEnv.executeSql("insert overwrite table destp partition (p) select * from src").await();
        results = CollectionUtil.iteratorToList(
                tableEnv.executeSql("select * from destp order by x").collect());
        assertEquals("[+I[1, 1], +I[2, 2]]", results.toString());
        // Static partition: overwriting p=1 from an empty source also leaves
        // the existing data in place.
        tableEnv.executeSql("insert overwrite table destp partition(p=1) select x from src").await();
        results = CollectionUtil.iteratorToList(
                tableEnv.executeSql("select * from destp order by x").collect());
        assertEquals("[+I[1, 1], +I[2, 2]]", results.toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
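Note what the assertions show: overwriting a non-partitioned table from an empty source truncates it, while both the dynamic and the static partition overwrites from an empty source leave existing partitions as they are. If the goal were to actually clear a partition, it would have to be dropped explicitly; a hedged sketch, assuming the Hive dialect's ALTER TABLE support:

// Sketch only: removes partition p=1 and its data, which the empty
// "insert overwrite ... select" above deliberately does not do.
tableEnv.executeSql("alter table destp drop partition (p=1)");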
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class HiveCatalogITCase, method testTableApiWithProctime:
private void testTableApiWithProctime(boolean isStreaming) {
    TableEnvironment tableEnv = prepareTable(isStreaming);
    List<Row> rows = CollectionUtil.iteratorToList(
            tableEnv.from("proctime_src")
                    .select($("price"), $("ts"), $("l_proctime"))
                    .execute()
                    .collect());
    assertThat(rows).hasSize(5);
    tableEnv.executeSql("DROP TABLE proctime_src");
}
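prepareTable(isStreaming) is a helper of HiveCatalogITCase and is not shown here. Below is a plausible sketch of the kind of table it would have to register; the connector choice and row count are assumptions, but l_proctime must be a computed processing-time column for the select above to work.

// Hypothetical DDL: the real prepareTable(isStreaming) may use another connector.
tableEnv.executeSql(
        "create table proctime_src ("
                + " price double,"
                + " ts timestamp(3),"
                + " l_proctime as PROCTIME()" // computed processing-time attribute
                + ") with ("
                + " 'connector' = 'datagen',"
                + " 'number-of-rows' = '5')"); // matches the hasSize(5) assertion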
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class HiveCatalogITCase, method testTemporaryGenericTable:
@Test
public void testTemporaryGenericTable() throws Exception {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    TestCollectionTableFactory.reset();
    TestCollectionTableFactory.initData(Arrays.asList(Row.of(1), Row.of(2)));
    // Temporary tables are kept in the catalog manager's memory and are not
    // persisted to the Hive Metastore, so generic (non-Hive) connectors work
    // even while a HiveCatalog is the current catalog.
    tableEnv.executeSql(
            "create temporary table src(x int) with ("
                    + "'connector'='COLLECTION',"
                    + "'is-bounded' = 'false')");
    File tempDir = Files.createTempDirectory("dest-").toFile();
    Runtime.getRuntime()
            .addShutdownHook(new Thread(() -> org.apache.commons.io.FileUtils.deleteQuietly(tempDir)));
    tableEnv.executeSql(
            "create temporary table dest(x int) with ("
                    + "'connector' = 'filesystem',"
                    + String.format("'path' = 'file://%s/1.csv',", tempDir.getAbsolutePath())
                    + "'format' = 'csv')");
    tableEnv.executeSql("insert into dest select * from src").await();
    tableEnv.executeSql(
            "create temporary table datagen(i int) with ("
                    + "'connector'='datagen',"
                    + "'rows-per-second'='5',"
                    + "'fields.i.kind'='sequence',"
                    + "'fields.i.start'='1',"
                    + "'fields.i.end'='10')");
    tableEnv.executeSql("create temporary table blackhole(i int) with ('connector'='blackhole')");
    tableEnv.executeSql("insert into blackhole select * from datagen").await();
}
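The first pipeline should leave the two COLLECTION rows in the temporary CSV table. As a small sketch, a read-back check that could follow the first insert, using only the tables defined above:

// Sketch only: read the filesystem table back and check both rows arrived.
List<Row> written = CollectionUtil.iteratorToList(
        tableEnv.executeSql("select * from dest").collect());
assertEquals(2, written.size());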