Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
From the class HiveRunnerITCase, method testWriteNullValues.
@Test
public void testWriteNullValues() throws Exception {
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    tableEnv.executeSql("create database db1");
    try {
        // 17 data types
        tableEnv.executeSql(
                "create table db1.src"
                        + "(t tinyint,s smallint,i int,b bigint,f float,d double,de decimal(10,5),ts timestamp,dt date,"
                        + "str string,ch char(5),vch varchar(8),bl boolean,bin binary,arr array<int>,mp map<int,string>,strt struct<f1:int,f2:string>)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] {
                        null, null, null, null, null, null, null, null, null,
                        null, null, null, null, null, null, null, null})
                .commit();
        hiveShell.execute("create table db1.dest like db1.src");
        tableEnv.executeSql("insert into db1.dest select * from db1.src").await();
        List<String> results = hiveShell.executeQuery("select * from db1.dest");
        assertEquals(1, results.size());
        String[] cols = results.get(0).split("\t");
        assertEquals(17, cols.length);
        assertEquals("NULL", cols[0]);
        assertEquals(1, new HashSet<>(Arrays.asList(cols)).size());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
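The tests that follow obtain their environment from a getTableEnvWithHiveCatalog() helper that this page does not show. Judging from the setup in testWriteNullValues above, it plausibly wraps the same three calls; a sketch under that assumption, not the actual Flink source:

private TableEnvironment getTableEnvWithHiveCatalog() {
    // Batch-mode TableEnvironment in the Hive dialect, with the test's
    // HiveCatalog registered and made the current catalog (assumed helper,
    // mirroring the setup in testWriteNullValues).
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    return tableEnv;
}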
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
From the class HiveRunnerITCase, method testDecimal.
@Test
public void testDecimal() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src1 (x decimal(10,2))");
        tableEnv.executeSql("create table db1.src2 (x decimal(10,2))");
        tableEnv.executeSql("create table db1.dest (x decimal(10,2))");
        // populate src1 from Hive
        // the TABLE keyword in INSERT INTO is mandatory prior to Hive 1.1.0
        hiveShell.execute("insert into table db1.src1 values (1.0),(2.12),(5.123),(5.456),(123456789.12)");
        // populate src2 with the same data from Flink
        tableEnv.executeSql("insert into db1.src2 values (1.0),(2.12),(5.123),(5.456),(123456789.12)").await();
        // verify src1 and src2 contain the same data
        verifyHiveQueryResult("select * from db1.src2", hiveShell.executeQuery("select * from db1.src1"));
        // populate dest with src1 from Flink -- to test reading the decimal type from Hive
        tableEnv.executeSql("insert into db1.dest select * from db1.src1").await();
        verifyHiveQueryResult("select * from db1.dest", hiveShell.executeQuery("select * from db1.src1"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
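The point of inserting the same literals from both engines is scale handling: 5.123 and 5.456 carry more fractional digits than decimal(10,2) allows, so Flink must round them the same way Hive does for verifyHiveQueryResult to pass. A minimal sketch of the expected coercion, assuming half-up rounding (the rounding mode is an assumption here, not something this page states):

import java.math.BigDecimal;
import java.math.RoundingMode;

// 5.123 would be stored as 5.12 and 5.456 as 5.46 under decimal(10,2),
// assuming half-up rounding on insert
BigDecimal stored = new BigDecimal("5.456").setScale(2, RoundingMode.HALF_UP);
assertEquals(new BigDecimal("5.46"), stored);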
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
From the class HiveRunnerITCase, method testDynamicPartition.
@Test
public void testDynamicPartition() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x int, y string, z double)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] {1, "a", 1.1})
                .addRow(new Object[] {2, "a", 2.2})
                .addRow(new Object[] {3, "b", 3.3})
                .commit();
        tableEnv.executeSql("create table db1.dest (x int) partitioned by (p1 string, p2 double)");
        tableEnv.executeSql("insert into db1.dest select * from db1.src").await();
        assertEquals(3, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size());
        verifyHiveQueryResult("select * from db1.dest", Arrays.asList("1\ta\t1.1", "2\ta\t2.2", "3\tb\t3.3"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
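In a fully dynamic insert like the one above, the trailing select columns feed the partition columns in order: y becomes p1 and z becomes p2, so the three rows yield the three partitions (a, 1.1), (a, 2.2), and (b, 3.3) asserted against the catalog. The same data could also be written with static partition specs; a hypothetical equivalent for the first row only, for illustration rather than taken from the test:

// static-partition form of the first source row (sketch; the dynamic
// insert above handles all rows in one statement)
tableEnv.executeSql(
        "insert into db1.dest partition (p1='a', p2=1.1) select x from db1.src where x = 1").await();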
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
From the class HiveRunnerITCase, method testViews.
@Test
public void testViews() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (key int,val string)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] {1, "a"})
                .addRow(new Object[] {1, "aa"})
                .addRow(new Object[] {1, "aaa"})
                .addRow(new Object[] {2, "b"})
                .addRow(new Object[] {3, "c"})
                .addRow(new Object[] {3, "ccc"})
                .commit();
        tableEnv.executeSql("create table db1.keys (key int,name string)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "keys")
                .addRow(new Object[] {1, "key1"})
                .addRow(new Object[] {2, "key2"})
                .addRow(new Object[] {3, "key3"})
                .addRow(new Object[] {4, "key4"})
                .commit();
        hiveShell.execute("create view db1.v1 as select key as k,val as v from db1.src limit 2");
        hiveShell.execute("create view db1.v2 as select key,count(*) from db1.src group by key having count(*)>1 order by key");
        hiveShell.execute("create view db1.v3 as select k.key,k.name,count(*) from db1.src s join db1.keys k on s.key=k.key group by k.key,k.name order by k.key");
        List<Row> results = CollectionUtil.iteratorToList(
                tableEnv.sqlQuery("select count(v) from db1.v1").execute().collect());
        assertEquals("[+I[2]]", results.toString());
        results = CollectionUtil.iteratorToList(
                tableEnv.sqlQuery("select * from db1.v2").execute().collect());
        assertEquals("[+I[1, 3], +I[3, 2]]", results.toString());
        results = CollectionUtil.iteratorToList(
                tableEnv.sqlQuery("select * from db1.v3").execute().collect());
        assertEquals("[+I[1, key1, 3], +I[2, key2, 1], +I[3, key3, 2]]", results.toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
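Note the +I prefix in the expected strings: it is the RowKind of each collected Row (an insert-only change), which Row.toString() prints ahead of the field values. A small standalone illustration:

import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;

// Row.toString() prepends the row kind's short string, "+I" for INSERT
Row row = Row.ofKind(RowKind.INSERT, 1, "key1", 3L);
System.out.println(row); // prints +I[1, key1, 3]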
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
From the class HiveRunnerITCase, method testPartialDynamicPartition.
@Test
public void testPartialDynamicPartition() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x int, y string)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] {1, "a"})
                .addRow(new Object[] {2, "b"})
                .commit();
        tableEnv.executeSql("create table db1.dest (x int) partitioned by (p1 double, p2 string)");
        tableEnv.executeSql("insert into db1.dest partition (p1=1.1,p2) select x,y from db1.src").await();
        assertEquals(2, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size());
        verifyHiveQueryResult("select * from db1.dest", Arrays.asList("1\t1.1\ta", "2\t1.1\tb"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
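Here the partition spec is partially static: p1 is pinned to 1.1 in the PARTITION clause and only p2 is derived from the select, so the two source rows land in partitions (p1=1.1, p2=a) and (p1=1.1, p2=b). Assuming the usual column-order rule for dynamic partitions, the fully dynamic form below would be expected to write the same data; a sketch, not taken from the test:

// fully dynamic variant: supply the static value as a select column instead
tableEnv.executeSql("insert into db1.dest select x, cast(1.1 as double), y from db1.src").await();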