Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class HiveRunnerITCase, method testOrcSchemaEvol.
@Test
public void testOrcSchemaEvol() throws Exception {
    // not supported until 2.1.0 -- https://issues.apache.org/jira/browse/HIVE-11981,
    // https://issues.apache.org/jira/browse/HIVE-13178
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_210_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x smallint,y int) stored as orc");
        hiveShell.execute("insert into table db1.src values (1,100),(2,200)");
        tableEnv.getConfig()
                .getConfiguration()
                .setBoolean(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true);
        tableEnv.executeSql("alter table db1.src change x x int");
        assertEquals(
                "[+I[1, 100], +I[2, 200]]",
                CollectionUtil.iteratorToList(
                                tableEnv.sqlQuery("select * from db1.src").execute().collect())
                        .toString());
        tableEnv.executeSql("alter table db1.src change y y string");
        assertEquals(
                "[+I[1, 100], +I[2, 200]]",
                CollectionUtil.iteratorToList(
                                tableEnv.sqlQuery("select * from db1.src").execute().collect())
                        .toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
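The test enables the fallback MapRed reader so that ORC files written before an ALTER TABLE ... CHANGE are still readable after the column types are widened. Below is a minimal standalone sketch of the same flow outside the test harness; the catalog name "myhive" and the hive-conf directory are assumptions, and the db1.src table is assumed to exist already.

import org.apache.flink.connectors.hive.HiveOptions;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class OrcSchemaEvolutionSketch {
    public static void main(String[] args) throws Exception {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Hypothetical catalog name, default database and hive-site.xml location.
        HiveCatalog hive = new HiveCatalog("myhive", "default", "/opt/hive-conf");
        tableEnv.registerCatalog("myhive", hive);
        tableEnv.useCatalog("myhive");
        // Fall back to Hive's MapRed record reader, which tolerates ORC schema changes.
        tableEnv.getConfig()
                .getConfiguration()
                .setBoolean(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true);
        // The query still succeeds after the column type was widened via ALTER TABLE.
        tableEnv.executeSql("select * from db1.src").print();
    }
}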
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class HiveRunnerITCase, method testCompressTextTable.
private void testCompressTextTable(boolean batch) throws Exception {
    TableEnvironment tableEnv =
            batch ? getTableEnvWithHiveCatalog() : getStreamTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (x string,y string)");
        hiveShell.execute("create table db1.dest like db1.src");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] {"a", "b"})
                .addRow(new Object[] {"c", "d"})
                .commit();
        hiveCatalog.getHiveConf().setBoolVar(HiveConf.ConfVars.COMPRESSRESULT, true);
        tableEnv.executeSql("insert into db1.dest select * from db1.src").await();
        List<String> expected = Arrays.asList("a\tb", "c\td");
        verifyHiveQueryResult("select * from db1.dest", expected);
        verifyFlinkQueryResult(tableEnv.sqlQuery("select * from db1.dest"), expected);
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
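The key step is flipping HiveConf.ConfVars.COMPRESSRESULT (hive.exec.compress.output) on the catalog's HiveConf before the INSERT, so the files written by Flink are compressed. A minimal sketch of the same idea with a user-created HiveCatalog is shown below; the catalog name, conf directory, and the db1.src/db1.dest tables are assumptions.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.hadoop.hive.conf.HiveConf;

public class CompressedTextTableSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical catalog name, default database and hive-site.xml location.
        HiveCatalog hive = new HiveCatalog("myhive", "default", "/opt/hive-conf");
        // Ask Hive to compress the output files produced by the INSERT below.
        hive.getHiveConf().setBoolVar(HiveConf.ConfVars.COMPRESSRESULT, true);

        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        tableEnv.registerCatalog("myhive", hive);
        tableEnv.useCatalog("myhive");

        // await() blocks until the batch insert job finishes.
        tableEnv.executeSql("insert into db1.dest select * from db1.src").await();
    }
}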
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class HiveRunnerITCase, method getStreamTableEnvWithHiveCatalog.
private TableEnvironment getStreamTableEnvWithHiveCatalog() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvInStreamingMode(env, SqlDialect.HIVE);
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    return tableEnv;
}
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class HiveRunnerITCase, method testTimestamp.
@Test
public void testTimestamp() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.src (ts timestamp)");
        tableEnv.executeSql("create table db1.dest (ts timestamp)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "src")
                .addRow(new Object[] {Timestamp.valueOf("2019-11-11 00:00:00")})
                .addRow(new Object[] {Timestamp.valueOf("2019-12-03 15:43:32.123456789")})
                .commit();
        // test read timestamp from hive
        List<Row> results =
                CollectionUtil.iteratorToList(
                        tableEnv.sqlQuery("select * from db1.src").execute().collect());
        assertEquals(2, results.size());
        assertEquals(LocalDateTime.of(2019, 11, 11, 0, 0), results.get(0).getField(0));
        assertEquals(
                LocalDateTime.of(2019, 12, 3, 15, 43, 32, 123456789), results.get(1).getField(0));
        // test write timestamp to hive
        tableEnv.executeSql("insert into db1.dest select max(ts) from db1.src").await();
        verifyHiveQueryResult(
                "select * from db1.dest",
                Collections.singletonList("2019-12-03 15:43:32.123456789"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
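As the assertions show, Hive TIMESTAMP columns surface in the Table API as java.time.LocalDateTime, preserving sub-second precision down to nanoseconds. A minimal read-side sketch is below; the catalog name, conf directory, and db1.src table are assumptions.

import java.time.LocalDateTime;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;

public class HiveTimestampSketch {
    public static void main(String[] args) throws Exception {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Hypothetical catalog name, default database and hive-site.xml location.
        HiveCatalog hive = new HiveCatalog("myhive", "default", "/opt/hive-conf");
        tableEnv.registerCatalog("myhive", hive);
        tableEnv.useCatalog("myhive");

        try (CloseableIterator<Row> it =
                tableEnv.executeSql("select ts from db1.src").collect()) {
            while (it.hasNext()) {
                // Hive TIMESTAMP values arrive as LocalDateTime.
                LocalDateTime ts = (LocalDateTime) it.next().getField(0);
                System.out.println(ts);
            }
        }
    }
}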
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
Class HiveRunnerITCase, method testInsertOverwrite.
@Test
public void testInsertOverwrite() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        // non-partitioned
        tableEnv.executeSql("create table db1.dest (x int, y string)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "dest")
                .addRow(new Object[] {1, "a"})
                .addRow(new Object[] {2, "b"})
                .commit();
        verifyHiveQueryResult("select * from db1.dest", Arrays.asList("1\ta", "2\tb"));
        tableEnv.executeSql("insert overwrite db1.dest values (3, 'c')").await();
        verifyHiveQueryResult("select * from db1.dest", Collections.singletonList("3\tc"));
        // static partition
        tableEnv.executeSql("create table db1.part(x int) partitioned by (y int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part")
                .addRow(new Object[] {1})
                .commit("y=1");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part")
                .addRow(new Object[] {2})
                .commit("y=2");
        tableEnv = getTableEnvWithHiveCatalog();
        tableEnv.executeSql("insert overwrite db1.part partition (y=1) select 100").await();
        verifyHiveQueryResult("select * from db1.part", Arrays.asList("100\t1", "2\t2"));
        // dynamic partition
        tableEnv = getTableEnvWithHiveCatalog();
        tableEnv.executeSql("insert overwrite db1.part values (200,2),(3,3)").await();
        // only overwrite dynamically matched partitions, other existing partitions remain intact
        verifyHiveQueryResult("select * from db1.part", Arrays.asList("100\t1", "200\t2", "3\t3"));
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
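The test covers the three INSERT OVERWRITE flavors: replacing a whole non-partitioned table, replacing a single static partition, and dynamic partitioning where only the partitions matched by the written rows are replaced while other partitions stay intact. A condensed sketch of those three statements in batch mode follows; the catalog name, conf directory, and the db1.dest/db1.part tables are assumptions.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class InsertOverwriteSketch {
    public static void main(String[] args) throws Exception {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        // Hypothetical catalog name, default database and hive-site.xml location.
        HiveCatalog hive = new HiveCatalog("myhive", "default", "/opt/hive-conf");
        tableEnv.registerCatalog("myhive", hive);
        tableEnv.useCatalog("myhive");

        // Replace the entire non-partitioned table.
        tableEnv.executeSql("insert overwrite db1.dest values (3, 'c')").await();
        // Replace only the static partition y=1.
        tableEnv.executeSql("insert overwrite db1.part partition (y=1) select 100").await();
        // Dynamic partitioning: only partitions the rows map to (y=2, y=3) are replaced.
        tableEnv.executeSql("insert overwrite db1.part values (200, 2), (3, 3)").await();
    }
}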