Use of org.apache.flink.table.api.TableEnvironment in project flink by apache: class HiveDialectQueryITCase, method getTableEnvWithHiveCatalog.
private static TableEnvironment getTableEnvWithHiveCatalog() {
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    // automatically load hive module in hive-compatible mode
    HiveModule hiveModule = new HiveModule(hiveCatalog.getHiveVersion());
    CoreModule coreModule = CoreModule.INSTANCE;
    for (String loaded : tableEnv.listModules()) {
        tableEnv.unloadModule(loaded);
    }
    tableEnv.loadModule("hive", hiveModule);
    tableEnv.loadModule("core", coreModule);
    return tableEnv;
}
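
Because the Hive module is loaded before the core module, Hive built-in functions win name resolution. A minimal sketch of how a test might call this helper; the table name demo and its rows are illustrative, not taken from the Flink tests:

TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
// DDL below runs in Hive dialect against the registered Hive catalog
tableEnv.executeSql("create table demo (x int)");
tableEnv.executeSql("insert into demo values (1)").await();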
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache: class TableEnvHiveConnectorITCase, method testInsertPartitionWithValuesSource.
@Test
public void testInsertPartitionWithValuesSource() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create table dest (x int) partitioned by (p1 int,p2 string)");
    tableEnv.executeSql("insert into dest partition (p1=1,p2) values(1, 'a')").await();
    List<Row> results =
            CollectionUtil.iteratorToList(
                    tableEnv.sqlQuery("select * from dest").execute().collect());
    assertEquals("[+I[1, 1, a]]", results.toString());
    tableEnv.executeSql("drop table if exists dest");
}
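
Here p1 is a static partition value while p2 is resolved dynamically from the VALUES row. For comparison, a fully static variant (a sketch, not part of this test, assuming the same dest table exists) would pin both partition columns in the PARTITION clause:

tableEnv.executeSql("insert into dest partition (p1=1, p2='a') values (1)").await();
// writes the same row: +I[1, 1, a]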
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache: class HiveCatalogITCase, method testNewTableFactory.
@Test
public void testNewTableFactory() throws Exception {
    TableEnvironment tEnv =
            TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
    tEnv.registerCatalog("myhive", hiveCatalog);
    tEnv.useCatalog("myhive");
    tEnv.getConfig().getConfiguration().set(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);
    String path = this.getClass().getResource("/csv/test.csv").getPath();
    PrintStream originalSystemOut = System.out;
    try {
        ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream();
        System.setOut(new PrintStream(arrayOutputStream));
        tEnv.executeSql(
                "create table csv_table (name String, age Int) with ("
                        + "'connector.type' = 'filesystem',"
                        + "'connector.path' = 'file://" + path + "',"
                        + "'format.type' = 'csv')");
        tEnv.executeSql(
                "create table print_table (name String, age Int) with ('connector' = 'print')");
        tEnv.executeSql("insert into print_table select * from csv_table").await();
        // assert query result
        assertThat(arrayOutputStream.toString()).isEqualTo("+I[1, 1]\n+I[2, 2]\n+I[3, 3]\n");
    } finally {
        if (System.out != originalSystemOut) {
            System.out.close();
        }
        System.setOut(originalSystemOut);
        tEnv.executeSql("DROP TABLE csv_table");
        tEnv.executeSql("DROP TABLE print_table");
    }
}
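
The try/finally dance around System.out is what lets the test assert on the print connector's output. Extracted as a reusable helper, it might look like this (the name captureStdout and its shape are hypothetical, not from the Flink code):

private static String captureStdout(Runnable body) {
    PrintStream original = System.out;
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    System.setOut(new PrintStream(buffer));
    try {
        // everything the body prints to stdout lands in the buffer
        body.run();
    } finally {
        System.setOut(original);
    }
    return buffer.toString();
}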
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache: class HiveCatalogITCase, method testReadWriteCsv.
@Test
public void testReadWriteCsv() throws Exception {
    // similar to CatalogTableITCase::testReadWriteCsvUsingDDL but uses HiveCatalog
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    tableEnv.getConfig()
            .getConfiguration()
            .setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);
    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");
    String srcPath = this.getClass().getResource("/csv/test3.csv").getPath();
    tableEnv.executeSql(
            "CREATE TABLE src ("
                    + "price DECIMAL(10, 2),"
                    + "currency STRING,"
                    + "ts6 TIMESTAMP(6),"
                    + "ts AS CAST(ts6 AS TIMESTAMP(3)),"
                    + "WATERMARK FOR ts AS ts) "
                    + String.format(
                            "WITH ('connector.type' = 'filesystem',"
                                    + "'connector.path' = 'file://%s',"
                                    + "'format.type' = 'csv')",
                            srcPath));
    String sinkPath = new File(tempFolder.newFolder(), "csv-order-sink").toURI().toString();
    tableEnv.executeSql(
            "CREATE TABLE sink ("
                    + "window_end TIMESTAMP(3),"
                    + "max_ts TIMESTAMP(6),"
                    + "counter BIGINT,"
                    + "total_price DECIMAL(10, 2)) "
                    + String.format(
                            "WITH ('connector.type' = 'filesystem',"
                                    + "'connector.path' = '%s',"
                                    + "'format.type' = 'csv')",
                            sinkPath));
    tableEnv.executeSql(
                    "INSERT INTO sink "
                            + "SELECT TUMBLE_END(ts, INTERVAL '5' SECOND),"
                            + "MAX(ts6),COUNT(*),MAX(price) FROM src "
                            + "GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)")
            .await();
    String expected =
            "2019-12-12 00:00:05.0,2019-12-12 00:00:04.004001,3,50.00\n"
                    + "2019-12-12 00:00:10.0,2019-12-12 00:00:06.006001,2,5.33\n";
    assertThat(FileUtils.readFileUtf8(new File(new URI(sinkPath)))).isEqualTo(expected);
}
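
The connector.type/connector.path/format.type keys belong to Flink's legacy connector stack. On newer Flink versions the same source could be declared with the unified options instead; a sketch under that assumption, reusing srcPath from the test:

tableEnv.executeSql(
        "CREATE TABLE src ("
                + "price DECIMAL(10, 2),"
                + "currency STRING,"
                + "ts6 TIMESTAMP(6),"
                + "ts AS CAST(ts6 AS TIMESTAMP(3)),"
                + "WATERMARK FOR ts AS ts) "
                + String.format(
                        "WITH ('connector' = 'filesystem', 'path' = 'file://%s', 'format' = 'csv')",
                        srcPath));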
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache: class HiveCatalogITCase, method testCreateTableLike.
@Test
public void testCreateTableLike() throws Exception {
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvInBatchMode();
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    tableEnv.executeSql("create table generic_table (x int) with ('connector'='COLLECTION')");
    tableEnv.useCatalog(EnvironmentSettings.DEFAULT_BUILTIN_CATALOG);
    tableEnv.executeSql(
            String.format(
                    "create table copy like `%s`.`default`.generic_table",
                    hiveCatalog.getName()));
    Catalog builtInCat = tableEnv.getCatalog(EnvironmentSettings.DEFAULT_BUILTIN_CATALOG).get();
    CatalogBaseTable catalogTable =
            builtInCat.getTable(
                    new ObjectPath(EnvironmentSettings.DEFAULT_BUILTIN_DATABASE, "copy"));
    assertThat(catalogTable.getOptions()).hasSize(1);
    assertThat(catalogTable.getOptions()).containsEntry(FactoryUtil.CONNECTOR.key(), "COLLECTION");
    assertThat(catalogTable.getSchema().getFieldCount()).isEqualTo(1);
    assertThat(catalogTable.getSchema().getFieldNames())
            .hasSameElementsAs(Collections.singletonList("x"));
    assertThat(catalogTable.getSchema().getFieldDataTypes())
            .hasSameElementsAs(Collections.singletonList(DataTypes.INT()));
}
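
CREATE TABLE ... LIKE also accepts feature options controlling what gets copied. A hypothetical variant (not in this test) that copies the schema but swaps in its own connector options might read:

tableEnv.executeSql(
        String.format(
                "create table copy2 with ('connector' = 'print') "
                        + "like `%s`.`default`.generic_table (excluding options)",
                hiveCatalog.getName()));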