Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
In class HiveCatalogITCase, method testCsvTableViaAPI:
@Test
public void testCsvTableViaAPI() throws Exception {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
    tableEnv.getConfig()
            .addConfiguration(new Configuration().set(CoreOptions.DEFAULT_PARALLELISM, 1));
    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");

    final TableSchema schema =
            TableSchema.builder()
                    .field("name", DataTypes.STRING())
                    .field("age", DataTypes.INT())
                    .build();

    final Map<String, String> sourceOptions = new HashMap<>();
    sourceOptions.put("connector.type", "filesystem");
    sourceOptions.put("connector.path", getClass().getResource("/csv/test.csv").getPath());
    sourceOptions.put("format.type", "csv");
    CatalogTable source = new CatalogTableImpl(schema, sourceOptions, "Comment.");

    Path p = Paths.get(tempFolder.newFolder().getAbsolutePath(), "test.csv");
    final Map<String, String> sinkOptions = new HashMap<>();
    sinkOptions.put("connector.type", "filesystem");
    sinkOptions.put("connector.path", p.toAbsolutePath().toString());
    sinkOptions.put("format.type", "csv");
    CatalogTable sink = new CatalogTableImpl(schema, sinkOptions, "Comment.");

    hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sourceTableName), source, false);
    hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sinkTableName), sink, false);

    Table t = tableEnv.sqlQuery(String.format("select * from myhive.`default`.%s", sourceTableName));
    List<Row> result = CollectionUtil.iteratorToList(t.execute().collect());
    result.sort(Comparator.comparing(String::valueOf));
    // assert query result
    assertThat(result).containsExactly(Row.of("1", 1), Row.of("2", 2), Row.of("3", 3));

    tableEnv.executeSql(
                    String.format(
                            "insert into myhive.`default`.%s select * from myhive.`default`.%s",
                            sinkTableName, sourceTableName))
            .await();

    // assert written result; close the reader when done
    File resultFile = new File(p.toAbsolutePath().toString());
    try (BufferedReader reader = new BufferedReader(new FileReader(resultFile))) {
        String readLine;
        for (int i = 0; i < 3; i++) {
            readLine = reader.readLine();
            assertThat(readLine).isEqualTo(String.format("%d,%d", i + 1, i + 1));
        }
        // No more line
        assertThat(reader.readLine()).isNull();
    }

    tableEnv.executeSql(String.format("DROP TABLE %s", sourceTableName));
    tableEnv.executeSql(String.format("DROP TABLE %s", sinkTableName));
}
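The method above relies on fixtures declared elsewhere in HiveCatalogITCase: a hiveCatalog field, a JUnit tempFolder rule, and the sourceTableName/sinkTableName constants. A minimal sketch of such a fixture, assuming a HiveCatalog built from a Hive conf directory (the directory path, table names, and setup method below are placeholders, not taken from the Flink source):

    // Hypothetical fixture sketch; uses org.junit.rules.TemporaryFolder and
    // org.apache.flink.table.catalog.hive.HiveCatalog.
    @Rule public final TemporaryFolder tempFolder = new TemporaryFolder();

    private static final String sourceTableName = "csv_source"; // placeholder name
    private static final String sinkTableName = "csv_sink";     // placeholder name

    private HiveCatalog hiveCatalog;

    @Before
    public void setupCatalog() {
        // "myhive" matches the catalog name registered in the test;
        // the Hive conf directory is an assumed location.
        hiveCatalog = new HiveCatalog("myhive", "default", "/etc/hive/conf");
        hiveCatalog.open();
    }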
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
In class HiveTableSinkITCase, method assertBatch:
private void assertBatch(String table, List<String> expected) {
    // using batch table env to query.
    List<String> results = new ArrayList<>();
    TableEnvironment batchTEnv = HiveTestUtils.createTableEnvInBatchMode();
    batchTEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    batchTEnv.useCatalog(hiveCatalog.getName());
    batchTEnv
            .executeSql("select * from " + table)
            .collect()
            .forEachRemaining(r -> results.add(r.toString()));
    results.sort(String::compareTo);
    expected.sort(String::compareTo);
    Assert.assertEquals(expected, results);
}
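For illustration, a test could call this helper after writing to a Hive table; the table name and expected rows here are hypothetical:

    // Hypothetical usage: read the sink table back with a batch TableEnvironment
    // and compare against the rows the test expects to have written.
    assertBatch(
            "db1.sink_table",
            new ArrayList<>(Arrays.asList("+I[1, a]", "+I[2, b]")));

Because both lists are sorted before the comparison, the expected rows can be given in any order.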
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
In class TableEnvHiveConnectorITCase, method testNonExistingPartitionFolder:
@Test
public void testNonExistingPartitionFolder() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.part (x int) partitioned by (p int)");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part")
                .addRow(new Object[] {1})
                .commit("p=1");
        HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "part")
                .addRow(new Object[] {2})
                .commit("p=2");
        tableEnv.executeSql("alter table db1.part add partition (p=3)");
        // remove one partition
        Path toRemove =
                new Path(
                        hiveCatalog.getHiveTable(new ObjectPath("db1", "part")).getSd().getLocation(),
                        "p=2");
        FileSystem fs = toRemove.getFileSystem(hiveCatalog.getHiveConf());
        fs.delete(toRemove, true);
        List<Row> results =
                CollectionUtil.iteratorToList(
                        tableEnv.sqlQuery("select * from db1.part").execute().collect());
        assertEquals("[+I[1, 1]]", results.toString());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
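getTableEnvWithHiveCatalog() is a private helper of TableEnvHiveConnectorITCase that is not shown in these excerpts. Based on the pattern in the other snippets, it plausibly creates a batch TableEnvironment, switches to the Hive SQL dialect (needed for the Hive-style DDL used above), and registers the shared catalog; treat the following as a sketch rather than the exact Flink source:

    // Sketch of the assumed helper.
    private TableEnvironment getTableEnvWithHiveCatalog() {
        TableEnvironment tableEnv = HiveTestUtils.createTableEnvInBatchMode();
        // Hive-style DDL such as "stored as parquet" needs the Hive dialect.
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
        tableEnv.useCatalog(hiveCatalog.getName());
        return tableEnv;
    }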
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
In class TableEnvHiveConnectorITCase, method testReadEmptyCollectionFromParquet:
@Test
public void testReadEmptyCollectionFromParquet() throws Exception {
    Assume.assumeTrue(HiveShimLoader.getHiveVersion().equals("2.0.0"));
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    try {
        String format = "parquet";
        // test.parquet data: hehuiyuan {} []
        String folderURI = this.getClass().getResource("/parquet").getPath();
        tableEnv.getConfig()
                .getConfiguration()
                .set(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true);
        tableEnv.executeSql(
                String.format(
                        "create external table src_t (a string, b map<string, string>, c array<string>) stored as %s location 'file://%s'",
                        format, folderURI));
        List<Row> results =
                CollectionUtil.iteratorToList(
                        tableEnv.sqlQuery("select * from src_t").execute().collect());
        assertEquals("[+I[hehuiyuan, null, null]]", results.toString());
    } finally {
        tableEnv.executeSql("drop table if exists src_t");
    }
}
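The HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER flag makes the Hive connector fall back to Hive's MapReduce record reader instead of Flink's native vectorized reader. If the option were set by string key rather than via the typed ConfigOption, it would look roughly like this (the key is recalled from the Flink documentation, not taken from the snippet, so treat it as an assumption):

    // Assumed string key for HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER.
    tableEnv.getConfig()
            .getConfiguration()
            .setString("table.exec.hive.fallback-mapred-reader", "true");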
Use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
In class TableEnvHiveConnectorITCase, method testInsertPartitionWithStarSource:
@Test
public void testInsertPartitionWithStarSource() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create table src (x int,y string)");
    HiveTestUtils.createTextTableInserter(hiveCatalog, "default", "src")
            .addRow(new Object[] {1, "a"})
            .commit();
    tableEnv.executeSql("create table dest (x int) partitioned by (p1 int,p2 string)");
    tableEnv.executeSql("insert into dest partition (p1=1,p2) select * from src").await();
    List<Row> results =
            CollectionUtil.iteratorToList(
                    tableEnv.sqlQuery("select * from dest").execute().collect());
    assertEquals("[+I[1, 1, a]]", results.toString());
    tableEnv.executeSql("drop table if exists src");
    tableEnv.executeSql("drop table if exists dest");
}
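The insert above mixes a static partition value (p1=1) with a dynamic one (p2, which takes the last column of the star query). For comparison, fully static and fully dynamic variants would look like the following; these statements are hypothetical and not part of the test:

    // Fully static partition spec: both partition columns get literal values,
    // so the query only supplies the non-partition column.
    tableEnv.executeSql("insert into dest partition (p1=1,p2='a') select x from src").await();
    // Fully dynamic partition spec: all partition values come from the query,
    // in the order x, p1, p2.
    tableEnv.executeSql("insert into dest select x, 1, y from src").await();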