use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
the class HiveInputFormatPartitionReaderITCase method testReadMultipleSplits.
@Test
public void testReadMultipleSplits() throws Exception {
    HiveCatalog hiveCatalog = HiveTestUtils.createHiveCatalog();
    TableEnvironment tableEnv = HiveTestUtils.createTableEnvInBatchMode(SqlDialect.HIVE);
    tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tableEnv.useCatalog(hiveCatalog.getName());
    // the ORC case is skipped on Hive 2.0.x; Parquet is exercised for all versions
    if (!HiveShimLoader.getHiveVersion().startsWith("2.0")) {
        testReadFormat(tableEnv, hiveCatalog, "orc");
    }
    testReadFormat(tableEnv, hiveCatalog, "parquet");
}
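HiveTestUtils.createTableEnvInBatchMode is a test-only helper. For reference, a minimal sketch of a batch TableEnvironment with the Hive SQL dialect built from public API calls only; this is an assumption about roughly what the helper sets up, not its actual body:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class HiveBatchEnvSketch {
    // Hypothetical equivalent of the test helper: a batch-mode environment
    // whose DDL/DML is parsed with the Hive dialect.
    public static TableEnvironment createBatchHiveDialectEnv() {
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        return tableEnv;
    }
}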
use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
the class HiveCatalogITCase method testCsvTableViaSQL.
@Test
public void testCsvTableViaSQL() {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");
    String path = this.getClass().getResource("/csv/test.csv").getPath();
    tableEnv.executeSql(
            "create table test2 (name String, age Int) with (\n"
                    + " 'connector.type' = 'filesystem',\n"
                    + " 'connector.path' = 'file://" + path + "',\n"
                    + " 'format.type' = 'csv'\n"
                    + ")");
    Table t = tableEnv.sqlQuery("SELECT * FROM myhive.`default`.test2");
    List<Row> result = CollectionUtil.iteratorToList(t.execute().collect());
    // assert query result
    assertThat(result).containsExactlyInAnyOrder(Row.of("1", 1), Row.of("2", 2), Row.of("3", 3));
    tableEnv.executeSql("ALTER TABLE test2 RENAME TO newtable");
    t = tableEnv.sqlQuery("SELECT * FROM myhive.`default`.newtable");
    result = CollectionUtil.iteratorToList(t.execute().collect());
    // assert query result
    assertThat(result).containsExactlyInAnyOrder(Row.of("1", 1), Row.of("2", 2), Row.of("3", 3));
    tableEnv.executeSql("DROP TABLE newtable");
}
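The 'connector.type', 'connector.path', and 'format.type' keys used above are Flink's legacy descriptor-style properties. As a point of comparison, a minimal sketch of the same kind of CSV-backed table declared with the unified filesystem connector options, assuming a Flink version where the 'connector' key is available; the table name and path below are placeholders, not test resources from the snippet:

// Hedged sketch: equivalent DDL with the newer option keys
// ('connector', 'path', 'format') instead of the legacy descriptor properties.
tableEnv.executeSql(
        "CREATE TABLE test2_new (name STRING, age INT) WITH (\n"
                + " 'connector' = 'filesystem',\n"
                + " 'path' = 'file:///tmp/test.csv',\n"
                + " 'format' = 'csv'\n"
                + ")");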
use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
the class HiveCatalogITCase method prepareTable.
private TableEnvironment prepareTable(boolean isStreaming) {
    EnvironmentSettings settings;
    if (isStreaming) {
        settings = EnvironmentSettings.inStreamingMode();
    } else {
        settings = EnvironmentSettings.inBatchMode();
    }
    TableEnvironment tableEnv = TableEnvironment.create(settings);
    tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);
    tableEnv.registerCatalog("myhive", hiveCatalog);
    tableEnv.useCatalog("myhive");
    String srcPath = this.getClass().getResource("/csv/test3.csv").getPath();
    tableEnv.executeSql(
            "CREATE TABLE proctime_src ("
                    + "price DECIMAL(10, 2),"
                    + "currency STRING,"
                    + "ts6 TIMESTAMP(6),"
                    + "ts AS CAST(ts6 AS TIMESTAMP(3)),"
                    + "WATERMARK FOR ts AS ts,"
                    + "l_proctime AS PROCTIME( )) " // test " " in proctime()
                    + String.format(
                            "WITH ("
                                    + "'connector.type' = 'filesystem',"
                                    + "'connector.path' = 'file://%s',"
                                    + "'format.type' = 'csv')",
                            srcPath));
    return tableEnv;
}
use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
the class HiveCatalogITCase method testCreateAndGetManagedTable.
@Test
public void testCreateAndGetManagedTable() throws Exception {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    String catalog = "myhive";
    String database = "default";
    String table = "managed_table";
    ObjectIdentifier tableIdentifier = ObjectIdentifier.of(catalog, database, table);
    try {
        TestManagedTableFactory.MANAGED_TABLES.put(tableIdentifier, new AtomicReference<>());
        tableEnv.registerCatalog(catalog, hiveCatalog);
        tableEnv.useCatalog(catalog);
        final String sql =
                String.format(
                        "CREATE TABLE %s (\n"
                                + " uuid varchar(40) not null,\n"
                                + " price DECIMAL(10, 2),\n"
                                + " currency STRING,\n"
                                + " ts6 TIMESTAMP(6),\n"
                                + " ts AS CAST(ts6 AS TIMESTAMP(3)),\n"
                                + " WATERMARK FOR ts AS ts,\n"
                                + " constraint ct1 PRIMARY KEY(uuid) NOT ENFORCED)\n",
                        table);
        tableEnv.executeSql(sql);
        Map<String, String> expectedOptions = new HashMap<>();
        expectedOptions.put(TestManagedTableFactory.ENRICHED_KEY, TestManagedTableFactory.ENRICHED_VALUE);
        assertThat(TestManagedTableFactory.MANAGED_TABLES.get(tableIdentifier).get())
                .containsExactlyInAnyOrderEntriesOf(expectedOptions);
        Map<String, String> expectedParameters = new HashMap<>();
        expectedOptions.forEach((k, v) -> expectedParameters.put(FLINK_PROPERTY_PREFIX + k, v));
        expectedParameters.put(FLINK_PROPERTY_PREFIX + CONNECTOR.key(), ManagedTableFactory.DEFAULT_IDENTIFIER);
        assertThat(hiveCatalog.getHiveTable(tableIdentifier.toObjectPath()).getParameters())
                .containsAllEntriesOf(expectedParameters);
        assertThat(hiveCatalog.getTable(tableIdentifier.toObjectPath()).getOptions())
                .containsExactlyEntriesOf(
                        Collections.singletonMap(
                                TestManagedTableFactory.ENRICHED_KEY,
                                TestManagedTableFactory.ENRICHED_VALUE));
    } finally {
        tableEnv.executeSql(String.format("DROP TABLE %s", table));
        assertThat(TestManagedTableFactory.MANAGED_TABLES.get(tableIdentifier).get()).isNull();
    }
}
use of org.apache.flink.table.api.TableEnvironment in project flink by apache.
the class HiveCatalogITCase method testReadWriteCsvWithProctime.
private void testReadWriteCsvWithProctime(boolean isStreaming) {
    TableEnvironment tableEnv = prepareTable(isStreaming);
    List<Row> rows =
            CollectionUtil.iteratorToList(
                    tableEnv.executeSql("SELECT * FROM proctime_src").collect());
    assertThat(rows).hasSize(5);
    tableEnv.executeSql("DROP TABLE proctime_src");
}
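This private helper is parameterized on the execution mode. A hedged sketch of how batch and streaming test entry points could drive it; the wrapper method names here are illustrative and not necessarily the ones in the Flink source:

// Hypothetical wrappers: exercise the helper in both execution modes.
@Test
public void testBatchReadWriteCsvWithProctime() {
    testReadWriteCsvWithProctime(false);
}

@Test
public void testStreamReadWriteCsvWithProctime() {
    testReadWriteCsvWithProctime(true);
}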