use of org.apache.flink.table.catalog.Catalog in project flink by apache.
the class HiveParser method parse.
@Override
public List<Operation> parse(String statement) {
    CatalogManager catalogManager = getCatalogManager();
    Catalog currentCatalog =
            catalogManager.getCatalog(catalogManager.getCurrentCatalog()).orElse(null);
    if (!(currentCatalog instanceof HiveCatalog)) {
        LOG.warn("Current catalog is not HiveCatalog. Falling back to Flink's planner.");
        return super.parse(statement);
    }
    HiveConf hiveConf = new HiveConf(((HiveCatalog) currentCatalog).getHiveConf());
    hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
    hiveConf.set("hive.allow.udf.load.on.demand", "false");
    hiveConf.setVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "mr");
    HiveShim hiveShim =
            HiveShimLoader.loadHiveShim(((HiveCatalog) currentCatalog).getHiveVersion());
    try {
        // creates SessionState
        startSessionState(hiveConf, catalogManager);
        // We override Hive's grouping function. Refer to the implementation for more details.
        hiveShim.registerTemporaryFunction("grouping", HiveGenericUDFGrouping.class);
        return processCmd(statement, hiveConf, hiveShim, (HiveCatalog) currentCatalog);
    } finally {
        clearSessionState();
    }
}
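This parse method only takes over when the current catalog is a HiveCatalog; otherwise it delegates to Flink's own parser. Below is a minimal usage sketch (not code from the Flink repository) showing how a caller would put a TableEnvironment into that state; the HiveCatalog instance "hiveCatalog", the catalog name "hive", and the statement are assumptions for illustration.
// Minimal sketch: make a HiveCatalog the current catalog and switch to the Hive dialect
// so that statements are routed through the Hive parser. "hiveCatalog" is assumed to exist.
TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
tEnv.registerCatalog("hive", hiveCatalog);
tEnv.useCatalog("hive");                          // the Hive parser requires a HiveCatalog as the current catalog
tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);  // otherwise statements go to Flink's default parser
tEnv.executeSql("SHOW TABLES").print();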
use of org.apache.flink.table.catalog.Catalog in project flink by apache.
the class HiveCatalogITCase method testTableWithPrimaryKey.
@Test
public void testTableWithPrimaryKey() {
    TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);
    tableEnv.registerCatalog("catalog1", hiveCatalog);
    tableEnv.useCatalog("catalog1");
    final String createTable =
            "CREATE TABLE pk_src (\n"
                    + " uuid varchar(40) not null,\n"
                    + " price DECIMAL(10, 2),\n"
                    + " currency STRING,\n"
                    + " ts6 TIMESTAMP(6),\n"
                    + " ts AS CAST(ts6 AS TIMESTAMP(3)),\n"
                    + " WATERMARK FOR ts AS ts,\n"
                    + " constraint ct1 PRIMARY KEY(uuid) NOT ENFORCED)\n"
                    + " WITH (\n"
                    + " 'connector.type' = 'filesystem',"
                    + " 'connector.path' = 'file://fakePath',"
                    + " 'format.type' = 'csv')";
    tableEnv.executeSql(createTable);
    TableSchema tableSchema =
            tableEnv.getCatalog(tableEnv.getCurrentCatalog())
                    .map(catalog -> {
                        try {
                            final ObjectPath tablePath =
                                    ObjectPath.fromString(catalog.getDefaultDatabase() + '.' + "pk_src");
                            return catalog.getTable(tablePath).getSchema();
                        } catch (TableNotExistException e) {
                            return null;
                        }
                    })
                    .orElse(null);
    assertThat(tableSchema).isNotNull();
    assertThat(tableSchema.getPrimaryKey())
            .hasValue(UniqueConstraint.primaryKey("ct1", Collections.singletonList("uuid")));
    tableEnv.executeSql("DROP TABLE pk_src");
}
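The same schema lookup can also be done directly against the Catalog API instead of going through the TableEnvironment. A minimal sketch follows, assuming the pk_src table created by the test above; the helper name printPrimaryKey is hypothetical.
// Hypothetical helper: read the table back through the Catalog API.
// Catalog#getTable throws TableNotExistException if the table is missing.
private static void printPrimaryKey(Catalog catalog) throws TableNotExistException {
    ObjectPath tablePath = new ObjectPath(catalog.getDefaultDatabase(), "pk_src");
    TableSchema schema = catalog.getTable(tablePath).getSchema();
    schema.getPrimaryKey()
            .ifPresent(pk -> System.out.println("primary key " + pk.getName() + ": " + pk.getColumns()));
}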
use of org.apache.flink.table.catalog.Catalog in project flink by apache.
the class HiveCatalogFactoryTest method testCreateHiveCatalogWithIllegalHadoopConfDir.
@Test
public void testCreateHiveCatalogWithIllegalHadoopConfDir() throws IOException {
    final String catalogName = "mycatalog";
    final String hadoopConfDir = tempFolder.newFolder().getAbsolutePath();
    try {
        final Map<String, String> options = new HashMap<>();
        options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
        options.put(HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath());
        options.put(HiveCatalogFactoryOptions.HADOOP_CONF_DIR.key(), hadoopConfDir);
        final Catalog actualCatalog =
                FactoryUtil.createCatalog(
                        catalogName, options, null, Thread.currentThread().getContextClassLoader());
        Assert.fail();
    } catch (ValidationException e) {
        // expected
    }
}
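The try/fail/catch pattern works, but the same expectation can be written more compactly with AssertJ's assertThatThrownBy. This is an alternative assertion style, not the code from the Flink test; it reuses the option keys and the tempFolder/CONF_DIR fixtures from the test above.
// Sketch using assertThatThrownBy (org.assertj.core.api.Assertions) instead of try/fail/catch.
final Map<String, String> options = new HashMap<>();
options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
options.put(HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath());
options.put(HiveCatalogFactoryOptions.HADOOP_CONF_DIR.key(), tempFolder.newFolder().getAbsolutePath());
assertThatThrownBy(() -> FactoryUtil.createCatalog(
                "mycatalog", options, null, Thread.currentThread().getContextClassLoader()))
        .isInstanceOf(ValidationException.class);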
use of org.apache.flink.table.catalog.Catalog in project flink by apache.
the class HiveParserDDLSemanticAnalyzer method getDatabase.
private CatalogDatabase getDatabase(String databaseName) {
    Catalog catalog = catalogManager.getCatalog(catalogManager.getCurrentCatalog()).get();
    CatalogDatabase database;
    try {
        database = catalog.getDatabase(databaseName);
    } catch (DatabaseNotExistException e) {
        throw new ValidationException(String.format("Database %s not exists", databaseName), e);
    }
    return database;
}
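When a missing database is not an error, the checked DatabaseNotExistException can be sidestepped with an existence check. The helper below is a minimal sketch and is not part of HiveParserDDLSemanticAnalyzer; its name findDatabase is an assumption.
// Hypothetical helper: return the database if present, or null instead of throwing.
private static CatalogDatabase findDatabase(Catalog catalog, String databaseName) {
    // databaseExists lets callers probe for a database without handling the checked exception
    if (!catalog.databaseExists(databaseName)) {
        return null;
    }
    try {
        return catalog.getDatabase(databaseName);
    } catch (DatabaseNotExistException e) {
        // the database was dropped between the existence check and the lookup
        return null;
    }
}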
use of org.apache.flink.table.catalog.Catalog in project flink by apache.
the class JdbcCatalogFactoryTest method test.
@Test
public void test() {
    final Map<String, String> options = new HashMap<>();
    options.put(CommonCatalogOptions.CATALOG_TYPE.key(), JdbcCatalogFactoryOptions.IDENTIFIER);
    options.put(JdbcCatalogFactoryOptions.DEFAULT_DATABASE.key(), PostgresCatalog.DEFAULT_DATABASE);
    options.put(JdbcCatalogFactoryOptions.USERNAME.key(), TEST_USERNAME);
    options.put(JdbcCatalogFactoryOptions.PASSWORD.key(), TEST_PWD);
    options.put(JdbcCatalogFactoryOptions.BASE_URL.key(), baseUrl);
    final Catalog actualCatalog =
            FactoryUtil.createCatalog(
                    TEST_CATALOG_NAME, options, null, Thread.currentThread().getContextClassLoader());
    checkEquals(catalog, (JdbcCatalog) actualCatalog);
    assertTrue(((JdbcCatalog) actualCatalog).getInternal() instanceof PostgresCatalog);
}
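Once created through the factory, the catalog can be registered with a TableEnvironment like any other Catalog. A usage sketch follows, assuming the actualCatalog instance from the test above; the catalog name "postgres" is illustrative.
// Sketch: register the factory-created JdbcCatalog and list the databases it exposes.
TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
tEnv.registerCatalog("postgres", actualCatalog);
tEnv.useCatalog("postgres");
tEnv.executeSql("SHOW DATABASES").print();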