Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestHiveProjectionPushdownIntoTableScan, method createLocalQueryRunner:
@Override
protected LocalQueryRunner createLocalQueryRunner()
{
    baseDir = Files.createTempDir();
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            environment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    LocalQueryRunner queryRunner = LocalQueryRunner.create(HIVE_SESSION);
    queryRunner.createCatalog(HIVE_CATALOG_NAME, new TestingHiveConnectorFactory(metastore), ImmutableMap.of());
    return queryRunner;
}
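The three-line HdfsEnvironment construction above recurs, nearly verbatim, in every example on this page. A minimal sketch of how a shared test utility could extract it; the helper name is hypothetical and not part of the Trino sources:

// Hypothetical helper, not in the Trino sources: extracts the HdfsEnvironment
// wiring that each example on this page repeats.
private static HdfsEnvironment createTestingHdfsEnvironment()
{
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(
            new HdfsConfigurationInitializer(config),
            ImmutableSet.of());
    return new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
}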
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestRubixCaching, method getNonCachingFileSystem:
private FileSystem getNonCachingFileSystem()
        throws IOException
{
    HdfsConfigurationInitializer configurationInitializer = new HdfsConfigurationInitializer(config);
    HiveHdfsConfiguration configuration = new HiveHdfsConfiguration(configurationInitializer, ImmutableSet.of());
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    return environment.getFileSystem(context, cacheStoragePath);
}
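The returned object is an ordinary Hadoop FileSystem, just without Rubix caching in front of it. A hedged usage sketch; the probe path and payload are illustrative only:

// Illustrative only: write a probe file through the non-caching FileSystem.
FileSystem fileSystem = getNonCachingFileSystem();
Path probe = new Path(cacheStoragePath, "probe");
try (FSDataOutputStream output = fileSystem.create(probe)) {
    output.writeBytes("data");
}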
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestIcebergSplitSource, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    File tempDir = Files.createTempDirectory("test_iceberg_split_source").toFile();
    this.metastoreDir = new File(tempDir, "iceberg_data");
    HiveMetastore metastore = createTestingFileHiveMetastore(metastoreDir);
    IcebergTableOperationsProvider operationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    this.catalog = new TrinoHiveCatalog(
            new CatalogName("hive"),
            memoizeMetastore(metastore, 1000),
            hdfsEnvironment,
            new TestingTypeManager(),
            operationsProvider,
            "test",
            false,
            false,
            false);
    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(NATION), Optional.of(metastoreDir));
}
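Since createQueryRunner keeps metastoreDir as a field, a matching teardown presumably deletes the temporary directory afterwards. A sketch under that assumption, using TestNG annotations and Guava's MoreFiles.deleteRecursively (both common in Trino's tests); not taken from the source:

// Assumed teardown sketch: remove the temp directory created above.
@AfterClass(alwaysRun = true)
public void tearDown()
        throws IOException
{
    deleteRecursively(metastoreDir.getParentFile().toPath(), ALLOW_INSECURE);
}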
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestIcebergTableWithCustomLocation, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    metastoreDir = Files.createTempDirectory("test_iceberg").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    FileHiveMetastoreConfig config = new FileHiveMetastoreConfig()
            .setCatalogDirectory(metastoreDir.toURI().toString())
            .setMetastoreUser("test");
    hdfsContext = new HdfsContext(ConnectorIdentity.ofUser(config.getMetastoreUser()));
    metastore = new FileHiveMetastore(new NodeVersion("testversion"), hdfsEnvironment, new MetastoreConfig(), config);
    return createIcebergQueryRunner(
            ImmutableMap.of(),
            ImmutableMap.of("iceberg.unique-table-location", "true"),
            ImmutableList.of(),
            Optional.of(metastoreDir));
}
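The hdfsEnvironment and hdfsContext fields built here are what let a test inspect table locations on disk. A hedged sketch of that pattern; the location variable is a placeholder for a table location string, and assertTrue is assumed to come from TestNG:

// Illustrative sketch: resolve a FileSystem through the environment and
// context built above, then check that a table directory exists.
// 'location' is a placeholder for a table location string.
FileSystem fileSystem = hdfsEnvironment.getFileSystem(hdfsContext, new Path(location));
assertTrue(fileSystem.exists(new Path(location)), "table location should exist");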
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestSharedGlueMetastore, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    Session icebergSession = testSessionBuilder()
            .setCatalog(ICEBERG_CATALOG)
            .setSchema(schema)
            .build();
    Session hiveSession = testSessionBuilder()
            .setCatalog(HIVE_CATALOG)
            .setSchema(schema)
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(icebergSession).build();
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");
    this.dataDirectory = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
    this.dataDirectory.toFile().deleteOnExit();
    queryRunner.installPlugin(new IcebergPlugin());
    queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", ImmutableMap.of(
            "iceberg.catalog.type", "glue",
            "hive.metastore.glue.default-warehouse-dir", dataDirectory.toString()));
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(
            new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of()),
            hdfsConfig,
            new NoHdfsAuthentication());
    this.glueMetastore = new GlueHiveMetastore(
            hdfsEnvironment,
            new GlueHiveMetastoreConfig(),
            directExecutor(),
            new DefaultGlueColumnStatisticsProviderFactory(new GlueHiveMetastoreConfig(), directExecutor(), directExecutor()),
            Optional.empty(),
            table -> true);
    queryRunner.installPlugin(new TestingHivePlugin(glueMetastore));
    queryRunner.createCatalog(HIVE_CATALOG, "hive");
    queryRunner.createCatalog("hive_with_redirections", "hive", ImmutableMap.of("hive.iceberg-catalog-name", "iceberg"));
    queryRunner.execute("CREATE SCHEMA " + schema + " WITH (location = '" + dataDirectory.toString() + "')");
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, icebergSession, ImmutableList.of(TpchTable.NATION));
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, hiveSession, ImmutableList.of(TpchTable.REGION));
    return queryRunner;
}
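The point of this setup is that the hive and iceberg catalogs share a single Glue metastore, so each catalog sees the same schema and can query its own tables in it. A hedged sketch of the kind of assertion such a test might make (assertQuery is the AbstractTestQueryFramework helper; the row counts are the TPC-H tiny table sizes):

// Illustrative assertions, not from the source: nation was copied through the
// Iceberg session, region through the Hive session, into the same Glue-backed schema.
assertQuery("SELECT count(*) FROM iceberg." + schema + ".nation", "VALUES 25");
assertQuery("SELECT count(*) FROM hive." + schema + ".region", "VALUES 5");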