Search in sources :

Example 6 with HdfsConfig

Use of io.trino.plugin.hive.HdfsConfig in the trinodb/trino project.

From the class TestHiveProjectionPushdownIntoTableScan, the method createLocalQueryRunner:

@Override
protected LocalQueryRunner createLocalQueryRunner() {
    // Create the metastore catalog directory with java.nio instead of Guava's
    // deprecated Files.createTempDir(), which creates a world-readable
    // directory on some platforms (CVE-2020-8908). This override cannot
    // declare a checked exception, so IOException is rethrown unchecked.
    try {
        baseDir = java.nio.file.Files.createTempDirectory("test_hive_projection").toFile();
    }
    catch (java.io.IOException e) {
        throw new java.io.UncheckedIOException("failed to create temporary metastore directory", e);
    }
    // Minimal local-file-system HDFS environment without authentication.
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    // File-based metastore whose catalog lives under baseDir.
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            environment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    // Schema the test tables are created in, owned by the "public" role.
    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    LocalQueryRunner queryRunner = LocalQueryRunner.create(HIVE_SESSION);
    queryRunner.createCatalog(HIVE_CATALOG_NAME, new TestingHiveConnectorFactory(metastore), ImmutableMap.of());
    return queryRunner;
}
Also used : HdfsConfigurationInitializer(io.trino.plugin.hive.HdfsConfigurationInitializer) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) MetastoreConfig(io.trino.plugin.hive.metastore.MetastoreConfig) FileHiveMetastoreConfig(io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig) FileHiveMetastore(io.trino.plugin.hive.metastore.file.FileHiveMetastore) HiveMetastore(io.trino.plugin.hive.metastore.HiveMetastore) HdfsConfig(io.trino.plugin.hive.HdfsConfig) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) HdfsConfiguration(io.trino.plugin.hive.HdfsConfiguration) NoHdfsAuthentication(io.trino.plugin.hive.authentication.NoHdfsAuthentication) LocalQueryRunner(io.trino.testing.LocalQueryRunner) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) NodeVersion(io.trino.plugin.hive.NodeVersion) FileHiveMetastoreConfig(io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig) FileHiveMetastore(io.trino.plugin.hive.metastore.file.FileHiveMetastore) TestingHiveConnectorFactory(io.trino.plugin.hive.TestingHiveConnectorFactory) Database(io.trino.plugin.hive.metastore.Database)

Example 7 with HdfsConfig

Use of io.trino.plugin.hive.HdfsConfig in the trinodb/trino project.

From the class TestRubixCaching, the method setup:

@BeforeClass
public void setup() throws IOException {
    // Root path under which the Rubix cache stores its data.
    cacheStoragePath = getStoragePath("/");
    config = new HdfsConfig();
    // Assemble Hive session properties with Rubix caching switched on.
    RubixEnabledConfig rubixConfig = new RubixEnabledConfig().setCacheEnabled(true);
    List<PropertyMetadata<?>> sessionProperties = getHiveSessionProperties(new HiveConfig(), rubixConfig, new OrcReaderConfig()).getSessionProperties();
    TestingConnectorSession session = TestingConnectorSession.builder()
            .setPropertyMetadata(sessionProperties)
            .build();
    context = new HdfsContext(session);
    // Baseline file system without caching, for comparison in the tests.
    nonCachingFileSystem = getNonCachingFileSystem();
}
Also used : OrcReaderConfig(io.trino.plugin.hive.orc.OrcReaderConfig) HdfsConfig(io.trino.plugin.hive.HdfsConfig) PropertyMetadata(io.trino.spi.session.PropertyMetadata) HdfsContext(io.trino.plugin.hive.HdfsEnvironment.HdfsContext) HiveConfig(io.trino.plugin.hive.HiveConfig) BeforeClass(org.testng.annotations.BeforeClass)

Example 8 with HdfsConfig

Use of io.trino.plugin.hive.HdfsConfig in the trinodb/trino project.

From the class TestIcebergSplitSource, the method createQueryRunner:

@Override
protected QueryRunner createQueryRunner() throws Exception {
    // Minimal HDFS stack backed by the local file system, no authentication.
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfigurationInitializer initializer = new HdfsConfigurationInitializer(hdfsConfig);
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(initializer, ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());

    // File-based metastore rooted in a fresh temporary directory.
    File tempDir = Files.createTempDirectory("test_iceberg_split_source").toFile();
    this.metastoreDir = new File(tempDir, "iceberg_data");
    HiveMetastore metastore = createTestingFileHiveMetastore(metastoreDir);

    // Iceberg catalog wired to the file metastore; metastore calls are memoized.
    IcebergTableOperationsProvider operationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    this.catalog = new TrinoHiveCatalog(
            new CatalogName("hive"),
            memoizeMetastore(metastore, 1000),
            hdfsEnvironment,
            new TestingTypeManager(),
            operationsProvider,
            "test",
            false,
            false,
            false);

    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(NATION), Optional.of(metastoreDir));
}
Also used : HdfsConfigurationInitializer(io.trino.plugin.hive.HdfsConfigurationInitializer) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) HiveMetastore(io.trino.plugin.hive.metastore.HiveMetastore) FileHiveMetastore.createTestingFileHiveMetastore(io.trino.plugin.hive.metastore.file.FileHiveMetastore.createTestingFileHiveMetastore) HdfsConfig(io.trino.plugin.hive.HdfsConfig) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) HdfsConfiguration(io.trino.plugin.hive.HdfsConfiguration) NoHdfsAuthentication(io.trino.plugin.hive.authentication.NoHdfsAuthentication) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) FileMetastoreTableOperationsProvider(io.trino.plugin.iceberg.catalog.file.FileMetastoreTableOperationsProvider) TrinoHiveCatalog(io.trino.plugin.iceberg.catalog.hms.TrinoHiveCatalog) CatalogName(io.trino.plugin.base.CatalogName) IcebergTableOperationsProvider(io.trino.plugin.iceberg.catalog.IcebergTableOperationsProvider) File(java.io.File) TestingTypeManager(io.trino.spi.type.TestingTypeManager)

Example 9 with HdfsConfig

Use of io.trino.plugin.hive.HdfsConfig in the trinodb/trino project.

From the class TestIcebergTableWithCustomLocation, the method createQueryRunner:

@Override
protected DistributedQueryRunner createQueryRunner() throws Exception {
    metastoreDir = Files.createTempDirectory("test_iceberg").toFile();

    // Local-file-system HDFS environment without authentication.
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());

    // File-based metastore whose catalog lives in the temp directory.
    FileHiveMetastoreConfig metastoreConfig = new FileHiveMetastoreConfig()
            .setCatalogDirectory(metastoreDir.toURI().toString())
            .setMetastoreUser("test");
    hdfsContext = new HdfsContext(ConnectorIdentity.ofUser(metastoreConfig.getMetastoreUser()));
    metastore = new FileHiveMetastore(new NodeVersion("testversion"), hdfsEnvironment, new MetastoreConfig(), metastoreConfig);

    // Unique table locations enabled so each table gets its own directory.
    return createIcebergQueryRunner(
            ImmutableMap.of(),
            ImmutableMap.of("iceberg.unique-table-location", "true"),
            ImmutableList.of(),
            Optional.of(metastoreDir));
}
Also used : FileHiveMetastoreConfig(io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig) NodeVersion(io.trino.plugin.hive.NodeVersion) HdfsConfigurationInitializer(io.trino.plugin.hive.HdfsConfigurationInitializer) FileHiveMetastore(io.trino.plugin.hive.metastore.file.FileHiveMetastore) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) MetastoreConfig(io.trino.plugin.hive.metastore.MetastoreConfig) FileHiveMetastoreConfig(io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig) HdfsConfig(io.trino.plugin.hive.HdfsConfig) HdfsContext(io.trino.plugin.hive.HdfsEnvironment.HdfsContext) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) HdfsConfiguration(io.trino.plugin.hive.HdfsConfiguration) NoHdfsAuthentication(io.trino.plugin.hive.authentication.NoHdfsAuthentication) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment)

Example 10 with HdfsConfig

Use of io.trino.plugin.hive.HdfsConfig in the trinodb/trino project.

From the class TestSharedGlueMetastore, the method createQueryRunner:

@Override
protected QueryRunner createQueryRunner() throws Exception {
    // One session per catalog, both targeting the shared schema.
    Session icebergSession = testSessionBuilder()
            .setCatalog(ICEBERG_CATALOG)
            .setSchema(schema)
            .build();
    Session hiveSession = testSessionBuilder()
            .setCatalog(HIVE_CATALOG)
            .setSchema(schema)
            .build();

    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(icebergSession).build();

    // TPCH source catalog used to seed the test tables.
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");

    // Warehouse directory shared by the Glue-backed catalogs.
    this.dataDirectory = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
    this.dataDirectory.toFile().deleteOnExit();

    // Iceberg catalog backed by Glue.
    queryRunner.installPlugin(new IcebergPlugin());
    queryRunner.createCatalog(
            ICEBERG_CATALOG,
            "iceberg",
            ImmutableMap.of(
                    "iceberg.catalog.type", "glue",
                    "hive.metastore.glue.default-warehouse-dir", dataDirectory.toString()));

    // Glue metastore instance shared with the Hive catalogs below.
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(
            new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of()),
            hdfsConfig,
            new NoHdfsAuthentication());
    this.glueMetastore = new GlueHiveMetastore(
            hdfsEnvironment,
            new GlueHiveMetastoreConfig(),
            directExecutor(),
            new DefaultGlueColumnStatisticsProviderFactory(new GlueHiveMetastoreConfig(), directExecutor(), directExecutor()),
            Optional.empty(),
            table -> true);

    queryRunner.installPlugin(new TestingHivePlugin(glueMetastore));
    queryRunner.createCatalog(HIVE_CATALOG, "hive");
    // Hive catalog that redirects Iceberg tables to the iceberg catalog.
    queryRunner.createCatalog("hive_with_redirections", "hive", ImmutableMap.of("hive.iceberg-catalog-name", "iceberg"));

    queryRunner.execute("CREATE SCHEMA " + schema + " WITH (location = '" + dataDirectory.toString() + "')");

    // Seed one table through each engine.
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, icebergSession, ImmutableList.of(TpchTable.NATION));
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, hiveSession, ImmutableList.of(TpchTable.REGION));
    return queryRunner;
}
Also used : Logger(io.airlift.log.Logger) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Assert.assertEquals(org.testng.Assert.assertEquals) Test(org.testng.annotations.Test) NoHdfsAuthentication(io.trino.plugin.hive.authentication.NoHdfsAuthentication) AbstractTestQueryFramework(io.trino.testing.AbstractTestQueryFramework) GlueHiveMetastoreConfig(io.trino.plugin.hive.metastore.glue.GlueHiveMetastoreConfig) HiveMetastore(io.trino.plugin.hive.metastore.HiveMetastore) DistributedQueryRunner(io.trino.testing.DistributedQueryRunner) ImmutableList(com.google.common.collect.ImmutableList) Assertions.assertThatThrownBy(org.assertj.core.api.Assertions.assertThatThrownBy) QueryAssertions.copyTpchTables(io.trino.testing.QueryAssertions.copyTpchTables) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) TpchPlugin(io.trino.plugin.tpch.TpchPlugin) Path(java.nio.file.Path) TestTable.randomTableSuffix(io.trino.testing.sql.TestTable.randomTableSuffix) TpchTable(io.trino.tpch.TpchTable) AfterClass(org.testng.annotations.AfterClass) TINY_SCHEMA_NAME(io.trino.plugin.tpch.TpchMetadata.TINY_SCHEMA_NAME) ImmutableSet(com.google.common.collect.ImmutableSet) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) ImmutableMap(com.google.common.collect.ImmutableMap) String.format(java.lang.String.format) MoreExecutors.directExecutor(com.google.common.util.concurrent.MoreExecutors.directExecutor) DefaultGlueColumnStatisticsProviderFactory(io.trino.plugin.hive.metastore.glue.DefaultGlueColumnStatisticsProviderFactory) ICEBERG_CATALOG(io.trino.plugin.iceberg.IcebergQueryRunner.ICEBERG_CATALOG) TestingSession.testSessionBuilder(io.trino.testing.TestingSession.testSessionBuilder) GlueHiveMetastore(io.trino.plugin.hive.metastore.glue.GlueHiveMetastore) HdfsConfig(io.trino.plugin.hive.HdfsConfig) QueryRunner(io.trino.testing.QueryRunner) HdfsConfigurationInitializer(io.trino.plugin.hive.HdfsConfigurationInitializer) 
TestingHivePlugin(io.trino.plugin.hive.TestingHivePlugin) Optional(java.util.Optional) Session(io.trino.Session) HdfsConfigurationInitializer(io.trino.plugin.hive.HdfsConfigurationInitializer) DistributedQueryRunner(io.trino.testing.DistributedQueryRunner) HiveHdfsConfiguration(io.trino.plugin.hive.HiveHdfsConfiguration) TpchPlugin(io.trino.plugin.tpch.TpchPlugin) TestingHivePlugin(io.trino.plugin.hive.TestingHivePlugin) HdfsConfig(io.trino.plugin.hive.HdfsConfig) NoHdfsAuthentication(io.trino.plugin.hive.authentication.NoHdfsAuthentication) DefaultGlueColumnStatisticsProviderFactory(io.trino.plugin.hive.metastore.glue.DefaultGlueColumnStatisticsProviderFactory) HdfsEnvironment(io.trino.plugin.hive.HdfsEnvironment) GlueHiveMetastore(io.trino.plugin.hive.metastore.glue.GlueHiveMetastore) GlueHiveMetastoreConfig(io.trino.plugin.hive.metastore.glue.GlueHiveMetastoreConfig) Session(io.trino.Session)

Aggregations

HdfsConfig (io.trino.plugin.hive.HdfsConfig)22 HdfsConfigurationInitializer (io.trino.plugin.hive.HdfsConfigurationInitializer)21 HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment)21 HiveHdfsConfiguration (io.trino.plugin.hive.HiveHdfsConfiguration)21 NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication)21 HdfsConfiguration (io.trino.plugin.hive.HdfsConfiguration)19 NodeVersion (io.trino.plugin.hive.NodeVersion)11 MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig)11 FileHiveMetastore (io.trino.plugin.hive.metastore.file.FileHiveMetastore)10 FileHiveMetastoreConfig (io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig)10 File (java.io.File)8 HiveMetastore (io.trino.plugin.hive.metastore.HiveMetastore)7 CheckpointSchemaManager (io.trino.plugin.deltalake.transactionlog.checkpoint.CheckpointSchemaManager)5 FileFormatDataSourceStats (io.trino.plugin.hive.FileFormatDataSourceStats)4 Database (io.trino.plugin.hive.metastore.Database)4 ParquetReaderConfig (io.trino.plugin.hive.parquet.ParquetReaderConfig)4 DistributedQueryRunner (io.trino.testing.DistributedQueryRunner)4 BeforeClass (org.testng.annotations.BeforeClass)4 Session (io.trino.Session)3 CatalogName (io.trino.plugin.base.CatalogName)3