Use of io.trino.plugin.hive.metastore.MetastoreConfig in project trino by trinodb.
Class TestHiveProjectionPushdownIntoTableScan, method createLocalQueryRunner:
@Override
protected LocalQueryRunner createLocalQueryRunner()
{
    baseDir = Files.createTempDir();
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            environment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig().setCatalogDirectory(baseDir.toURI().toString()).setMetastoreUser("test"));
    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    LocalQueryRunner queryRunner = LocalQueryRunner.create(HIVE_SESSION);
    queryRunner.createCatalog(HIVE_CATALOG_NAME, new TestingHiveConnectorFactory(metastore), ImmutableMap.of());
    return queryRunner;
}
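Since createLocalQueryRunner places the file-based metastore in a temporary directory, the test would normally delete that directory when it finishes. A minimal cleanup sketch, assuming the test is TestNG-based and keeps baseDir as a field (using Guava's MoreFiles; this is a suggestion, not necessarily what the Trino test does):

@AfterClass(alwaysRun = true)
public void cleanup()
        throws IOException
{
    if (baseDir != null) {
        // Remove the temporary catalog directory created in createLocalQueryRunner
        MoreFiles.deleteRecursively(baseDir.toPath(), RecursiveDeleteOption.ALLOW_INSECURE);
    }
}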
Use of io.trino.plugin.hive.metastore.MetastoreConfig in project trino by trinodb.
Class TestIcebergTableWithCustomLocation, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    metastoreDir = Files.createTempDirectory("test_iceberg").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    FileHiveMetastoreConfig config = new FileHiveMetastoreConfig()
            .setCatalogDirectory(metastoreDir.toURI().toString())
            .setMetastoreUser("test");
    hdfsContext = new HdfsContext(ConnectorIdentity.ofUser(config.getMetastoreUser()));
    metastore = new FileHiveMetastore(new NodeVersion("testversion"), hdfsEnvironment, new MetastoreConfig(), config);
    return createIcebergQueryRunner(
            ImmutableMap.of(),
            ImmutableMap.of("iceberg.unique-table-location", "true"),
            ImmutableList.of(),
            Optional.of(metastoreDir));
}
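The metastore field built here is what lets the test inspect storage paths directly. A rough sketch of such a check with a hypothetical test method; the HiveMetastore#getTable(String, String) overload, the io.trino.plugin.hive.metastore.Table accessors, and the default "tpch" schema are assumptions that vary across Trino versions:

@Test
public void testTableLocationContainsUniqueSuffix()
{
    assertUpdate("CREATE TABLE table_with_custom_location (c integer)");
    // With iceberg.unique-table-location=true the table directory should not be
    // the plain <catalog-dir>/<schema>/<table> layout.
    Table table = metastore.getTable("tpch", "table_with_custom_location").orElseThrow();
    assertThat(table.getStorage().getLocation())
            .isNotEqualTo(format("%stpch/table_with_custom_location", metastoreDir.toURI()));
}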
Use of io.trino.plugin.hive.metastore.MetastoreConfig in project trino by trinodb.
Class DeltaLakeModule, method setup:
@Override
public void setup(Binder binder)
{
    Provider<CatalogName> catalogName = binder.getProvider(CatalogName.class);
    configBinder(binder).bindConfig(DeltaLakeConfig.class);
    configBinder(binder).bindConfig(HiveConfig.class);
    // currently not configurable
    binder.bind(MetastoreConfig.class).toInstance(new MetastoreConfig());
    configBinder(binder).bindConfig(ParquetReaderConfig.class);
    configBinder(binder).bindConfig(ParquetWriterConfig.class);
    Multibinder<SystemTableProvider> systemTableProviders = newSetBinder(binder, SystemTableProvider.class);
    systemTableProviders.addBinding().to(PropertiesSystemTableProvider.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeTableProperties.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeAnalyzeProperties.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeTransactionManager.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorSplitManager.class).to(DeltaLakeSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorPageSourceProvider.class).to(DeltaLakePageSourceProvider.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorPageSinkProvider.class).to(DeltaLakePageSinkProvider.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorNodePartitioningProvider.class).to(DeltaLakeNodePartitioningProvider.class).in(Scopes.SINGLETON);
    binder.bind(LocationService.class).to(HiveLocationService.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeMetadataFactory.class).in(Scopes.SINGLETON);
    binder.bind(CachingDeltaLakeStatisticsAccess.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeStatisticsAccess.class).to(CachingDeltaLakeStatisticsAccess.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeStatisticsAccess.class)
            .annotatedWith(ForCachingDeltaLakeStatisticsAccess.class)
            .to(MetaDirStatisticsAccess.class)
            .in(Scopes.SINGLETON);
    jsonCodecBinder(binder).bindJsonCodec(DeltaLakeStatistics.class);
    binder.bind(HiveTransactionManager.class).in(Scopes.SINGLETON);
    binder.bind(CheckpointSchemaManager.class).in(Scopes.SINGLETON);
    jsonCodecBinder(binder).bindJsonCodec(LastCheckpoint.class);
    binder.bind(CheckpointWriterManager.class).in(Scopes.SINGLETON);
    binder.bind(TransactionLogAccess.class).in(Scopes.SINGLETON);
    newExporter(binder).export(TransactionLogAccess.class)
            .as(generator -> generator.generatedNameOf(TransactionLogAccess.class, catalogName.get().toString()));
    binder.bind(TransactionLogWriterFactory.class).in(Scopes.SINGLETON);
    binder.bind(TransactionLogSynchronizerManager.class).in(Scopes.SINGLETON);
    binder.bind(NoIsolationSynchronizer.class).in(Scopes.SINGLETON);
    MapBinder<String, TransactionLogSynchronizer> logSynchronizerMapBinder = newMapBinder(binder, String.class, TransactionLogSynchronizer.class);
    // S3
    jsonCodecBinder(binder).bindJsonCodec(S3TransactionLogSynchronizer.LockFileContents.class);
    logSynchronizerMapBinder.addBinding("s3").to(S3TransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    logSynchronizerMapBinder.addBinding("s3a").to(S3TransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    logSynchronizerMapBinder.addBinding("s3n").to(S3TransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    // Azure
    logSynchronizerMapBinder.addBinding("abfs").to(AzureTransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    logSynchronizerMapBinder.addBinding("abfss").to(AzureTransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    jsonCodecBinder(binder).bindJsonCodec(DataFileInfo.class);
    jsonCodecBinder(binder).bindJsonCodec(DeltaLakeUpdateResult.class);
    binder.bind(DeltaLakeWriterStats.class).in(Scopes.SINGLETON);
    binder.bind(FileFormatDataSourceStats.class).in(Scopes.SINGLETON);
    newExporter(binder).export(FileFormatDataSourceStats.class)
            .as(generator -> generator.generatedNameOf(FileFormatDataSourceStats.class, catalogName.get().toString()));
    Multibinder<Procedure> procedures = newSetBinder(binder, Procedure.class);
    procedures.addBinding().toProvider(DropExtendedStatsProcedure.class).in(Scopes.SINGLETON);
    procedures.addBinding().toProvider(VacuumProcedure.class).in(Scopes.SINGLETON);
    Multibinder<TableProcedureMetadata> tableProcedures = newSetBinder(binder, TableProcedureMetadata.class);
    tableProcedures.addBinding().toProvider(OptimizeTableProcedure.class).in(Scopes.SINGLETON);
}
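Because the module binds a fixed default instance of MetastoreConfig (rather than registering it with configBinder), that instance can simply be constructor-injected wherever it is needed in the connector. A minimal sketch of such a consumer, with a hypothetical class name:

public class ExampleMetastoreConfigConsumer
{
    private final MetastoreConfig metastoreConfig;

    @Inject
    public ExampleMetastoreConfigConsumer(MetastoreConfig metastoreConfig)
    {
        // Guice supplies the single instance bound in DeltaLakeModule#setup
        this.metastoreConfig = requireNonNull(metastoreConfig, "metastoreConfig is null");
    }
}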
Use of io.trino.plugin.hive.metastore.MetastoreConfig in project trino by trinodb.
Class TestIcebergTableWithExternalLocation, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    metastoreDir = Files.createTempDirectory("test_iceberg").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    FileHiveMetastoreConfig config = new FileHiveMetastoreConfig()
            .setCatalogDirectory(metastoreDir.toURI().toString())
            .setMetastoreUser("test");
    hdfsContext = new HdfsContext(ConnectorIdentity.ofUser(config.getMetastoreUser()));
    metastore = new FileHiveMetastore(new NodeVersion("testversion"), hdfsEnvironment, new MetastoreConfig(), config);
    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(), Optional.of(metastoreDir));
}
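This variant leaves iceberg.unique-table-location at its default and instead exercises tables whose location is supplied explicitly. A hedged sketch of such a statement (the table name and path are illustrative, and the Iceberg connector's 'location' table property is assumed to be available in this version):

@Test
public void testCreateTableWithExternalLocation()
{
    File tableDir = new File(metastoreDir, "external_location_table");
    // Point the table's data directory at an explicit path under the metastore directory
    assertUpdate(format(
            "CREATE TABLE external_location_table (c1 integer) WITH (location = '%s')",
            tableDir.toURI()));
    assertUpdate("INSERT INTO external_location_table VALUES 1", 1);
}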
Use of io.trino.plugin.hive.metastore.MetastoreConfig in project trino by trinodb.
Class TestIcebergOrcMetricsCollection, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception
{
    Session session = testSessionBuilder()
            .setCatalog("iceberg")
            .setSchema("test_schema")
            .setSystemProperty(TASK_CONCURRENCY, "1")
            .setSystemProperty(TASK_WRITER_COUNT, "1")
            .setSystemProperty(MAX_DRIVERS_PER_TASK, "1")
            .setCatalogSessionProperty("iceberg", "orc_string_statistics_limit", Integer.MAX_VALUE + "B")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).setNodeCount(1).build();
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig().setCatalogDirectory(baseDir.toURI().toString()).setMetastoreUser("test"));
    tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    trinoCatalog = new TrinoHiveCatalog(
            new CatalogName("catalog"),
            memoizeMetastore(metastore, 1000),
            hdfsEnvironment,
            new TestingTypeManager(),
            tableOperationsProvider, "trino-version", false, false, false);
    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");
    queryRunner.execute("CREATE SCHEMA test_schema");
    return queryRunner;
}
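Once the runner is built, a test can write data into test_schema and read back per-file statistics, for example through Iceberg's "$files" metadata table. A minimal sketch (the test and table names are illustrative; the record_count column follows Iceberg's metadata-table layout and is an assumption here):

@Test
public void testRecordCountIsCollected()
{
    assertUpdate("CREATE TABLE test_metrics (c1 integer)");
    assertUpdate("INSERT INTO test_metrics VALUES 1, 2, 3", 3);
    // With TASK_WRITER_COUNT=1 the insert produces a single data file,
    // whose row count is reported in the $files metadata table
    MaterializedResult result = computeActual("SELECT record_count FROM \"test_metrics$files\"");
    assertEquals((long) result.getOnlyValue(), 3L);
}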