Example 11 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb, from class PhoenixConnectorFactory, method create:

@Override
public Connector create(String catalogName, Map<String, String> requiredConfig, ConnectorContext context) {
    requireNonNull(requiredConfig, "requiredConfig is null");
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new PhoenixClientModule(),
                binder -> {
                    binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
                    binder.bind(ClassLoader.class).toInstance(PhoenixConnectorFactory.class.getClassLoader());
                    binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                });
        Injector injector = app.doNotInitializeLogging()
                .setRequiredConfigurationProperties(requiredConfig)
                .initialize();
        return injector.getInstance(PhoenixConnector.class);
    }
}
Also used: Injector (com.google.inject.Injector), Bootstrap (io.airlift.bootstrap.Bootstrap), ThreadContextClassLoader (io.trino.spi.classloader.ThreadContextClassLoader), TypeManager (io.trino.spi.type.TypeManager), CatalogName (io.trino.plugin.base.CatalogName), JsonModule (io.airlift.json.JsonModule)
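
Because the factory binds CatalogName with toInstance, any class constructed by the resulting injector can receive the catalog name through plain constructor injection. A minimal sketch, assuming a hypothetical component class (not part of the Trino source):

import static java.util.Objects.requireNonNull;

import com.google.inject.Inject;
import io.trino.plugin.base.CatalogName;

// Hypothetical consumer of the CatalogName instance bound in the module above.
public class ExamplePhoenixComponent {
    private final CatalogName catalogName;

    @Inject
    public ExamplePhoenixComponent(CatalogName catalogName) {
        // Guice supplies the instance bound via binder.bind(CatalogName.class).toInstance(...)
        this.catalogName = requireNonNull(catalogName, "catalogName is null");
    }

    public String catalog() {
        return catalogName.toString();
    }
}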

Example 12 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb, from class DeltaLakeModule, method setup:

@Override
public void setup(Binder binder) {
    Provider<CatalogName> catalogName = binder.getProvider(CatalogName.class);
    configBinder(binder).bindConfig(DeltaLakeConfig.class);
    configBinder(binder).bindConfig(HiveConfig.class);
    // currently not configurable
    binder.bind(MetastoreConfig.class).toInstance(new MetastoreConfig());
    configBinder(binder).bindConfig(ParquetReaderConfig.class);
    configBinder(binder).bindConfig(ParquetWriterConfig.class);
    Multibinder<SystemTableProvider> systemTableProviders = newSetBinder(binder, SystemTableProvider.class);
    systemTableProviders.addBinding().to(PropertiesSystemTableProvider.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeTableProperties.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeAnalyzeProperties.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeTransactionManager.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorSplitManager.class).to(DeltaLakeSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorPageSourceProvider.class).to(DeltaLakePageSourceProvider.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorPageSinkProvider.class).to(DeltaLakePageSinkProvider.class).in(Scopes.SINGLETON);
    binder.bind(ConnectorNodePartitioningProvider.class).to(DeltaLakeNodePartitioningProvider.class).in(Scopes.SINGLETON);
    binder.bind(LocationService.class).to(HiveLocationService.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeMetadataFactory.class).in(Scopes.SINGLETON);
    binder.bind(CachingDeltaLakeStatisticsAccess.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeStatisticsAccess.class).to(CachingDeltaLakeStatisticsAccess.class).in(Scopes.SINGLETON);
    binder.bind(DeltaLakeStatisticsAccess.class)
            .annotatedWith(ForCachingDeltaLakeStatisticsAccess.class)
            .to(MetaDirStatisticsAccess.class)
            .in(Scopes.SINGLETON);
    jsonCodecBinder(binder).bindJsonCodec(DeltaLakeStatistics.class);
    binder.bind(HiveTransactionManager.class).in(Scopes.SINGLETON);
    binder.bind(CheckpointSchemaManager.class).in(Scopes.SINGLETON);
    jsonCodecBinder(binder).bindJsonCodec(LastCheckpoint.class);
    binder.bind(CheckpointWriterManager.class).in(Scopes.SINGLETON);
    binder.bind(TransactionLogAccess.class).in(Scopes.SINGLETON);
    newExporter(binder).export(TransactionLogAccess.class)
            .as(generator -> generator.generatedNameOf(TransactionLogAccess.class, catalogName.get().toString()));
    binder.bind(TransactionLogWriterFactory.class).in(Scopes.SINGLETON);
    binder.bind(TransactionLogSynchronizerManager.class).in(Scopes.SINGLETON);
    binder.bind(NoIsolationSynchronizer.class).in(Scopes.SINGLETON);
    MapBinder<String, TransactionLogSynchronizer> logSynchronizerMapBinder = newMapBinder(binder, String.class, TransactionLogSynchronizer.class);
    // S3
    jsonCodecBinder(binder).bindJsonCodec(S3TransactionLogSynchronizer.LockFileContents.class);
    logSynchronizerMapBinder.addBinding("s3").to(S3TransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    logSynchronizerMapBinder.addBinding("s3a").to(S3TransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    logSynchronizerMapBinder.addBinding("s3n").to(S3TransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    // Azure
    logSynchronizerMapBinder.addBinding("abfs").to(AzureTransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    logSynchronizerMapBinder.addBinding("abfss").to(AzureTransactionLogSynchronizer.class).in(Scopes.SINGLETON);
    jsonCodecBinder(binder).bindJsonCodec(DataFileInfo.class);
    jsonCodecBinder(binder).bindJsonCodec(DeltaLakeUpdateResult.class);
    binder.bind(DeltaLakeWriterStats.class).in(Scopes.SINGLETON);
    binder.bind(FileFormatDataSourceStats.class).in(Scopes.SINGLETON);
    newExporter(binder).export(FileFormatDataSourceStats.class)
            .as(generator -> generator.generatedNameOf(FileFormatDataSourceStats.class, catalogName.get().toString()));
    Multibinder<Procedure> procedures = newSetBinder(binder, Procedure.class);
    procedures.addBinding().toProvider(DropExtendedStatsProcedure.class).in(Scopes.SINGLETON);
    procedures.addBinding().toProvider(VacuumProcedure.class).in(Scopes.SINGLETON);
    Multibinder<TableProcedureMetadata> tableProcedures = newSetBinder(binder, TableProcedureMetadata.class);
    tableProcedures.addBinding().toProvider(OptimizeTableProcedure.class).in(Scopes.SINGLETON);
}
Also used: AzureTransactionLogSynchronizer (io.trino.plugin.deltalake.transactionlog.writer.AzureTransactionLogSynchronizer), MetaDirStatisticsAccess (io.trino.plugin.deltalake.statistics.MetaDirStatisticsAccess), PropertiesSystemTableProvider (io.trino.plugin.hive.PropertiesSystemTableProvider), NoIsolationSynchronizer (io.trino.plugin.deltalake.transactionlog.writer.NoIsolationSynchronizer), OptimizeTableProcedure (io.trino.plugin.hive.procedure.OptimizeTableProcedure), CheckpointSchemaManager (io.trino.plugin.deltalake.transactionlog.checkpoint.CheckpointSchemaManager), TableProcedureMetadata (io.trino.spi.connector.TableProcedureMetadata), VacuumProcedure (io.trino.plugin.deltalake.procedure.VacuumProcedure), DropExtendedStatsProcedure (io.trino.plugin.deltalake.procedure.DropExtendedStatsProcedure), Procedure (io.trino.spi.procedure.Procedure), TransactionLogSynchronizerManager (io.trino.plugin.deltalake.transactionlog.writer.TransactionLogSynchronizerManager), HiveLocationService (io.trino.plugin.hive.HiveLocationService), MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig), TransactionLogWriterFactory (io.trino.plugin.deltalake.transactionlog.writer.TransactionLogWriterFactory), TransactionLogAccess (io.trino.plugin.deltalake.transactionlog.TransactionLogAccess), FileFormatDataSourceStats (io.trino.plugin.hive.FileFormatDataSourceStats), ForCachingDeltaLakeStatisticsAccess (io.trino.plugin.deltalake.statistics.CachingDeltaLakeStatisticsAccess.ForCachingDeltaLakeStatisticsAccess), CachingDeltaLakeStatisticsAccess (io.trino.plugin.deltalake.statistics.CachingDeltaLakeStatisticsAccess), DeltaLakeStatisticsAccess (io.trino.plugin.deltalake.statistics.DeltaLakeStatisticsAccess), S3TransactionLogSynchronizer (io.trino.plugin.deltalake.transactionlog.writer.S3TransactionLogSynchronizer), TransactionLogSynchronizer (io.trino.plugin.deltalake.transactionlog.writer.TransactionLogSynchronizer), CatalogName (io.trino.plugin.base.CatalogName), CheckpointWriterManager (io.trino.plugin.deltalake.transactionlog.checkpoint.CheckpointWriterManager), SystemTableProvider (io.trino.plugin.hive.SystemTableProvider), HiveTransactionManager (io.trino.plugin.hive.HiveTransactionManager)
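
Note that setup obtains a Provider<CatalogName> rather than the value itself: Binder.getProvider returns a provider whose get() is only legal once the Injector has been built, which is why the JMX name-generator lambdas above defer the catalogName.get() call. A self-contained sketch of that behavior, using a stand-in class rather than the Trino type:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Provider;

public class ProviderLazinessSketch {
    // Stand-in for io.trino.plugin.base.CatalogName, for illustration only.
    static class ExampleCatalogName {
        private final String name;

        ExampleCatalogName(String name) {
            this.name = name;
        }

        @Override
        public String toString() {
            return name;
        }
    }

    static Provider<ExampleCatalogName> captured;

    public static void main(String[] args) {
        Guice.createInjector(new AbstractModule() {
            @Override
            protected void configure() {
                bind(ExampleCatalogName.class).toInstance(new ExampleCatalogName("delta"));
                // Legal during configure; calling captured.get() here would throw.
                captured = getProvider(ExampleCatalogName.class);
            }
        });
        // With the injector built, the deferred lookup succeeds and prints "delta".
        System.out.println(captured.get());
    }
}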

Example 13 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb, from class TestIcebergOrcMetricsCollection, method createQueryRunner:

@Override
protected QueryRunner createQueryRunner() throws Exception {
    Session session = testSessionBuilder()
            .setCatalog("iceberg")
            .setSchema("test_schema")
            .setSystemProperty(TASK_CONCURRENCY, "1")
            .setSystemProperty(TASK_WRITER_COUNT, "1")
            .setSystemProperty(MAX_DRIVERS_PER_TASK, "1")
            .setCatalogSessionProperty("iceberg", "orc_string_statistics_limit", Integer.MAX_VALUE + "B")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).setNodeCount(1).build();
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig().setCatalogDirectory(baseDir.toURI().toString()).setMetastoreUser("test"));
    tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    trinoCatalog = new TrinoHiveCatalog(
            new CatalogName("catalog"), memoizeMetastore(metastore, 1000), hdfsEnvironment,
            new TestingTypeManager(), tableOperationsProvider, "trino-version", false, false, false);
    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");
    queryRunner.execute("CREATE SCHEMA test_schema");
    return queryRunner;
}
Also used: HdfsConfigurationInitializer (io.trino.plugin.hive.HdfsConfigurationInitializer), DistributedQueryRunner (io.trino.testing.DistributedQueryRunner), HiveHdfsConfiguration (io.trino.plugin.hive.HiveHdfsConfiguration), MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig), FileHiveMetastoreConfig (io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig), HiveMetastore (io.trino.plugin.hive.metastore.HiveMetastore), FileHiveMetastore (io.trino.plugin.hive.metastore.file.FileHiveMetastore), TpchPlugin (io.trino.plugin.tpch.TpchPlugin), HdfsConfig (io.trino.plugin.hive.HdfsConfig), HdfsConfiguration (io.trino.plugin.hive.HdfsConfiguration), NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication), HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment), NodeVersion (io.trino.plugin.hive.NodeVersion), FileMetastoreTableOperationsProvider (io.trino.plugin.iceberg.catalog.file.FileMetastoreTableOperationsProvider), TrinoHiveCatalog (io.trino.plugin.iceberg.catalog.hms.TrinoHiveCatalog), CatalogName (io.trino.plugin.base.CatalogName), File (java.io.File), TestingTypeManager (io.trino.spi.type.TestingTypeManager), TestingConnectorSession (io.trino.testing.TestingConnectorSession), Session (io.trino.Session)
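
After createQueryRunner returns, the tests in the class issue SQL through the runner; a hypothetical follow-up call (the table name and columns are illustrative, not taken from the test):

// Hypothetical usage once the runner from createQueryRunner() is available.
// MaterializedResult is io.trino.testing.MaterializedResult.
queryRunner.execute("CREATE TABLE test_schema.example (c1 BIGINT, c2 VARCHAR)");
MaterializedResult result = queryRunner.execute("SELECT count(*) FROM test_schema.example");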

Example 14 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb, from class TestIcebergMergeAppend, method createQueryRunner:

@Override
protected QueryRunner createQueryRunner() throws Exception {
    DistributedQueryRunner queryRunner = IcebergQueryRunner.createIcebergQueryRunner();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("testversion"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig().setCatalogDirectory(baseDir.toURI().toString()).setMetastoreUser("test"));
    tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    trinoCatalog = new TrinoHiveCatalog(
            new CatalogName("catalog"), memoizeMetastore(metastore, 1000), hdfsEnvironment,
            new TestingTypeManager(), tableOperationsProvider, "trino-version", false, false, false);
    return queryRunner;
}
Also used: HdfsConfigurationInitializer (io.trino.plugin.hive.HdfsConfigurationInitializer), DistributedQueryRunner (io.trino.testing.DistributedQueryRunner), HiveHdfsConfiguration (io.trino.plugin.hive.HiveHdfsConfiguration), MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig), FileHiveMetastoreConfig (io.trino.plugin.hive.metastore.file.FileHiveMetastoreConfig), HiveMetastore (io.trino.plugin.hive.metastore.HiveMetastore), FileHiveMetastore (io.trino.plugin.hive.metastore.file.FileHiveMetastore), HdfsConfig (io.trino.plugin.hive.HdfsConfig), HdfsConfiguration (io.trino.plugin.hive.HdfsConfiguration), NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication), HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment), NodeVersion (io.trino.plugin.hive.NodeVersion), FileMetastoreTableOperationsProvider (io.trino.plugin.iceberg.catalog.file.FileMetastoreTableOperationsProvider), TrinoHiveCatalog (io.trino.plugin.iceberg.catalog.hms.TrinoHiveCatalog), CatalogName (io.trino.plugin.base.CatalogName), File (java.io.File), TestingTypeManager (io.trino.spi.type.TestingTypeManager)

Example 15 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb, from class JdbcDiagnosticModule, method configure:

@Override
public void configure(Binder binder) {
    binder.install(new MBeanServerModule());
    binder.install(new MBeanModule());
    Provider<CatalogName> catalogName = binder.getProvider(CatalogName.class);
    newExporter(binder).export(Key.get(JdbcClient.class, StatsCollecting.class))
            .as(generator -> generator.generatedNameOf(JdbcClient.class, catalogName.get().toString()));
    newExporter(binder).export(Key.get(ConnectionFactory.class, StatsCollecting.class))
            .as(generator -> generator.generatedNameOf(ConnectionFactory.class, catalogName.get().toString()));
    newExporter(binder).export(JdbcClient.class)
            .as(generator -> generator.generatedNameOf(CachingJdbcClient.class, catalogName.get().toString()));
}
Also used: MBeanModule (org.weakref.jmx.guice.MBeanModule), StatisticsAwareConnectionFactory (io.trino.plugin.jdbc.jmx.StatisticsAwareConnectionFactory), MBeanServerModule (io.trino.plugin.base.jmx.MBeanServerModule), CatalogName (io.trino.plugin.base.CatalogName), StatisticsAwareJdbcClient (io.trino.plugin.jdbc.jmx.StatisticsAwareJdbcClient)
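
Key.get(JdbcClient.class, StatsCollecting.class) selects the binding qualified by the @StatsCollecting binding annotation, so the stats-collecting wrappers are exported rather than the unqualified bindings. A minimal sketch of the mechanism, with an illustrative annotation and String bindings standing in for the JDBC types:

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

import com.google.inject.AbstractModule;
import com.google.inject.BindingAnnotation;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;

public class BindingAnnotationSketch {
    // Illustrative stand-in for io.trino.plugin.jdbc.StatsCollecting.
    @Retention(RetentionPolicy.RUNTIME)
    @BindingAnnotation
    @interface ExampleStatsCollecting {}

    public static void main(String[] args) {
        Injector injector = Guice.createInjector(new AbstractModule() {
            @Override
            protected void configure() {
                bind(String.class).toInstance("plain client");
                bind(Key.get(String.class, ExampleStatsCollecting.class)).toInstance("stats-collecting client");
            }
        });
        // Key.get selects the annotated binding, mirroring the exports above;
        // prints "stats-collecting client".
        System.out.println(injector.getInstance(Key.get(String.class, ExampleStatsCollecting.class)));
    }
}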

Aggregations

CatalogName (io.trino.plugin.base.CatalogName): 22 usages
HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment): 7 usages
TestingNodeManager (io.trino.testing.TestingNodeManager): 7 usages
Injector (com.google.inject.Injector): 6 usages
Bootstrap (io.airlift.bootstrap.Bootstrap): 6 usages
JsonModule (io.airlift.json.JsonModule): 6 usages
NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication): 6 usages
TypeManager (io.trino.spi.type.TypeManager): 6 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 5 usages
HdfsConfigurationInitializer (io.trino.plugin.hive.HdfsConfigurationInitializer): 5 usages
HiveMetastore (io.trino.plugin.hive.metastore.HiveMetastore): 5 usages
ThreadContextClassLoader (io.trino.spi.classloader.ThreadContextClassLoader): 5 usages
LifeCycleManager (io.airlift.bootstrap.LifeCycleManager): 4 usages
NodeVersion (io.trino.plugin.hive.NodeVersion): 4 usages
MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig): 4 usages
ConnectorPageSinkProvider (io.trino.spi.connector.ConnectorPageSinkProvider): 4 usages
ConnectorSplitManager (io.trino.spi.connector.ConnectorSplitManager): 4 usages
Procedure (io.trino.spi.procedure.Procedure): 4 usages
Set (java.util.Set): 4 usages
ImmutableList (com.google.common.collect.ImmutableList): 3 usages