Use of io.trino.spi.connector.ConnectorContext in project trino by trinodb.
The class InternalHiveConnectorFactory, method createConnector:
public static Connector createConnector(String catalogName, Map<String, String> config, ConnectorContext context, Module module, Optional<HiveMetastore> metastore, Optional<CachingDirectoryLister> cachingDirectoryLister) {
requireNonNull(config, "config is null");
ClassLoader classLoader = InternalHiveConnectorFactory.class.getClassLoader();
try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
Bootstrap app = new Bootstrap(
        new CatalogNameModule(catalogName),
        new EventModule(),
        new MBeanModule(),
        new ConnectorObjectNameGeneratorModule(catalogName, "io.trino.plugin.hive", "trino.plugin.hive"),
        new JsonModule(),
        new TypeDeserializerModule(context.getTypeManager()),
        new HiveModule(),
        new CachingDirectoryListerModule(cachingDirectoryLister),
        new HiveHdfsModule(),
        new HiveS3Module(),
        new HiveGcsModule(),
        new HiveAzureModule(),
        conditionalModule(RubixEnabledConfig.class, RubixEnabledConfig::isCacheEnabled, new RubixModule()),
        new HiveMetastoreModule(metastore),
        new HiveSecurityModule(),
        new HdfsAuthenticationModule(),
        new HiveProcedureModule(),
        new MBeanServerModule(),
        binder -> {
            binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
            binder.bind(NodeManager.class).toInstance(context.getNodeManager());
            binder.bind(VersionEmbedder.class).toInstance(context.getVersionEmbedder());
            binder.bind(MetadataProvider.class).toInstance(context.getMetadataProvider());
            binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
            binder.bind(PageSorter.class).toInstance(context.getPageSorter());
        },
        binder -> newSetBinder(binder, EventListener.class),
        binder -> bindSessionPropertiesProvider(binder, HiveSessionProperties.class),
        module);
Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
HiveTransactionManager transactionManager = injector.getInstance(HiveTransactionManager.class);
ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
Set<SessionPropertiesProvider> sessionPropertiesProviders = injector.getInstance(Key.get(new TypeLiteral<Set<SessionPropertiesProvider>>() {}));
HiveTableProperties hiveTableProperties = injector.getInstance(HiveTableProperties.class);
HiveAnalyzeProperties hiveAnalyzeProperties = injector.getInstance(HiveAnalyzeProperties.class);
HiveMaterializedViewPropertiesProvider hiveMaterializedViewPropertiesProvider = injector.getInstance(HiveMaterializedViewPropertiesProvider.class);
Set<Procedure> procedures = injector.getInstance(Key.get(new TypeLiteral<Set<Procedure>>() {}));
Set<TableProcedureMetadata> tableProcedures = injector.getInstance(Key.get(new TypeLiteral<Set<TableProcedureMetadata>>() {}));
Set<EventListener> eventListeners = injector.getInstance(Key.get(new TypeLiteral<Set<EventListener>>() {}))
        .stream()
        .map(listener -> new ClassLoaderSafeEventListener(listener, classLoader))
        .collect(toImmutableSet());
Set<SystemTableProvider> systemTableProviders = injector.getInstance(Key.get(new TypeLiteral<Set<SystemTableProvider>>() {}));
Optional<ConnectorAccessControl> hiveAccessControl = injector.getInstance(Key.get(new TypeLiteral<Optional<ConnectorAccessControl>>() {}))
        .map(accessControl -> new SystemTableAwareAccessControl(accessControl, systemTableProviders))
        .map(accessControl -> new ClassLoaderSafeConnectorAccessControl(accessControl, classLoader));
return new HiveConnector(
        lifeCycleManager,
        transactionManager,
        new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
        new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
        new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
        new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
        procedures,
        tableProcedures,
        eventListeners,
        sessionPropertiesProviders,
        HiveSchemaProperties.SCHEMA_PROPERTIES,
        hiveTableProperties.getTableProperties(),
        hiveAnalyzeProperties.getAnalyzeProperties(),
        hiveMaterializedViewPropertiesProvider.getMaterializedViewProperties(),
        hiveAccessControl,
        injector.getInstance(HiveConfig.class).isSingleStatementWritesOnly(),
        classLoader);
}
}
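A hedged sketch of how a public ConnectorFactory might delegate to the method above. The class name and wiring are illustrative assumptions rather than verbatim Trino source; EMPTY_MODULE comes from com.google.inject.util.Modules, and the two Optional.empty() arguments leave the metastore and directory-lister overrides unset.
public class ExampleHiveConnectorFactory
        implements ConnectorFactory
{
    @Override
    public String getName()
    {
        return "hive";
    }

    @Override
    public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
    {
        // illustrative delegation: no extra module, no preconfigured metastore,
        // no shared directory lister
        return InternalHiveConnectorFactory.createConnector(
                catalogName,
                config,
                context,
                Modules.EMPTY_MODULE,
                Optional.empty(),
                Optional.empty());
    }
}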
Use of io.trino.spi.connector.ConnectorContext in project trino by trinodb.
The class AtopConnectorFactory, method create:
@Override
public Connector create(String catalogName, Map<String, String> requiredConfig, ConnectorContext context) {
requireNonNull(requiredConfig, "requiredConfig is null");
try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
Bootstrap app = new Bootstrap(
        new AtopModule(atopFactoryClass, context.getTypeManager(), context.getNodeManager(), context.getNodeManager().getEnvironment()),
        new CatalogNameModule(catalogName),
        new ConnectorAccessControlModule(),
        conditionalModule(AtopConnectorConfig.class, config -> config.getSecurity() == AtopSecurity.FILE, binder -> {
            binder.install(new FileBasedAccessControlModule());
            binder.install(new JsonModule());
        }));
Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(requiredConfig).initialize();
return injector.getInstance(AtopConnector.class);
}
}
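A hedged sketch of how this factory could be registered through the Plugin SPI. The AtopConnectorFactory constructor shape is an assumption inferred from the atopFactoryClass and classLoader fields referenced above, and AtopProcessFactory is assumed to be the default AtopFactory implementation; neither is verbatim Trino source.
public class ExampleAtopPlugin
        implements Plugin
{
    @Override
    public Iterable<ConnectorFactory> getConnectorFactories()
    {
        // assumed constructor: (Class<? extends AtopFactory> factoryClass, ClassLoader classLoader)
        return ImmutableList.of(new AtopConnectorFactory(AtopProcessFactory.class, ExampleAtopPlugin.class.getClassLoader()));
    }
}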
Use of io.trino.spi.connector.ConnectorContext in project trino by trinodb.
The class InternalDeltaLakeConnectorFactory, method createConnector:
@VisibleForTesting
public static Connector createConnector(String catalogName, Map<String, String> config, ConnectorContext context, Module extraModule) {
ClassLoader classLoader = InternalDeltaLakeConnectorFactory.class.getClassLoader();
try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
Bootstrap app = new Bootstrap(
        new EventModule(),
        new MBeanModule(),
        new JsonModule(),
        new MBeanServerModule(),
        new HiveHdfsModule(),
        new HiveS3Module(),
        new HiveAzureModule(),
        new HdfsAuthenticationModule(),
        new CatalogNameModule(catalogName),
        new DeltaLakeMetastoreModule(),
        new DeltaLakeModule(),
        binder -> {
            binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
            binder.bind(NodeManager.class).toInstance(context.getNodeManager());
            binder.bind(TypeManager.class).toInstance(context.getTypeManager());
            binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
            binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
            newSetBinder(binder, EventListener.class);
        },
        binder -> bindSessionPropertiesProvider(binder, DeltaLakeSessionProperties.class),
        extraModule);
Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
ConnectorPageSinkProvider connectorPageSink = injector.getInstance(ConnectorPageSinkProvider.class);
ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
Set<SessionPropertiesProvider> sessionPropertiesProviders = injector.getInstance(Key.get(new TypeLiteral<Set<SessionPropertiesProvider>>() {}));
DeltaLakeTableProperties deltaLakeTableProperties = injector.getInstance(DeltaLakeTableProperties.class);
DeltaLakeAnalyzeProperties deltaLakeAnalyzeProperties = injector.getInstance(DeltaLakeAnalyzeProperties.class);
DeltaLakeTransactionManager transactionManager = injector.getInstance(DeltaLakeTransactionManager.class);
Set<EventListener> eventListeners = injector.getInstance(Key.get(new TypeLiteral<Set<EventListener>>() {}))
        .stream()
        .map(listener -> new ClassLoaderSafeEventListener(listener, classLoader))
        .collect(toImmutableSet());
Set<Procedure> procedures = injector.getInstance(Key.get(new TypeLiteral<Set<Procedure>>() {}));
Set<TableProcedureMetadata> tableProcedures = injector.getInstance(Key.get(new TypeLiteral<Set<TableProcedureMetadata>>() {}));
return new DeltaLakeConnector(
        lifeCycleManager,
        new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
        new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
        new ClassLoaderSafeConnectorPageSinkProvider(connectorPageSink, classLoader),
        new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
        ImmutableSet.of(),
        procedures,
        tableProcedures,
        sessionPropertiesProviders,
        DeltaLakeSchemaProperties.SCHEMA_PROPERTIES,
        deltaLakeTableProperties.getTableProperties(),
        deltaLakeAnalyzeProperties.getAnalyzeProperties(),
        eventListeners,
        transactionManager);
}
}
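The factories above repeatedly fetch Guice set bindings through Key.get(new TypeLiteral<Set<...>>() {}). A self-contained sketch of that multibinding pattern, with illustrative names (Contribution stands in for EventListener, Procedure, and the other contributed types):
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.TypeLiteral;
import com.google.inject.multibindings.Multibinder;
import java.util.Set;

public class MultibindingSketch
{
    interface Contribution {}

    public static void main(String[] args)
    {
        Injector injector = Guice.createInjector(binder -> {
            // contribute one element to the set binding
            Multibinder<Contribution> multibinder = Multibinder.newSetBinder(binder, Contribution.class);
            multibinder.addBinding().toInstance(new Contribution() {});
        });
        // a declared set binder always yields a Set, possibly empty, which is why
        // the factories above can unconditionally ask for Set<EventListener> and friends
        Set<Contribution> contributions = injector.getInstance(Key.get(new TypeLiteral<Set<Contribution>>() {}));
        System.out.println(contributions.size()); // 1
    }
}
The anonymous TypeLiteral subclass is what preserves the Set<Contribution> type argument at runtime; a plain Class token cannot express it.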
Use of io.trino.spi.connector.ConnectorContext in project trino by trinodb.
The class InternalIcebergConnectorFactory, method createConnector:
public static Connector createConnector(String catalogName, Map<String, String> config, ConnectorContext context, Module module, Optional<HiveMetastore> metastore, Optional<FileIoProvider> fileIoProvider) {
ClassLoader classLoader = InternalIcebergConnectorFactory.class.getClassLoader();
try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
Bootstrap app = new Bootstrap(
        new EventModule(),
        new MBeanModule(),
        new ConnectorObjectNameGeneratorModule(catalogName, "io.trino.plugin.iceberg", "trino.plugin.iceberg"),
        new JsonModule(),
        new IcebergModule(),
        new IcebergSecurityModule(),
        new IcebergCatalogModule(metastore),
        new HiveHdfsModule(),
        new HiveS3Module(),
        new HiveGcsModule(),
        new HiveAzureModule(),
        new HdfsAuthenticationModule(),
        new MBeanServerModule(),
        fileIoProvider
                .<Module>map(provider -> binder -> binder.bind(FileIoProvider.class).toInstance(provider))
                .orElse(binder -> binder.bind(FileIoProvider.class).to(HdfsFileIoProvider.class).in(SINGLETON)),
        binder -> {
            binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
            binder.bind(NodeManager.class).toInstance(context.getNodeManager());
            binder.bind(TypeManager.class).toInstance(context.getTypeManager());
            binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
            binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
        },
        module);
Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
IcebergTransactionManager transactionManager = injector.getInstance(IcebergTransactionManager.class);
ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
Set<SessionPropertiesProvider> sessionPropertiesProviders = injector.getInstance(Key.get(new TypeLiteral<Set<SessionPropertiesProvider>>() {}));
IcebergTableProperties icebergTableProperties = injector.getInstance(IcebergTableProperties.class);
Set<Procedure> procedures = injector.getInstance(Key.get(new TypeLiteral<Set<Procedure>>() {}));
Set<TableProcedureMetadata> tableProcedures = injector.getInstance(Key.get(new TypeLiteral<Set<TableProcedureMetadata>>() {}));
Optional<ConnectorAccessControl> accessControl = injector.getInstance(Key.get(new TypeLiteral<Optional<ConnectorAccessControl>>() {}));
return new IcebergConnector(
        lifeCycleManager,
        transactionManager,
        new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
        new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
        new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
        new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
        sessionPropertiesProviders,
        IcebergSchemaProperties.SCHEMA_PROPERTIES,
        icebergTableProperties.getTableProperties(),
        accessControl,
        procedures,
        tableProcedures);
}
}
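One detail worth isolating: the Optional<FileIoProvider> argument becomes a Guice Module through a map/orElse chain. Pulled into a named helper, a hypothetical refactoring of the expression above rather than code from the source, the idiom reads:
// equivalent to the inline expression passed to Bootstrap above: bind the
// supplied FileIoProvider instance when present, otherwise default to
// HdfsFileIoProvider in SINGLETON scope
private static Module fileIoProviderModule(Optional<FileIoProvider> fileIoProvider)
{
    return fileIoProvider
            .<Module>map(provider -> binder -> binder.bind(FileIoProvider.class).toInstance(provider))
            .orElse(binder -> binder.bind(FileIoProvider.class).to(HdfsFileIoProvider.class).in(SINGLETON));
}
The explicit <Module> type witness on map is what lets the nested lambda resolve to Module rather than an unrelated inferred functional type.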
Use of io.trino.spi.connector.ConnectorContext in project trino by trinodb.
The class TestDeltaLakePerTransactionMetastoreCache, method createQueryRunner:
private DistributedQueryRunner createQueryRunner(boolean enablePerTransactionHiveMetastoreCaching) throws Exception {
boolean createdDeltaLake = false;
if (dockerizedMinioDataLake == null) {
// share the environment between test cases to speed things up
dockerizedMinioDataLake = createDockerizedMinioDataLakeForDeltaLake(BUCKET_NAME);
createdDeltaLake = true;
}
Session session = testSessionBuilder().setCatalog(DELTA_CATALOG).setSchema("default").build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();
queryRunner.installPlugin(new Plugin() {
@Override
public Iterable<ConnectorFactory> getConnectorFactories() {
return ImmutableList.of(new ConnectorFactory() {
@Override
public String getName() {
return TEST_DELTA_CONNECTOR_NAME;
}
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context) {
return InternalDeltaLakeConnectorFactory.createConnector(catalogName, config, context, new AbstractConfigurationAwareModule() {
@Override
protected void setup(Binder binder) {
newOptionalBinder(binder, ThriftMetastoreClientFactory.class).setDefault().to(DefaultThriftMetastoreClientFactory.class).in(Scopes.SINGLETON);
binder.bind(MetastoreLocator.class).to(StaticMetastoreLocator.class).in(Scopes.SINGLETON);
configBinder(binder).bindConfig(StaticMetastoreConfig.class);
configBinder(binder).bindConfig(ThriftMetastoreConfig.class);
binder.bind(ThriftMetastore.class).to(ThriftHiveMetastore.class).in(Scopes.SINGLETON);
newExporter(binder).export(ThriftMetastore.class).as((generator) -> generator.generatedNameOf(ThriftHiveMetastore.class));
install(new ThriftMetastoreAuthenticationModule());
binder.bind(Boolean.class).annotatedWith(HideNonDeltaLakeTables.class).toInstance(false);
binder.bind(BridgingHiveMetastoreFactory.class).in(Scopes.SINGLETON);
}
@Provides
@Singleton
@RawHiveMetastoreFactory
public HiveMetastoreFactory getCountingHiveMetastoreFactory(BridgingHiveMetastoreFactory bridgingHiveMetastoreFactory) {
return new HiveMetastoreFactory() {
@Override
public boolean isImpersonationEnabled() {
return false;
}
@Override
public HiveMetastore createMetastore(Optional<ConnectorIdentity> identity) {
HiveMetastore bridgingHiveMetastore = bridgingHiveMetastoreFactory.createMetastore(identity);
// wrap HiveMetastore in a proxy that counts method invocations
return Reflection.newProxy(HiveMetastore.class, (proxy, method, args) -> {
String methodName = method.getName();
long count = hiveMetastoreInvocationCounts.getOrDefault(methodName, 0L);
hiveMetastoreInvocationCounts.put(methodName, count + 1);
return method.invoke(bridgingHiveMetastore, args);
});
}
};
}
});
}
});
}
});
ImmutableMap.Builder<String, String> deltaLakeProperties = ImmutableMap.builder();
deltaLakeProperties.put("hive.metastore.uri", dockerizedMinioDataLake.getTestingHadoop().getMetastoreAddress());
deltaLakeProperties.put("hive.s3.aws-access-key", MINIO_ACCESS_KEY);
deltaLakeProperties.put("hive.s3.aws-secret-key", MINIO_SECRET_KEY);
deltaLakeProperties.put("hive.s3.endpoint", dockerizedMinioDataLake.getMinioAddress());
deltaLakeProperties.put("hive.s3.path-style-access", "true");
// use a test value so we do not clash with the default bindings
deltaLakeProperties.put("hive.metastore", "test");
if (!enablePerTransactionHiveMetastoreCaching) {
// effectively disable the cache; 0 is not an allowed config property value
deltaLakeProperties.put("hive.per-transaction-metastore-cache-maximum-size", "1");
}
queryRunner.createCatalog(DELTA_CATALOG, TEST_DELTA_CONNECTOR_NAME, deltaLakeProperties.buildOrThrow());
if (createdDeltaLake) {
List<TpchTable<? extends TpchEntity>> tpchTables = List.of(TpchTable.NATION, TpchTable.REGION);
tpchTables.forEach(table -> {
String tableName = table.getTableName();
dockerizedMinioDataLake.copyResources("io/trino/plugin/deltalake/testing/resources/databricks/" + tableName, tableName);
queryRunner.execute(format("CREATE TABLE %s.%s.%s (dummy int) WITH (location = 's3://%s/%3$s')", DELTA_CATALOG, "default", tableName, BUCKET_NAME));
});
}
return queryRunner;
}
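The heart of the test is the dynamic proxy that counts metastore calls. A self-contained sketch of the same counting-proxy pattern, using an illustrative Greeter interface instead of HiveMetastore; Reflection.newProxy is Guava's com.google.common.reflect.Reflection:
import com.google.common.reflect.Reflection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class CountingProxySketch
{
    interface Greeter
    {
        String greet(String name);
    }

    public static void main(String[] args)
    {
        Map<String, Long> counts = new ConcurrentHashMap<>();
        Greeter delegate = name -> "hello " + name;
        Greeter counting = Reflection.newProxy(Greeter.class, (proxy, method, methodArgs) -> {
            // merge() performs the read-modify-write atomically, which matters
            // if the proxied interface can be called from multiple threads
            counts.merge(method.getName(), 1L, Long::sum);
            // forward the call to the real implementation
            return method.invoke(delegate, methodArgs);
        });
        counting.greet("world");
        counting.greet("again");
        System.out.println(counts); // {greet=2}
    }
}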