Use of io.trino.spi.connector.Connector in project trino by trinodb.
From the class InternalDeltaLakeConnectorFactory, method createConnector:
@VisibleForTesting
public static Connector createConnector(String catalogName, Map<String, String> config, ConnectorContext context, Module extraModule)
{
    ClassLoader classLoader = InternalDeltaLakeConnectorFactory.class.getClassLoader();
    // Build the connector with the plugin class loader installed as the thread context class loader
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(
                new EventModule(),
                new MBeanModule(),
                new JsonModule(),
                new MBeanServerModule(),
                new HiveHdfsModule(),
                new HiveS3Module(),
                new HiveAzureModule(),
                new HdfsAuthenticationModule(),
                new CatalogNameModule(catalogName),
                new DeltaLakeMetastoreModule(),
                new DeltaLakeModule(),
                // Bind the engine-provided services carried by the ConnectorContext
                binder -> {
                    binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
                    binder.bind(NodeManager.class).toInstance(context.getNodeManager());
                    binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                    binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
                    binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
                    newSetBinder(binder, EventListener.class);
                },
                binder -> bindSessionPropertiesProvider(binder, DeltaLakeSessionProperties.class),
                extraModule);
        Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
        LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
        ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
        ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
        ConnectorPageSinkProvider connectorPageSink = injector.getInstance(ConnectorPageSinkProvider.class);
        ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
        Set<SessionPropertiesProvider> sessionPropertiesProviders = injector.getInstance(Key.get(new TypeLiteral<Set<SessionPropertiesProvider>>() {}));
        DeltaLakeTableProperties deltaLakeTableProperties = injector.getInstance(DeltaLakeTableProperties.class);
        DeltaLakeAnalyzeProperties deltaLakeAnalyzeProperties = injector.getInstance(DeltaLakeAnalyzeProperties.class);
        DeltaLakeTransactionManager transactionManager = injector.getInstance(DeltaLakeTransactionManager.class);
        // Wrap event listeners so their callbacks run with the plugin class loader
        Set<EventListener> eventListeners = injector.getInstance(Key.get(new TypeLiteral<Set<EventListener>>() {})).stream()
                .map(listener -> new ClassLoaderSafeEventListener(listener, classLoader))
                .collect(toImmutableSet());
        Set<Procedure> procedures = injector.getInstance(Key.get(new TypeLiteral<Set<Procedure>>() {}));
        Set<TableProcedureMetadata> tableProcedures = injector.getInstance(Key.get(new TypeLiteral<Set<TableProcedureMetadata>>() {}));
        return new DeltaLakeConnector(
                lifeCycleManager,
                new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
                new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
                new ClassLoaderSafeConnectorPageSinkProvider(connectorPageSink, classLoader),
                new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
                ImmutableSet.of(),
                procedures,
                tableProcedures,
                sessionPropertiesProviders,
                DeltaLakeSchemaProperties.SCHEMA_PROPERTIES,
                deltaLakeTableProperties.getTableProperties(),
                deltaLakeAnalyzeProperties.getAnalyzeProperties(),
                eventListeners,
                transactionManager);
    }
}
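A method like this is normally reached through the plugin's public ConnectorFactory, which forwards the catalog name, configuration, and context from the engine. Below is a minimal sketch of such a delegating factory; the class name ExampleDeltaLakeConnectorFactory and the empty extra module are assumptions for illustration, not the exact factory class in the Trino repository.

// Hypothetical delegating factory; the class name is an assumption.
public class ExampleDeltaLakeConnectorFactory
        implements ConnectorFactory
{
    @Override
    public String getName()
    {
        return "delta-lake";
    }

    @Override
    public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
    {
        // Pass an empty Guice module when the caller has no extra bindings to contribute
        return InternalDeltaLakeConnectorFactory.createConnector(catalogName, config, context, binder -> {});
    }
}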
Use of io.trino.spi.connector.Connector in project trino by trinodb.
From the class InternalIcebergConnectorFactory, method createConnector:
public static Connector createConnector(String catalogName, Map<String, String> config, ConnectorContext context, Module module, Optional<HiveMetastore> metastore, Optional<FileIoProvider> fileIoProvider)
{
    ClassLoader classLoader = InternalIcebergConnectorFactory.class.getClassLoader();
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Bootstrap app = new Bootstrap(
                new EventModule(),
                new MBeanModule(),
                new ConnectorObjectNameGeneratorModule(catalogName, "io.trino.plugin.iceberg", "trino.plugin.iceberg"),
                new JsonModule(),
                new IcebergModule(),
                new IcebergSecurityModule(),
                new IcebergCatalogModule(metastore),
                new HiveHdfsModule(),
                new HiveS3Module(),
                new HiveGcsModule(),
                new HiveAzureModule(),
                new HdfsAuthenticationModule(),
                new MBeanServerModule(),
                // Use the supplied FileIoProvider when present, otherwise bind the HDFS default
                fileIoProvider
                        .<Module>map(provider -> binder -> binder.bind(FileIoProvider.class).toInstance(provider))
                        .orElse(binder -> binder.bind(FileIoProvider.class).to(HdfsFileIoProvider.class).in(SINGLETON)),
                // Bind the engine-provided services carried by the ConnectorContext
                binder -> {
                    binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
                    binder.bind(NodeManager.class).toInstance(context.getNodeManager());
                    binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                    binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
                    binder.bind(CatalogName.class).toInstance(new CatalogName(catalogName));
                },
                module);
        Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
        LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
        IcebergTransactionManager transactionManager = injector.getInstance(IcebergTransactionManager.class);
        ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
        ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
        ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
        ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
        Set<SessionPropertiesProvider> sessionPropertiesProviders = injector.getInstance(Key.get(new TypeLiteral<Set<SessionPropertiesProvider>>() {}));
        IcebergTableProperties icebergTableProperties = injector.getInstance(IcebergTableProperties.class);
        Set<Procedure> procedures = injector.getInstance(Key.get(new TypeLiteral<Set<Procedure>>() {}));
        Set<TableProcedureMetadata> tableProcedures = injector.getInstance(Key.get(new TypeLiteral<Set<TableProcedureMetadata>>() {}));
        Optional<ConnectorAccessControl> accessControl = injector.getInstance(Key.get(new TypeLiteral<Optional<ConnectorAccessControl>>() {}));
        return new IcebergConnector(
                lifeCycleManager,
                transactionManager,
                new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
                new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
                new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
                new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
                sessionPropertiesProviders,
                IcebergSchemaProperties.SCHEMA_PROPERTIES,
                icebergTableProperties.getTableProperties(),
                accessControl,
                procedures,
                tableProcedures);
    }
}
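The two Optional parameters exist so that callers, typically tests, can swap in a pre-built HiveMetastore or FileIoProvider instead of the production wiring. A hedged sketch of such a test-style call follows; testingMetastore is a placeholder for whatever metastore implementation the test supplies, and the configuration map is left empty for brevity.

// Hypothetical test-style invocation; "testingMetastore" is a placeholder.
Connector connector = InternalIcebergConnectorFactory.createConnector(
        "iceberg_test",                 // catalog name
        ImmutableMap.of(),              // connector configuration properties
        new TestingConnectorContext(),
        binder -> {},                   // no extra Guice bindings
        Optional.of(testingMetastore),  // bypass the default metastore wiring
        Optional.empty());              // fall back to HdfsFileIoProvider
connector.shutdown();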
Use of io.trino.spi.connector.Connector in project trino by trinodb.
From the class TestMongoPlugin, method testCreateConnector:
@Test
public void testCreateConnector()
{
    MongoPlugin plugin = new MongoPlugin();
    ConnectorFactory factory = getOnlyElement(plugin.getConnectorFactories());
    Connector connector = factory.create(
            "test",
            ImmutableMap.of("mongodb.connection-url", connectionString),
            new TestingConnectorContext());
    Type type = getOnlyElement(plugin.getTypes());
    assertEquals(type, OBJECT_ID);
    connector.shutdown();
}
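Beyond creating and shutting down the connector, a smoke test like this can exercise the transaction lifecycle. Below is a hedged sketch of extra assertions that could be placed before the shutdown() call; SESSION stands in for a test ConnectorSession such as TestingConnectorSession.SESSION, and the three-argument beginTransaction overload of the SPI is assumed.

// Hypothetical extension of the test above: open a transaction, fetch
// metadata through it, then commit. SESSION is a placeholder test session.
ConnectorTransactionHandle transaction = connector.beginTransaction(IsolationLevel.READ_UNCOMMITTED, true, true);
ConnectorMetadata metadata = connector.getMetadata(SESSION, transaction);
assertNotNull(metadata);
connector.commit(transaction);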
Use of io.trino.spi.connector.Connector in project trino by trinodb.
From the class TestKafkaPlugin, method testSslSpinup:
@Test
public void testSslSpinup() throws IOException
{
    KafkaPlugin plugin = new KafkaPlugin();
    ConnectorFactory factory = getOnlyElement(plugin.getConnectorFactories());
    assertInstanceOf(factory, KafkaConnectorFactory.class);
    String secret = "confluent";
    Path keystorePath = Files.createTempFile("keystore", ".jks");
    Path truststorePath = Files.createTempFile("truststore", ".jks");
    writeToFile(keystorePath, secret);
    writeToFile(truststorePath, secret);
    Connector connector = factory.create(
            "test-connector",
            ImmutableMap.<String, String>builder()
                    .put("kafka.table-names", "test")
                    .put("kafka.nodes", "localhost:9092")
                    .put("kafka.security-protocol", "SSL")
                    .put("kafka.ssl.keystore.type", "JKS")
                    .put("kafka.ssl.keystore.location", keystorePath.toString())
                    .put("kafka.ssl.keystore.password", "keystore-password")
                    .put("kafka.ssl.key.password", "key-password")
                    .put("kafka.ssl.truststore.type", "JKS")
                    .put("kafka.ssl.truststore.location", truststorePath.toString())
                    .put("kafka.ssl.truststore.password", "truststore-password")
                    .put("kafka.ssl.endpoint-identification-algorithm", "https")
                    .buildOrThrow(),
            new TestingConnectorContext());
    assertNotNull(connector);
    connector.shutdown();
}
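The writeToFile helper used above is not shown in this excerpt. A minimal version might look like the following sketch, which simply dumps the string into the temporary file so the connector has a keystore and truststore path to validate.

// Hypothetical sketch of the writeToFile helper referenced above.
private static void writeToFile(Path path, String content)
        throws IOException
{
    Files.writeString(path, content, StandardCharsets.UTF_8);
}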
Use of io.trino.spi.connector.Connector in project trino by trinodb.
From the class TestCassandraConnector, method setup:
@BeforeClass
public void setup() throws Exception
{
    this.server = new CassandraServer();
    String keyspace = "test_connector";
    createTestTables(server.getSession(), keyspace, DATE);
    CassandraConnectorFactory connectorFactory = new CassandraConnectorFactory();
    Connector connector = connectorFactory.create(
            "test",
            ImmutableMap.of(
                    "cassandra.contact-points", server.getHost(),
                    "cassandra.native-protocol-port", Integer.toString(server.getPort())),
            new TestingConnectorContext());
    metadata = connector.getMetadata(SESSION, CassandraTransactionHandle.INSTANCE);
    assertInstanceOf(metadata, CassandraMetadata.class);
    splitManager = connector.getSplitManager();
    assertInstanceOf(splitManager, CassandraSplitManager.class);
    recordSetProvider = connector.getRecordSetProvider();
    assertInstanceOf(recordSetProvider, CassandraRecordSetProvider.class);
    database = keyspace;
    table = new SchemaTableName(database, TABLE_ALL_TYPES.toLowerCase(ENGLISH));
    tableForDelete = new SchemaTableName(database, TABLE_DELETE_DATA.toLowerCase(ENGLISH));
    tableTuple = new SchemaTableName(database, TABLE_TUPLE_TYPE.toLowerCase(ENGLISH));
    tableUdt = new SchemaTableName(database, TABLE_USER_DEFINED_TYPE.toLowerCase(ENGLISH));
}
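A setup method like this is typically paired with a teardown that releases the embedded server. A minimal sketch follows, assuming CassandraServer exposes a close() method for the resources it manages; the method name tearDown is an assumption for illustration.

// Hypothetical matching teardown for the setup above.
@AfterClass(alwaysRun = true)
public void tearDown() throws Exception
{
    server.close();
    server = null;
}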