
Example 16 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb.

From the class TestDeltaLakeGlueMetastore, method setUp:

@BeforeClass
public void setUp() {
    tempDir = Files.createTempDir();
    String temporaryLocation = tempDir.toURI().toString();
    Map<String, String> config = ImmutableMap.<String, String>builder().put("hive.metastore", "glue").put("delta.hide-non-delta-lake-tables", "true").buildOrThrow();
    Bootstrap app = new Bootstrap(// connector dependencies
    new JsonModule(), binder -> {
        ConnectorContext context = new TestingConnectorContext();
        binder.bind(CatalogName.class).toInstance(new CatalogName("test"));
        binder.bind(TypeManager.class).toInstance(context.getTypeManager());
        binder.bind(NodeManager.class).toInstance(context.getNodeManager());
        binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
        binder.bind(NodeVersion.class).toInstance(new NodeVersion("test_version"));
    }, // connector modules
    new DeltaLakeMetastoreModule(), new DeltaLakeModule(), // test setup
    binder -> {
        binder.bind(HdfsEnvironment.class).toInstance(HDFS_ENVIRONMENT);
    });
    Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
    lifeCycleManager = injector.getInstance(LifeCycleManager.class);
    metastoreClient = injector.getInstance(GlueHiveMetastore.class);
    metadataFactory = injector.getInstance(DeltaLakeMetadataFactory.class);
    session = TestingConnectorSession.builder().setPropertyMetadata(injector.getInstance(DeltaLakeSessionProperties.class).getSessionProperties()).build();
    databaseName = "test_delta_glue" + randomName();
    metastoreClient.createDatabase(Database.builder().setDatabaseName(databaseName).setOwnerName(Optional.of("public")).setOwnerType(Optional.of(ROLE)).setLocation(Optional.of(temporaryLocation)).build());
}
Also used: DeltaLakeModule (io.trino.plugin.deltalake.DeltaLakeModule), TestingConnectorContext (io.trino.testing.TestingConnectorContext), DeltaLakeMetadataFactory (io.trino.plugin.deltalake.DeltaLakeMetadataFactory), JsonModule (io.airlift.json.JsonModule), PageIndexerFactory (io.trino.spi.PageIndexerFactory), HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment), GlueHiveMetastore (io.trino.plugin.hive.metastore.glue.GlueHiveMetastore), NodeVersion (io.trino.plugin.hive.NodeVersion), NodeManager (io.trino.spi.NodeManager), LifeCycleManager (io.airlift.bootstrap.LifeCycleManager), Injector (com.google.inject.Injector), ConnectorContext (io.trino.spi.connector.ConnectorContext), DeltaLakeMetastoreModule (io.trino.plugin.deltalake.metastore.DeltaLakeMetastoreModule), Bootstrap (io.airlift.bootstrap.Bootstrap), TypeManager (io.trino.spi.type.TypeManager), CatalogName (io.trino.plugin.base.CatalogName), BeforeClass (org.testng.annotations.BeforeClass)
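
The example follows a simple dependency-injection pattern: a concrete CatalogName is bound with toInstance, after which any component in the connector's object graph can inject it. Below is a minimal sketch of that pattern with plain Guice (the airlift Bootstrap above layers configuration-property handling on top); NeedsCatalogName is a hypothetical consumer class, not part of Trino:

import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import io.trino.plugin.base.CatalogName;

public class CatalogNameBindingSketch
{
    // Hypothetical component standing in for the connector classes that
    // receive the catalog name through constructor injection.
    static class NeedsCatalogName
    {
        final CatalogName catalogName;

        @Inject
        NeedsCatalogName(CatalogName catalogName)
        {
            this.catalogName = catalogName;
        }
    }

    public static void main(String[] args)
    {
        // Same binding style as the test: one fixed instance per catalog.
        Injector injector = Guice.createInjector(
                binder -> binder.bind(CatalogName.class).toInstance(new CatalogName("test")));
        System.out.println(injector.getInstance(NeedsCatalogName.class).catalogName);
    }
}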

Example 17 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb.

From the class TestDeltaLakeMetadata, method setUp:

@BeforeClass
public void setUp() throws IOException {
    temporaryCatalogDirectory = createTempDirectory("HiveCatalog").toFile();
    Map<String, String> config = ImmutableMap.<String, String>builder().put("hive.metastore", "file").put("hive.metastore.catalog.dir", temporaryCatalogDirectory.getPath()).buildOrThrow();
    Bootstrap app = new Bootstrap(// connector dependencies
    new JsonModule(), binder -> {
        ConnectorContext context = new TestingConnectorContext();
        binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
        binder.bind(CatalogName.class).toInstance(new CatalogName("test"));
        binder.bind(TypeManager.class).toInstance(context.getTypeManager());
        binder.bind(NodeManager.class).toInstance(context.getNodeManager());
        binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
    }, // connector modules
    new DeltaLakeMetastoreModule(), new DeltaLakeModule(), // test setup
    binder -> {
        binder.bind(HdfsEnvironment.class).toInstance(HDFS_ENVIRONMENT);
    }, new AbstractModule() {

        @Provides
        public DeltaLakeMetastore getDeltaLakeMetastore(@RawHiveMetastoreFactory HiveMetastoreFactory hiveMetastoreFactory, TransactionLogAccess transactionLogAccess, TypeManager typeManager, CachingDeltaLakeStatisticsAccess statistics) {
            return new HiveMetastoreBackedDeltaLakeMetastore(hiveMetastoreFactory.createMetastore(Optional.empty()), transactionLogAccess, typeManager, statistics);
        }
    });
    Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
    deltaLakeMetadataFactory = injector.getInstance(DeltaLakeMetadataFactory.class);
    injector.getInstance(DeltaLakeMetastore.class).createDatabase(Database.builder().setDatabaseName(DATABASE_NAME).setOwnerName(Optional.of("test")).setOwnerType(Optional.of(USER)).setLocation(Optional.empty()).build());
}
Also used: HiveMetastoreFactory (io.trino.plugin.hive.metastore.HiveMetastoreFactory), RawHiveMetastoreFactory (io.trino.plugin.hive.metastore.RawHiveMetastoreFactory), TransactionLogAccess (io.trino.plugin.deltalake.transactionlog.TransactionLogAccess), TestingConnectorContext (io.trino.testing.TestingConnectorContext), Provides (com.google.inject.Provides), DeltaLakeMetastore (io.trino.plugin.deltalake.metastore.DeltaLakeMetastore), HiveMetastoreBackedDeltaLakeMetastore (io.trino.plugin.deltalake.metastore.HiveMetastoreBackedDeltaLakeMetastore), JsonModule (io.airlift.json.JsonModule), PageIndexerFactory (io.trino.spi.PageIndexerFactory), HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment), AbstractModule (com.google.inject.AbstractModule), NodeVersion (io.trino.plugin.hive.NodeVersion), NodeManager (io.trino.spi.NodeManager), Injector (com.google.inject.Injector), ConnectorContext (io.trino.spi.connector.ConnectorContext), DeltaLakeMetastoreModule (io.trino.plugin.deltalake.metastore.DeltaLakeMetastoreModule), Bootstrap (io.airlift.bootstrap.Bootstrap), TypeManager (io.trino.spi.type.TypeManager), CatalogName (io.trino.plugin.base.CatalogName), CachingDeltaLakeStatisticsAccess (io.trino.plugin.deltalake.statistics.CachingDeltaLakeStatisticsAccess), BeforeClass (org.testng.annotations.BeforeClass)
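
The AbstractModule at the end is the notable addition here: its @Provides method lets the test hand-construct the DeltaLakeMetastore from other injected bindings instead of relying on reflective wiring. A minimal sketch of the @Provides pattern, using a hypothetical CatalogLabel value in place of the metastore:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Provides;
import io.trino.plugin.base.CatalogName;

public class ProvidesSketch
{
    // Hypothetical value standing in for DeltaLakeMetastore.
    record CatalogLabel(String value) {}

    static class SketchModule
            extends AbstractModule
    {
        @Override
        protected void configure()
        {
            bind(CatalogName.class).toInstance(new CatalogName("test"));
        }

        // Guice passes the CatalogName bound above as the method argument,
        // mirroring how the test's @Provides method receives its dependencies.
        @Provides
        CatalogLabel createCatalogLabel(CatalogName catalogName)
        {
            return new CatalogLabel("catalog=" + catalogName);
        }
    }

    public static void main(String[] args)
    {
        System.out.println(Guice.createInjector(new SketchModule())
                .getInstance(CatalogLabel.class)
                .value());
    }
}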

Example 18 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb.

From the class AbstractTestHiveFileSystem, method setup:

protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration) {
    database = databaseName;
    table = new SchemaTableName(database, "trino_test_external_fs");
    tableWithHeader = new SchemaTableName(database, "trino_test_external_fs_with_header");
    tableWithHeaderAndFooter = new SchemaTableName(database, "trino_test_external_fs_with_header_and_footer");
    String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_trino_test_create_" + random);
    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(config);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
    MetastoreConfig metastoreConfig = new MetastoreConfig();
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), metastoreConfig, new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(getHiveSession(config).getIdentity())),
            getBasePath(),
            hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"),
            config,
            metastoreConfig,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment,
            hivePartitionManager,
            newDirectExecutorService(),
            heartbeatService,
            TESTING_TYPE_MANAGER,
            NOOP_METADATA_PROVIDER,
            locationService,
            partitionUpdateCodec,
            new NodeVersion("test_version"),
            new NoneHiveRedirectionsProvider(),
            ImmutableSet.of(
                    new PartitionsSystemTableProvider(hivePartitionManager, TESTING_TYPE_MANAGER),
                    new PropertiesSystemTableProvider()),
            new DefaultHiveMaterializedViewMetadataFactory(),
            SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS,
            TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager,
            hivePartitionManager,
            new NamenodeStats(),
            hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()),
            new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new CounterStat(),
            config.getMaxOutstandingSplits(),
            config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(),
            config.getMaxPartitionBatchSize(),
            config.getMaxInitialSplits(),
            config.getSplitLoaderConcurrency(),
            config.getMaxSplitsPerSecond(),
            config.getRecursiveDirWalkerEnabled(),
            TESTING_TYPE_MANAGER);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, hdfsEnvironment),
            hdfsEnvironment,
            PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
            TESTING_TYPE_MANAGER,
            config,
            locationService,
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER,
            hdfsEnvironment,
            config,
            getDefaultHivePageSourceFactories(hdfsEnvironment, config),
            getDefaultHiveRecordCursorProviders(config, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, config),
            Optional.empty());
    onSetupComplete();
}
Also used: TestingMetastoreLocator (io.trino.plugin.hive.metastore.thrift.TestingMetastoreLocator), CounterStat (io.airlift.stats.CounterStat), MetastoreLocator (io.trino.plugin.hive.metastore.thrift.MetastoreLocator), ThriftMetastoreConfig (io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig), NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication), HiveIdentity (io.trino.plugin.hive.authentication.HiveIdentity), HostAndPort (com.google.common.net.HostAndPort), BlockTypeOperators (io.trino.type.BlockTypeOperators), TestingNodeManager (io.trino.testing.TestingNodeManager), BridgingHiveMetastore (io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore), TypeOperators (io.trino.spi.type.TypeOperators), JoinCompiler (io.trino.sql.gen.JoinCompiler), MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig), ThriftHiveMetastore (io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastore), SchemaTableName (io.trino.spi.connector.SchemaTableName), BoundedExecutor (io.airlift.concurrent.BoundedExecutor), CatalogName (io.trino.plugin.base.CatalogName), GroupByHashPageIndexerFactory (io.trino.operator.GroupByHashPageIndexerFactory)
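
Unlike the Guice-based Delta Lake examples, this test wires its object graph by hand, so CatalogName is simply the first constructor argument of HiveMetadataFactory (new CatalogName("hive")). A minimal sketch of that manual-wiring style; SketchFactory is a hypothetical stand-in, not a Trino class:

import io.trino.plugin.base.CatalogName;

import static java.util.Objects.requireNonNull;

public class ManualWiringSketch
{
    // Hypothetical factory standing in for HiveMetadataFactory.
    static class SketchFactory
    {
        private final CatalogName catalogName;

        SketchFactory(CatalogName catalogName)
        {
            this.catalogName = requireNonNull(catalogName, "catalogName is null");
        }

        String describe()
        {
            return "factory for catalog " + catalogName;
        }
    }

    public static void main(String[] args)
    {
        // No injector involved: the catalog name is an ordinary argument.
        System.out.println(new SketchFactory(new CatalogName("hive")).describe());
    }
}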

Example 19 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb.

From the class TestRaptorConnector, method setup:

@BeforeMethod
public void setup() {
    Jdbi dbi = createTestingJdbi();
    dummyHandle = dbi.open();
    metadataDao = dbi.onDemand(MetadataDao.class);
    createTablesWithRetry(dbi);
    dataDir = Files.createTempDir();
    CatalogName connectorId = new CatalogName("test");
    NodeManager nodeManager = new TestingNodeManager();
    NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
    ShardManager shardManager = createShardManager(dbi);
    StorageManager storageManager = createRaptorStorageManager(dbi, dataDir);
    StorageManagerConfig config = new StorageManagerConfig();
    connector = new RaptorConnector(
            new LifeCycleManager(ImmutableList.of(), null),
            new TestingNodeManager(),
            new RaptorMetadataFactory(dbi, shardManager),
            new RaptorSplitManager(connectorId, nodeSupplier, shardManager, false),
            new RaptorPageSourceProvider(storageManager),
            new RaptorPageSinkProvider(storageManager, new PagesIndexPageSorter(new PagesIndex.TestingFactory(false)), config),
            new RaptorNodePartitioningProvider(nodeSupplier),
            new RaptorSessionProperties(config),
            new RaptorTableProperties(TESTING_TYPE_MANAGER),
            ImmutableSet.of(),
            Optional.empty(),
            dbi);
}
Also used: DatabaseTesting.createTestingJdbi (io.trino.plugin.raptor.legacy.DatabaseTesting.createTestingJdbi), Jdbi (org.jdbi.v3.core.Jdbi), StorageManager (io.trino.plugin.raptor.legacy.storage.StorageManager), TestRaptorStorageManager.createRaptorStorageManager (io.trino.plugin.raptor.legacy.storage.TestRaptorStorageManager.createRaptorStorageManager), TestDatabaseShardManager.createShardManager (io.trino.plugin.raptor.legacy.metadata.TestDatabaseShardManager.createShardManager), ShardManager (io.trino.plugin.raptor.legacy.metadata.ShardManager), StorageManagerConfig (io.trino.plugin.raptor.legacy.storage.StorageManagerConfig), NodeManager (io.trino.spi.NodeManager), TestingNodeManager (io.trino.testing.TestingNodeManager), PagesIndexPageSorter (io.trino.operator.PagesIndexPageSorter), LifeCycleManager (io.airlift.bootstrap.LifeCycleManager), CatalogName (io.trino.plugin.base.CatalogName), MetadataDao (io.trino.plugin.raptor.legacy.metadata.MetadataDao), BeforeMethod (org.testng.annotations.BeforeMethod)
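
The tests create new CatalogName("test") wherever one is needed, which is safe under the assumption (borne out by the Trino sources) that CatalogName is a plain value object over the name string. A minimal sketch of that assumption:

import io.trino.plugin.base.CatalogName;

public class CatalogNameValueSketch
{
    public static void main(String[] args)
    {
        CatalogName a = new CatalogName("test");
        CatalogName b = new CatalogName("test");
        // Assumes value-based equals/hashCode and a toString() that returns
        // the raw name, as defined in io.trino.plugin.base.CatalogName.
        System.out.println(a.equals(b)); // true
        System.out.println(a);           // test
    }
}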

Example 20 with CatalogName

Use of io.trino.plugin.base.CatalogName in project trino by trinodb.

From the class TestRaptorSplitManager, method setup:

@BeforeMethod
public void setup() throws Exception {
    Jdbi dbi = createTestingJdbi();
    dummyHandle = dbi.open();
    createTablesWithRetry(dbi);
    temporary = createTempDirectory(null);
    AssignmentLimiter assignmentLimiter = new AssignmentLimiter(ImmutableSet::of, systemTicker(), new MetadataConfig());
    shardManager = new DatabaseShardManager(dbi, new DaoSupplier<>(dbi, ShardDao.class), ImmutableSet::of, assignmentLimiter, systemTicker(), new Duration(0, MINUTES));
    TestingNodeManager nodeManager = new TestingNodeManager();
    NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
    String nodeName = UUID.randomUUID().toString();
    nodeManager.addNode(new InternalNode(nodeName, new URI("http://127.0.0.1/"), NodeVersion.UNKNOWN, false));
    CatalogName connectorId = new CatalogName("raptor");
    metadata = new RaptorMetadata(dbi, shardManager);
    metadata.createTable(SESSION, TEST_TABLE, false);
    tableHandle = metadata.getTableHandle(SESSION, TEST_TABLE.getTable());
    List<ShardInfo> shards = ImmutableList.<ShardInfo>builder().add(shardInfo(UUID.randomUUID(), nodeName)).add(shardInfo(UUID.randomUUID(), nodeName)).add(shardInfo(UUID.randomUUID(), nodeName)).add(shardInfo(UUID.randomUUID(), nodeName)).build();
    tableId = ((RaptorTableHandle) tableHandle).getTableId();
    List<ColumnInfo> columns = metadata.getColumnHandles(SESSION, tableHandle).values().stream().map(RaptorColumnHandle.class::cast).map(ColumnInfo::fromHandle).collect(toList());
    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, columns, shards, Optional.empty(), 0);
    raptorSplitManager = new RaptorSplitManager(connectorId, nodeSupplier, shardManager, false);
}
Also used: DatabaseTesting.createTestingJdbi (io.trino.plugin.raptor.legacy.DatabaseTesting.createTestingJdbi), Jdbi (org.jdbi.v3.core.Jdbi), RaptorColumnHandle (io.trino.plugin.raptor.legacy.RaptorColumnHandle), RaptorMetadata (io.trino.plugin.raptor.legacy.RaptorMetadata), Duration (io.airlift.units.Duration), DaoSupplier (io.trino.plugin.raptor.legacy.util.DaoSupplier), URI (java.net.URI), ImmutableSet (com.google.common.collect.ImmutableSet), TestingNodeManager (io.trino.testing.TestingNodeManager), CatalogName (io.trino.plugin.base.CatalogName), InternalNode (io.trino.metadata.InternalNode), NodeSupplier (io.trino.plugin.raptor.legacy.NodeSupplier), RaptorSplitManager (io.trino.plugin.raptor.legacy.RaptorSplitManager), BeforeMethod (org.testng.annotations.BeforeMethod)
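
Note the lifecycle split across these examples: the Delta Lake tests use @BeforeClass (building an injector is expensive, so it happens once per class), while both Raptor tests use @BeforeMethod (fresh database handles and temp directories before every test). A minimal TestNG sketch of the difference:

import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

public class SetupLifecycleSketch
{
    private int classSetups;
    private int methodSetups;

    @BeforeClass
    public void setUpOnce()
    {
        classSetups++; // expensive shared state (injector, metastore) goes here
    }

    @BeforeMethod
    public void setUpEach()
    {
        methodSetups++; // per-test state (temp dirs, DB handles) goes here
    }

    @Test
    public void smokeTest()
    {
        // With two test methods, TestNG runs setUpOnce once and setUpEach twice.
        System.out.println(classSetups + " class setup(s), " + methodSetups + " method setup(s)");
    }

    @Test
    public void anotherTest() {}
}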

Aggregations

CatalogName (io.trino.plugin.base.CatalogName): 22
HdfsEnvironment (io.trino.plugin.hive.HdfsEnvironment): 7
TestingNodeManager (io.trino.testing.TestingNodeManager): 7
Injector (com.google.inject.Injector): 6
Bootstrap (io.airlift.bootstrap.Bootstrap): 6
JsonModule (io.airlift.json.JsonModule): 6
NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication): 6
TypeManager (io.trino.spi.type.TypeManager): 6
ImmutableSet (com.google.common.collect.ImmutableSet): 5
HdfsConfigurationInitializer (io.trino.plugin.hive.HdfsConfigurationInitializer): 5
HiveMetastore (io.trino.plugin.hive.metastore.HiveMetastore): 5
ThreadContextClassLoader (io.trino.spi.classloader.ThreadContextClassLoader): 5
LifeCycleManager (io.airlift.bootstrap.LifeCycleManager): 4
NodeVersion (io.trino.plugin.hive.NodeVersion): 4
MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig): 4
ConnectorPageSinkProvider (io.trino.spi.connector.ConnectorPageSinkProvider): 4
ConnectorSplitManager (io.trino.spi.connector.ConnectorSplitManager): 4
Procedure (io.trino.spi.procedure.Procedure): 4
Set (java.util.Set): 4
ImmutableList (com.google.common.collect.ImmutableList): 3