
Example 1 with ThriftMetastoreConfig

Use of io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig in project trino by trinodb.

From class AbstractTestHive, method setup.

protected final void setup(String host, int port, String databaseName, String timeZone) {
    HiveConfig hiveConfig = getHiveConfig().setParquetTimeZone(timeZone).setRcfileTimeZone(timeZone);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    hdfsEnvironment = new HdfsEnvironment(createTestHdfsConfiguration(), new HdfsConfig(), new NoHdfsAuthentication());
    // Cache the bridged Thrift metastore: 1-minute TTL, 15-second refresh, at most 10000 entries
    HiveMetastore metastore = cachingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, hiveConfig, new MetastoreConfig(), new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(SESSION.getIdentity())),
            executor,
            new Duration(1, MINUTES),
            Optional.of(new Duration(15, SECONDS)),
            10000);
    setup(databaseName, hiveConfig, metastore, hdfsEnvironment);
}
Also used:
ThriftMetastoreConfig (io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig)
MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig)
TestingMetastoreLocator (io.trino.plugin.hive.metastore.thrift.TestingMetastoreLocator)
MetastoreLocator (io.trino.plugin.hive.metastore.thrift.MetastoreLocator)
HiveMetastore (io.trino.plugin.hive.metastore.HiveMetastore)
SemiTransactionalHiveMetastore (io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore)
BridgingHiveMetastore (io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore)
ThriftHiveMetastore (io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastore)
CachingHiveMetastore.cachingHiveMetastore (io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.cachingHiveMetastore)
Duration (io.airlift.units.Duration)
NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication)
HiveIdentity (io.trino.plugin.hive.authentication.HiveIdentity)
HostAndPort (com.google.common.net.HostAndPort)
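The trailing arguments to cachingHiveMetastore above appear to control the cache: an executor for refreshes, a 1-minute TTL, an optional 15-second refresh interval, and a 10000-entry limit (that reading of the parameters is inferred from the hive.metastore-cache-ttl and hive.metastore-refresh-interval properties used in Example 2, not stated here). A minimal sketch of the same wiring with the cache tuned differently; only the literal values change, and whether they suit a given test is an assumption:

// Hypothetical variant of the call above: 5-minute TTL, 1-minute refresh, smaller cache
HiveMetastore slowerRefreshingMetastore = cachingHiveMetastore(
        new BridgingHiveMetastore(
                new ThriftHiveMetastore(metastoreLocator, hiveConfig, new MetastoreConfig(), new ThriftMetastoreConfig(), hdfsEnvironment, false),
                new HiveIdentity(SESSION.getIdentity())),
        executor,
        new Duration(5, MINUTES),
        Optional.of(new Duration(1, MINUTES)),
        1000);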

Example 2 with ThriftMetastoreConfig

Use of io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig in project trino by trinodb.

From class BaseTestHiveOnDataLake, method createQueryRunner.

@Override
protected QueryRunner createQueryRunner() throws Exception {
    this.bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = closeAfterClass(new HiveMinioDataLake(bucketName, ImmutableMap.of(), hiveHadoopImage));
    this.dockerizedS3DataLake.start();
    // Thrift metastore client for the dockerized Hive metastore endpoint, using default metastore and HDFS configs
    this.metastoreClient = new BridgingHiveMetastore(
            new ThriftHiveMetastore(
                    new TestingMetastoreLocator(Optional.empty(), this.dockerizedS3DataLake.getHiveHadoop().getHiveMetastoreEndpoint()),
                    new HiveConfig(),
                    new MetastoreConfig(),
                    new ThriftMetastoreConfig(),
                    new HdfsEnvironment(
                            new HiveHdfsConfiguration(new HdfsConfigurationInitializer(new HdfsConfig(), ImmutableSet.of()), ImmutableSet.of()),
                            new HdfsConfig(),
                            new NoHdfsAuthentication()),
                    false),
            HiveIdentity.none());
    return S3HiveQueryRunner.create(
            dockerizedS3DataLake,
            ImmutableMap.<String, String>builder()
                    .put("hive.insert-existing-partitions-behavior", "OVERWRITE")
                    .put("hive.non-managed-table-writes-enabled", "true")
                    .put("hive.metastore-cache-ttl", "1d")
                    .put("hive.metastore-refresh-interval", "1d")
                    .buildOrThrow());
}
Also used:
ThriftMetastoreConfig (io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig)
MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig)
TestingMetastoreLocator (io.trino.plugin.hive.metastore.thrift.TestingMetastoreLocator)
ThriftHiveMetastore (io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastore)
BridgingHiveMetastore (io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore)
HiveMinioDataLake (io.trino.plugin.hive.containers.HiveMinioDataLake)
NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication)
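The property map above pins OVERWRITE behavior for existing partitions and a one-day metastore cache. For contrast, a minimal sketch of the same S3HiveQueryRunner.create call with APPEND behavior and no cache overrides; only the property values differ from the example, and whether those values fit a particular test is an assumption:

// Hypothetical variant of the property map above: append to existing partitions,
// keep non-managed-table writes enabled, leave metastore caching at its defaults
return S3HiveQueryRunner.create(
        dockerizedS3DataLake,
        ImmutableMap.<String, String>builder()
                .put("hive.insert-existing-partitions-behavior", "APPEND")
                .put("hive.non-managed-table-writes-enabled", "true")
                .buildOrThrow());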

Example 3 with ThriftMetastoreConfig

Use of io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig in project trino by trinodb.

From class TestHiveInMemoryMetastore, method createMetastore.

@Override
protected HiveMetastore createMetastore(File tempDir, HiveIdentity identity) {
    File baseDir = new File(tempDir, "metastore");
    ThriftMetastoreConfig metastoreConfig = new ThriftMetastoreConfig();
    InMemoryThriftMetastore hiveMetastore = new InMemoryThriftMetastore(baseDir, metastoreConfig);
    return new BridgingHiveMetastore(hiveMetastore, identity);
}
Also used:
ThriftMetastoreConfig (io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig)
InMemoryThriftMetastore (io.trino.plugin.hive.metastore.thrift.InMemoryThriftMetastore)
BridgingHiveMetastore (io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore)
File (java.io.File)
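A short sketch of how this factory method might be exercised directly. It reuses only constructs that already appear in these examples plus HiveMetastore.getAllDatabases(); the temp-directory handling, the HiveIdentity.none() identity (borrowed from Example 2), and the printout are illustrative assumptions, not part of TestHiveInMemoryMetastore:

// Hypothetical caller: build the in-memory metastore in a throwaway directory
// and print whatever databases the fresh metastore reports
File tempDir = java.nio.file.Files.createTempDirectory("in-memory-metastore").toFile();
HiveMetastore metastore = createMetastore(tempDir, HiveIdentity.none());
System.out.println(metastore.getAllDatabases());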

Example 4 with ThriftMetastoreConfig

Use of io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig in project trino by trinodb.

From class AbstractTestHiveFileSystem, method setup.

protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration) {
    database = databaseName;
    table = new SchemaTableName(database, "trino_test_external_fs");
    tableWithHeader = new SchemaTableName(database, "trino_test_external_fs_with_header");
    tableWithHeaderAndFooter = new SchemaTableName(database, "trino_test_external_fs_with_header_and_footer");
    String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_trino_test_create_" + random);
    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(config);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
    MetastoreConfig metastoreConfig = new MetastoreConfig();
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), metastoreConfig, new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(getHiveSession(config).getIdentity())),
            getBasePath(),
            hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    // Metadata factory wired against the testing metastore, with Hive system tables and SQL-standard access control
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"),
            config,
            metastoreConfig,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment,
            hivePartitionManager,
            newDirectExecutorService(),
            heartbeatService,
            TESTING_TYPE_MANAGER,
            NOOP_METADATA_PROVIDER,
            locationService,
            partitionUpdateCodec,
            new NodeVersion("test_version"),
            new NoneHiveRedirectionsProvider(),
            ImmutableSet.of(new PartitionsSystemTableProvider(hivePartitionManager, TESTING_TYPE_MANAGER), new PropertiesSystemTableProvider()),
            new DefaultHiveMaterializedViewMetadataFactory(),
            SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS,
            TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager,
            hivePartitionManager,
            new NamenodeStats(),
            hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()),
            new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new CounterStat(),
            config.getMaxOutstandingSplits(),
            config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(),
            config.getMaxPartitionBatchSize(),
            config.getMaxInitialSplits(),
            config.getSplitLoaderConcurrency(),
            config.getMaxSplitsPerSecond(),
            config.getRecursiveDirWalkerEnabled(),
            TESTING_TYPE_MANAGER);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, hdfsEnvironment),
            hdfsEnvironment,
            PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
            TESTING_TYPE_MANAGER,
            config,
            locationService,
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER,
            hdfsEnvironment,
            config,
            getDefaultHivePageSourceFactories(hdfsEnvironment, config),
            getDefaultHiveRecordCursorProviders(config, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, config),
            Optional.empty());
    onSetupComplete();
}
Also used:
ThriftMetastoreConfig (io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig)
MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig)
TestingMetastoreLocator (io.trino.plugin.hive.metastore.thrift.TestingMetastoreLocator)
MetastoreLocator (io.trino.plugin.hive.metastore.thrift.MetastoreLocator)
ThriftHiveMetastore (io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastore)
BridgingHiveMetastore (io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore)
NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication)
HiveIdentity (io.trino.plugin.hive.authentication.HiveIdentity)
HostAndPort (com.google.common.net.HostAndPort)
SchemaTableName (io.trino.spi.connector.SchemaTableName)
CatalogName (io.trino.plugin.base.CatalogName)
CounterStat (io.airlift.stats.CounterStat)
BoundedExecutor (io.airlift.concurrent.BoundedExecutor)
TypeOperators (io.trino.spi.type.TypeOperators)
BlockTypeOperators (io.trino.type.BlockTypeOperators)
JoinCompiler (io.trino.sql.gen.JoinCompiler)
GroupByHashPageIndexerFactory (io.trino.operator.GroupByHashPageIndexerFactory)
TestingNodeManager (io.trino.testing.TestingNodeManager)
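Concrete file-system tests extend AbstractTestHiveFileSystem and call setup with their own HdfsConfiguration. A minimal sketch of such a subclass, assuming a hypothetical class name, a metastore on localhost:9083, and a TestNG @BeforeClass hook; the HdfsConfiguration is built the same way Example 2 builds it, and all of those specifics are illustrative assumptions rather than code from the project:

// Hypothetical subclass: wires the abstract setup against a local Hive metastore,
// with S3 Select pushdown disabled and a default HDFS configuration (as in Example 2)
public class TestHiveFileSystemExample
        extends AbstractTestHiveFileSystem
{
    @BeforeClass
    public void initialize()
    {
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(
                new HdfsConfigurationInitializer(new HdfsConfig(), ImmutableSet.of()),
                ImmutableSet.of());
        setup("localhost", 9083, "default", false, hdfsConfiguration);
    }
}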

Aggregations

BridgingHiveMetastore (io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore): 4
ThriftMetastoreConfig (io.trino.plugin.hive.metastore.thrift.ThriftMetastoreConfig): 4
NoHdfsAuthentication (io.trino.plugin.hive.authentication.NoHdfsAuthentication): 3
MetastoreConfig (io.trino.plugin.hive.metastore.MetastoreConfig): 3
TestingMetastoreLocator (io.trino.plugin.hive.metastore.thrift.TestingMetastoreLocator): 3
ThriftHiveMetastore (io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastore): 3
HostAndPort (com.google.common.net.HostAndPort): 2
HiveIdentity (io.trino.plugin.hive.authentication.HiveIdentity): 2
MetastoreLocator (io.trino.plugin.hive.metastore.thrift.MetastoreLocator): 2
BoundedExecutor (io.airlift.concurrent.BoundedExecutor): 1
CounterStat (io.airlift.stats.CounterStat): 1
Duration (io.airlift.units.Duration): 1
GroupByHashPageIndexerFactory (io.trino.operator.GroupByHashPageIndexerFactory): 1
CatalogName (io.trino.plugin.base.CatalogName): 1
HiveMinioDataLake (io.trino.plugin.hive.containers.HiveMinioDataLake): 1
HiveMetastore (io.trino.plugin.hive.metastore.HiveMetastore): 1
SemiTransactionalHiveMetastore (io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore): 1
CachingHiveMetastore.cachingHiveMetastore (io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.cachingHiveMetastore): 1
InMemoryThriftMetastore (io.trino.plugin.hive.metastore.thrift.InMemoryThriftMetastore): 1
SchemaTableName (io.trino.spi.connector.SchemaTableName): 1