Use of io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastore in project trino by trinodb.
From the class AbstractTestHive, method setup:
protected final void setup(String host, int port, String databaseName, String timeZone) {
    HiveConfig hiveConfig = getHiveConfig().setParquetTimeZone(timeZone).setRcfileTimeZone(timeZone);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    hdfsEnvironment = new HdfsEnvironment(createTestHdfsConfiguration(), new HdfsConfig(), new NoHdfsAuthentication());
    HiveMetastore metastore = cachingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, hiveConfig, new MetastoreConfig(), new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(SESSION.getIdentity())),
            executor,
            new Duration(1, MINUTES),
            Optional.of(new Duration(15, SECONDS)),
            10000);
    setup(databaseName, hiveConfig, metastore, hdfsEnvironment);
}
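The wrapper built here caches metastore responses for one minute, refreshes entries in the background after 15 seconds, and holds at most 10000 entries. A minimal sketch of how a test could confirm the cache is absorbing repeated lookups; the assertions are hypothetical, but getAllDatabases() is the same HiveMetastore method exercised by the tests further down this page:

    // Hypothetical check: the first call loads from Thrift; a second call within
    // the 1-minute TTL should be answered from the cache without a new RPC.
    List<String> first = metastore.getAllDatabases();
    List<String> second = metastore.getAllDatabases();
    assertEquals(second, first);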
From the class BaseTestHiveOnDataLake, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception {
    this.bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = closeAfterClass(new HiveMinioDataLake(bucketName, ImmutableMap.of(), hiveHadoopImage));
    this.dockerizedS3DataLake.start();
    this.metastoreClient = new BridgingHiveMetastore(
            new ThriftHiveMetastore(
                    new TestingMetastoreLocator(Optional.empty(), this.dockerizedS3DataLake.getHiveHadoop().getHiveMetastoreEndpoint()),
                    new HiveConfig(), new MetastoreConfig(), new ThriftMetastoreConfig(),
                    new HdfsEnvironment(new HiveHdfsConfiguration(new HdfsConfigurationInitializer(new HdfsConfig(), ImmutableSet.of()), ImmutableSet.of()), new HdfsConfig(), new NoHdfsAuthentication()),
                    false),
            HiveIdentity.none());
    return S3HiveQueryRunner.create(dockerizedS3DataLake, ImmutableMap.<String, String>builder()
            .put("hive.insert-existing-partitions-behavior", "OVERWRITE")
            .put("hive.non-managed-table-writes-enabled", "true")
            .put("hive.metastore-cache-ttl", "1d")
            .put("hive.metastore-refresh-interval", "1d")
            .buildOrThrow());
}
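With the runner configured above, hive.insert-existing-partitions-behavior=OVERWRITE makes a repeated INSERT into an existing partition replace its data. A hedged sketch of a test that would exercise this, assuming the class extends Trino's AbstractTestQueryFramework (the source of assertUpdate and assertQuery); the test and table names are made up for illustration:

    @Test
    public void testOverwriteSketch() { // hypothetical test
        assertUpdate("CREATE TABLE test_overwrite (col bigint, part bigint) WITH (partitioned_by = ARRAY['part'])");
        assertUpdate("INSERT INTO test_overwrite VALUES (1, 10)", 1);
        // OVERWRITE behavior: this second insert replaces partition part=10
        assertUpdate("INSERT INTO test_overwrite VALUES (2, 10)", 1);
        assertQuery("SELECT col FROM test_overwrite WHERE part = 10", "VALUES 2");
    }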
From the class TestCachingHiveMetastore, method setUp:
@BeforeMethod
public void setUp() {
    mockClient = new MockThriftMetastoreClient();
    ThriftHiveMetastore thriftHiveMetastore = createThriftHiveMetastore();
    executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed(getClass().getSimpleName() + "-%s")));
    metastore = cachingHiveMetastore(
            new BridgingHiveMetastore(thriftHiveMetastore, IDENTITY),
            executor,
            new Duration(5, TimeUnit.MINUTES),
            Optional.of(new Duration(1, TimeUnit.MINUTES)),
            1000);
    stats = thriftHiveMetastore.getStats();
}
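The createThriftHiveMetastore() helper is not shown in this excerpt. A plausible sketch, reusing the ThriftHiveMetastore constructor that appears elsewhere on this page; MockMetastoreLocator and HDFS_ENVIRONMENT are assumed helpers, with the locator handing the mock client back to the metastore under test:

    private ThriftHiveMetastore createThriftHiveMetastore() {
        // Assumption: the locator's createMetastoreClient() returns mockClient,
        // so every Thrift call made by the test is answered by the mock.
        MetastoreLocator metastoreLocator = new MockMetastoreLocator(mockClient);
        return new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), new MetastoreConfig(), new ThriftMetastoreConfig(), HDFS_ENVIRONMENT, false);
    }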
From the class TestCachingHiveMetastore, method testCachingHiveMetastoreCreationViaMemoize:
@Test
public void testCachingHiveMetastoreCreationViaMemoize() {
    ThriftHiveMetastore thriftHiveMetastore = createThriftHiveMetastore();
    metastore = memoizeMetastore(new BridgingHiveMetastore(thriftHiveMetastore, IDENTITY), 1000);

    assertEquals(mockClient.getAccessCount(), 0);
    assertEquals(metastore.getAllDatabases(), ImmutableList.of(TEST_DATABASE));
    assertEquals(mockClient.getAccessCount(), 1);
    // The second lookup is served from the memoized value, so the mock client is not hit again
    assertEquals(metastore.getAllDatabases(), ImmutableList.of(TEST_DATABASE));
    assertEquals(mockClient.getAccessCount(), 1);
    // Unlike cachingHiveMetastore, the memoizing wrapper records no request statistics
    assertEquals(metastore.getDatabaseNamesStats().getRequestCount(), 0);
}
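The final assertion is the point of the test: memoizeMetastore produces a plain memoizing wrapper whose statistics counters never move, while the cachingHiveMetastore built in setUp() records each loading call. A hedged sketch of the mirror-image check against the caching variant, assuming the counter increments once per cache load:

    // Against the cachingHiveMetastore from setUp(), the same lookup is recorded:
    metastore.getAllDatabases();
    assertEquals(metastore.getDatabaseNamesStats().getRequestCount(), 1); // assumption: one recorded request per load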
From the class AbstractTestHiveFileSystem, method setup:
protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration) {
    database = databaseName;
    table = new SchemaTableName(database, "trino_test_external_fs");
    tableWithHeader = new SchemaTableName(database, "trino_test_external_fs_with_header");
    tableWithHeaderAndFooter = new SchemaTableName(database, "trino_test_external_fs_with_header_and_footer");
    String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_trino_test_create_" + random);
    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(config);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
    MetastoreConfig metastoreConfig = new MetastoreConfig();
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), metastoreConfig, new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(getHiveSession(config).getIdentity())),
            getBasePath(), hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"), config, metastoreConfig, HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment, hivePartitionManager, newDirectExecutorService(), heartbeatService,
            TESTING_TYPE_MANAGER, NOOP_METADATA_PROVIDER, locationService, partitionUpdateCodec,
            new NodeVersion("test_version"), new NoneHiveRedirectionsProvider(),
            ImmutableSet.of(new PartitionsSystemTableProvider(hivePartitionManager, TESTING_TYPE_MANAGER), new PropertiesSystemTableProvider()),
            new DefaultHiveMaterializedViewMetadataFactory(), SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS, TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager, hivePartitionManager, new NamenodeStats(), hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()), new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new CounterStat(), config.getMaxOutstandingSplits(), config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(), config.getMaxPartitionBatchSize(), config.getMaxInitialSplits(),
            config.getSplitLoaderConcurrency(), config.getMaxSplitsPerSecond(), config.getRecursiveDirWalkerEnabled(),
            TESTING_TYPE_MANAGER);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, hdfsEnvironment), hdfsEnvironment, PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators),
            TESTING_TYPE_MANAGER, config, locationService, partitionUpdateCodec,
            new TestingNodeManager("fake-environment"), new HiveEventClient(), getHiveSessionProperties(config), new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER, hdfsEnvironment, config,
            getDefaultHivePageSourceFactories(hdfsEnvironment, config),
            getDefaultHiveRecordCursorProviders(config, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, config), Optional.empty());
    onSetupComplete();
}
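After setup() returns, the fixture exposes a complete connector stack: the metastore client, metadata factory, transaction and split managers, and the page sink/source providers. A minimal post-setup sanity check, assuming HiveMetastore#getDatabase(String) as in Trino's metastore SPI:

    // Hypothetical check that the target database is reachable through the stack just built
    assertTrue(metastoreClient.getDatabase(database).isPresent());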