Use of com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore in project presto by prestodb: class AbstractTestHiveClient, method setup.
protected final void setup(String host, int port, String databaseName, String timeZone) {
    HiveClientConfig hiveClientConfig = getHiveClientConfig();
    CacheConfig cacheConfig = getCacheConfig();
    MetastoreClientConfig metastoreClientConfig = getMetastoreClientConfig();
    hiveClientConfig.setTimeZone(timeZone);
    // Route Thrift metastore traffic through a SOCKS proxy when one is configured.
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        metastoreClientConfig.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }
    HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, host, port);
    // Wrap the Thrift metastore in BridgingHiveMetastore and add a caching layer on top.
    ExtendedHiveMetastore metastore = new CachingHiveMetastore(
            new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig), new HivePartitionMutator()),
            executor,
            false,
            Duration.valueOf("1m"),
            Duration.valueOf("15s"),
            10000,
            false,
            MetastoreCacheScope.ALL,
            0.0);
    setup(databaseName, hiveClientConfig, cacheConfig, metastoreClientConfig, metastore);
}
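The caching wrapper is optional: BridgingHiveMetastore by itself adapts the Thrift client to the ExtendedHiveMetastore interface, so a test that does not need caching can use the bridge directly. A minimal sketch, assuming the same TestingHiveCluster and MetastoreClientConfig setup as above (the host and port here are placeholders):

HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, "localhost", 9083);
ExtendedHiveMetastore metastore = new BridgingHiveMetastore(
        new ThriftHiveMetastore(hiveCluster, metastoreClientConfig),
        new HivePartitionMutator());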
Use of com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore in project presto by prestodb: class TestHiveClientInMemoryMetastoreWithFilterPushdown, method createMetastore.
@Override
protected ExtendedHiveMetastore createMetastore(File tempDir) {
    File baseDir = new File(tempDir, "metastore");
    // Back BridgingHiveMetastore with a purely in-memory metastore for tests.
    InMemoryHiveMetastore hiveMetastore = new InMemoryHiveMetastore(baseDir);
    return new BridgingHiveMetastore(hiveMetastore, new HivePartitionMutator());
}
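A metastore created this way can be populated directly in test setup. A rough sketch, reusing the Database builder pattern shown in the TestHiveMetadataFileFormatEncryptionSettings example below; the metastoreContext variable and the database name are placeholders, not part of the original test:

ExtendedHiveMetastore metastore = createMetastore(Files.createTempDir());
metastore.createDatabase(metastoreContext, Database.builder()
        .setDatabaseName("test_db")
        .setOwnerName("public")
        .setOwnerType(PrincipalType.ROLE)
        .build());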
Use of com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore in project presto by prestodb: class TestCachingHiveMetastore, method testCachingWithPartitionVersioning.
@Test
public void testCachingWithPartitionVersioning() {
    MockHiveMetastoreClient mockClient = new MockHiveMetastoreClient();
    MockHiveCluster mockHiveCluster = new MockHiveCluster(mockClient);
    ListeningExecutorService executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed("partition-versioning-test-%s")));
    MockHiveMetastore mockHiveMetastore = new MockHiveMetastore(mockHiveCluster);
    PartitionMutator mockPartitionMutator = new MockPartitionMutator(identity());
    ColumnConverter hiveColumnConverter = new HiveColumnConverter();
    // Enable partition versioning with partition-scoped caching.
    CachingHiveMetastore partitionCachingEnabledmetastore = new CachingHiveMetastore(
            new BridgingHiveMetastore(mockHiveMetastore, mockPartitionMutator),
            executor,
            false,
            new Duration(5, TimeUnit.MINUTES),
            new Duration(1, TimeUnit.MINUTES),
            1000,
            true,
            MetastoreCacheScope.PARTITION,
            0.0);
    assertEquals(mockClient.getAccessCount(), 0);
    assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS);
    assertEquals(mockClient.getAccessCount(), 1);
    assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS);
    // Assert that we did not hit the cache.
    assertEquals(mockClient.getAccessCount(), 2);
    // Select all of the available partitions and load them into the cache.
    assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2);
    assertEquals(mockClient.getAccessCount(), 3);
    // Now, fetching either or both of them should hit the cache.
    assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1)).size(), 1);
    assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION2)).size(), 1);
    assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2);
    assertEquals(mockClient.getAccessCount(), 3);
    // This call should NOT invalidate the partition cache because the partition version is the same as before.
    assertEquals(partitionCachingEnabledmetastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of()), EXPECTED_PARTITIONS);
    assertEquals(mockClient.getAccessCount(), 4);
    assertEquals(partitionCachingEnabledmetastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2);
    // Assert that it is a cache hit.
    assertEquals(mockClient.getAccessCount(), 4);
    // Changing the partition version in either direction should invalidate the cached partitions.
    assertInvalidateCache(new MockPartitionMutator(version -> version + 1));
    assertInvalidateCache(new MockPartitionMutator(version -> version - 1));
}
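The assertInvalidateCache helper is not shown on this page. The sketch below is an assumption about the kind of check such a helper performs: rebuild the caching metastore with the given mutator, warm the partition cache, and verify that a changed partition version forces another metastore access instead of a cache hit. The method name and the assertTrue-based check are illustrative, not the original implementation:

private static void assertInvalidateCacheSketch(MockPartitionMutator partitionMutator) {
    MockHiveMetastoreClient mockClient = new MockHiveMetastoreClient();
    MockHiveCluster mockHiveCluster = new MockHiveCluster(mockClient);
    MockHiveMetastore mockHiveMetastore = new MockHiveMetastore(mockHiveCluster);
    ListeningExecutorService executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed("partition-versioning-test-%s")));
    CachingHiveMetastore metastore = new CachingHiveMetastore(
            new BridgingHiveMetastore(mockHiveMetastore, partitionMutator),
            executor, false, new Duration(5, TimeUnit.MINUTES), new Duration(1, TimeUnit.MINUTES),
            1000, true, MetastoreCacheScope.PARTITION, 0.0);
    // Warm the cache with both partitions.
    metastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of());
    metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2));
    int accessCount = mockClient.getAccessCount();
    // Because the mutator changes the partition version on every read, the cached partitions
    // should be invalidated and the metastore accessed again.
    metastore.getPartitionNamesByFilter(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableMap.of());
    metastore.getPartitionsByNames(TEST_METASTORE_CONTEXT, TEST_DATABASE, TEST_TABLE, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2));
    assertTrue(mockClient.getAccessCount() > accessCount);
}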
Use of com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore in project presto by prestodb: class TestHiveMetadataFileFormatEncryptionSettings, method setup.
@BeforeClass
public void setup() {
    baseDirectory = new File(Files.createTempDir(), "metastore");
    metastore = new BridgingHiveMetastore(new InMemoryHiveMetastore(baseDirectory), new HivePartitionMutator());
    executor = newCachedThreadPool(daemonThreadsNamed("hive-encryption-test-%s"));
    transactionManager = new HiveTransactionManager();
    metadataFactory = new HiveMetadataFactory(
            metastore, HDFS_ENVIRONMENT, new HivePartitionManager(FUNCTION_AND_TYPE_MANAGER, HIVE_CLIENT_CONFIG),
            DateTimeZone.forTimeZone(TimeZone.getTimeZone(ZoneId.of(HIVE_CLIENT_CONFIG.getTimeZone()))),
            true, false, false, false, true, true,
            HIVE_CLIENT_CONFIG.getMaxPartitionBatchSize(), HIVE_CLIENT_CONFIG.getMaxPartitionsPerScan(), false,
            FUNCTION_AND_TYPE_MANAGER, new HiveLocationService(HDFS_ENVIRONMENT), FUNCTION_RESOLUTION, ROW_EXPRESSION_SERVICE, FILTER_STATS_CALCULATOR_SERVICE,
            new TableParameterCodec(), PARTITION_UPDATE_CODEC, PARTITION_UPDATE_SMILE_CODEC, listeningDecorator(executor), new HiveTypeTranslator(),
            new HiveStagingFileCommitter(HDFS_ENVIRONMENT, listeningDecorator(executor)), new HiveZeroRowFileCreator(HDFS_ENVIRONMENT, new OutputStreamDataSinkFactory(), listeningDecorator(executor)),
            TEST_SERVER_VERSION, new HivePartitionObjectBuilder(), new HiveEncryptionInformationProvider(ImmutableList.of(new TestDwrfEncryptionInformationSource())),
            new HivePartitionStats(), new HiveFileRenamer(), HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    metastore.createDatabase(METASTORE_CONTEXT, Database.builder().setDatabaseName(TEST_DB_NAME).setOwnerName("public").setOwnerType(PrincipalType.ROLE).build());
}
Use of com.facebook.presto.hive.metastore.thrift.BridgingHiveMetastore in project presto by prestodb: class TestingSemiTransactionalHiveMetastore, method create.
public static TestingSemiTransactionalHiveMetastore create() {
    // none of these values matter, as we never use them
    HiveClientConfig config = new HiveClientConfig();
    MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config, metastoreClientConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
    HiveCluster hiveCluster = new TestingHiveCluster(metastoreClientConfig, HOST, PORT);
    ColumnConverterProvider columnConverterProvider = HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER;
    ExtendedHiveMetastore delegate = new BridgingHiveMetastore(new ThriftHiveMetastore(hiveCluster, metastoreClientConfig), new HivePartitionMutator());
    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-%s"));
    ListeningExecutorService renameExecutor = listeningDecorator(executor);
    return new TestingSemiTransactionalHiveMetastore(hdfsEnvironment, delegate, renameExecutor, false, false, true, columnConverterProvider);
}