Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestIcebergOrcMetricsCollection, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception {
Session session = testSessionBuilder()
        .setCatalog("iceberg")
        .setSchema("test_schema")
        // constrain execution to a single task, writer, and driver
        .setSystemProperty(TASK_CONCURRENCY, "1")
        .setSystemProperty(TASK_WRITER_COUNT, "1")
        .setSystemProperty(MAX_DRIVERS_PER_TASK, "1")
        // effectively unlimited ORC string statistics
        .setCatalogSessionProperty("iceberg", "orc_string_statistics_limit", Integer.MAX_VALUE + "B")
        .build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).setNodeCount(1).build();
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
HdfsConfig hdfsConfig = new HdfsConfig();
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
HiveMetastore metastore = new FileHiveMetastore(
        new NodeVersion("test_version"),
        hdfsEnvironment,
        new MetastoreConfig(),
        new FileHiveMetastoreConfig()
                .setCatalogDirectory(baseDir.toURI().toString())
                .setMetastoreUser("test"));
tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
trinoCatalog = new TrinoHiveCatalog(
        new CatalogName("catalog"),
        memoizeMetastore(metastore, 1000),
        hdfsEnvironment,
        new TestingTypeManager(),
        tableOperationsProvider,
        "trino-version",
        false,
        false,
        false);
queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
queryRunner.createCatalog("iceberg", "iceberg");
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");
queryRunner.execute("CREATE SCHEMA test_schema");
return queryRunner;
}
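The memoizeMetastore(metastore, 1000) call wraps the file-backed metastore in a bounded cache so repeated metadata lookups during a test do not re-read metadata files. As a rough sketch of the idea only (not the real implementation behind memoizeMetastore, which caches many more operations), a memoizing wrapper for a single method could look like this; the class name and method selection are illustrative:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.Optional;
import java.util.concurrent.ExecutionException;

// Illustrative only: a minimal memoizing wrapper showing what memoizeMetastore
// accomplishes conceptually.
class MemoizingMetastoreSketch {
    private final HiveMetastore delegate;
    private final Cache<String, Optional<Database>> databaseCache;

    MemoizingMetastoreSketch(HiveMetastore delegate, long maximumSize) {
        this.delegate = delegate;
        this.databaseCache = CacheBuilder.newBuilder()
                .maximumSize(maximumSize)
                .build();
    }

    Optional<Database> getDatabase(String name) throws ExecutionException {
        // first call loads from the delegate; subsequent calls are served from memory
        return databaseCache.get(name, () -> delegate.getDatabase(name));
    }
}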
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestIcebergProjectionPushdownPlans, method createLocalQueryRunner:
@Override
protected LocalQueryRunner createLocalQueryRunner() {
Session session = testSessionBuilder().setCatalog(CATALOG).setSchema(SCHEMA).build();
metastoreDir = Files.createTempDir();
HiveMetastore metastore = createTestingFileHiveMetastore(metastoreDir);
LocalQueryRunner queryRunner = LocalQueryRunner.create(session);
queryRunner.createCatalog(
        CATALOG,
        new TestingIcebergConnectorFactory(Optional.of(metastore), Optional.empty(), EMPTY_MODULE),
        ImmutableMap.of());
Database database = Database.builder()
        .setDatabaseName(SCHEMA)
        .setOwnerName(Optional.of("public"))
        .setOwnerType(Optional.of(PrincipalType.ROLE))
        .build();
metastore.createDatabase(database);
return queryRunner;
}
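Since the connector and the test share the same metastore instance here, a follow-up check could confirm that the schema registered above is visible. This usage sketch is hypothetical and assumes HiveMetastore exposes getDatabase(String) returning an Optional, and that format is the statically imported String.format already used elsewhere on this page:

// Hypothetical follow-up (not from the source): verify the schema exists and
// can host a table before the plan tests run.
metastore.getDatabase(SCHEMA)
        .orElseThrow(() -> new IllegalStateException("schema was not created: " + SCHEMA));
queryRunner.execute(format("CREATE TABLE %s.%s.sample (col0 bigint)", CATALOG, SCHEMA));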
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestIcebergMergeAppend, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception {
DistributedQueryRunner queryRunner = IcebergQueryRunner.createIcebergQueryRunner();
HdfsConfig hdfsConfig = new HdfsConfig();
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
HiveMetastore metastore = new FileHiveMetastore(
        new NodeVersion("testversion"),
        hdfsEnvironment,
        new MetastoreConfig(),
        new FileHiveMetastoreConfig()
                .setCatalogDirectory(baseDir.toURI().toString())
                .setMetastoreUser("test"));
tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
trinoCatalog = new TrinoHiveCatalog(
        new CatalogName("catalog"),
        memoizeMetastore(metastore, 1000),
        hdfsEnvironment,
        new TestingTypeManager(),
        tableOperationsProvider,
        "trino-version",
        false,
        false,
        false);
return queryRunner;
}
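A hypothetical test body that could run against this setup is sketched below; it assumes the class extends Trino's AbstractTestQueryFramework (which supplies assertUpdate and assertQuery, as query-runner tests in this repository typically do) and that the default session points at an Iceberg schema:

// Hypothetical sketch: each INSERT into an Iceberg table should append a new
// snapshot rather than rewrite existing data files.
@Test
public void testMergeAppendSketch() {
    assertUpdate("CREATE TABLE test_append (x bigint)");
    assertUpdate("INSERT INTO test_append VALUES 1, 2", 2);
    assertUpdate("INSERT INTO test_append VALUES 3", 1);
    assertQuery("SELECT count(*) FROM test_append", "VALUES 3");
}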
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestIcebergMetastoreAccessOperations, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner() throws Exception {
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(TEST_SESSION).build();
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
HdfsConfig hdfsConfig = new HdfsConfig();
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
HiveMetastore hiveMetastore = new FileHiveMetastore(
        new NodeVersion("testversion"),
        hdfsEnvironment,
        new MetastoreConfig(),
        new FileHiveMetastoreConfig()
                .setCatalogDirectory(baseDir.toURI().toString())
                .setMetastoreUser("test"));
// wrap the metastore so the test can assert on the number of metastore accesses
metastore = new CountingAccessFileHiveMetastore(hiveMetastore);
queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
queryRunner.createCatalog("iceberg", "iceberg");
queryRunner.execute("CREATE SCHEMA test_schema");
return queryRunner;
}
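CountingAccessFileHiveMetastore decorates the real metastore so the test can assert exactly how many metastore calls a statement triggers. The same effect can be sketched with a dynamic proxy, mirroring the reflection pattern used verbatim in the Delta Lake example below; this helper is illustrative, not the actual class:

import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.Multiset;
import com.google.common.reflect.Reflection;

// Illustrative counting decorator: tally every call by method name, then
// forward it to the underlying metastore.
// usage: Multiset<String> counts = ConcurrentHashMultiset.create();
//        HiveMetastore counted = countingMetastore(hiveMetastore, counts);
static HiveMetastore countingMetastore(HiveMetastore delegate, Multiset<String> counts) {
    return Reflection.newProxy(HiveMetastore.class, (proxy, method, args) -> {
        counts.add(method.getName());
        return method.invoke(delegate, args);
    });
}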
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestDeltaLakePerTransactionMetastoreCache, method createQueryRunner:
private DistributedQueryRunner createQueryRunner(boolean enablePerTransactionHiveMetastoreCaching) throws Exception {
boolean createdDeltaLake = false;
if (dockerizedMinioDataLake == null) {
// share the environment between test cases to speed things up
dockerizedMinioDataLake = createDockerizedMinioDataLakeForDeltaLake(BUCKET_NAME);
createdDeltaLake = true;
}
Session session = testSessionBuilder().setCatalog(DELTA_CATALOG).setSchema("default").build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();
queryRunner.installPlugin(new Plugin() {
    @Override
    public Iterable<ConnectorFactory> getConnectorFactories() {
        return ImmutableList.of(new ConnectorFactory() {
            @Override
            public String getName() {
                return TEST_DELTA_CONNECTOR_NAME;
            }

            @Override
            public Connector create(String catalogName, Map<String, String> config, ConnectorContext context) {
                return InternalDeltaLakeConnectorFactory.createConnector(catalogName, config, context, new AbstractConfigurationAwareModule() {
                    @Override
                    protected void setup(Binder binder) {
                        // wire a Thrift metastore stack by hand so the counting factory below can wrap it
                        newOptionalBinder(binder, ThriftMetastoreClientFactory.class)
                                .setDefault()
                                .to(DefaultThriftMetastoreClientFactory.class)
                                .in(Scopes.SINGLETON);
                        binder.bind(MetastoreLocator.class).to(StaticMetastoreLocator.class).in(Scopes.SINGLETON);
                        configBinder(binder).bindConfig(StaticMetastoreConfig.class);
                        configBinder(binder).bindConfig(ThriftMetastoreConfig.class);
                        binder.bind(ThriftMetastore.class).to(ThriftHiveMetastore.class).in(Scopes.SINGLETON);
                        newExporter(binder).export(ThriftMetastore.class).as(generator -> generator.generatedNameOf(ThriftHiveMetastore.class));
                        install(new ThriftMetastoreAuthenticationModule());
                        binder.bind(Boolean.class).annotatedWith(HideNonDeltaLakeTables.class).toInstance(false);
                        binder.bind(BridgingHiveMetastoreFactory.class).in(Scopes.SINGLETON);
                    }

                    @Provides
                    @Singleton
                    @RawHiveMetastoreFactory
                    public HiveMetastoreFactory getCountingHiveMetastoreFactory(BridgingHiveMetastoreFactory bridgingHiveMetastoreFactory) {
                        return new HiveMetastoreFactory() {
                            @Override
                            public boolean isImpersonationEnabled() {
                                return false;
                            }

                            @Override
                            public HiveMetastore createMetastore(Optional<ConnectorIdentity> identity) {
                                HiveMetastore bridgingHiveMetastore = bridgingHiveMetastoreFactory.createMetastore(identity);
                                // return a HiveMetastore proxy that counts method invocations
                                return Reflection.newProxy(HiveMetastore.class, (proxy, method, args) -> {
                                    String methodName = method.getName();
                                    long count = hiveMetastoreInvocationCounts.getOrDefault(methodName, 0L);
                                    hiveMetastoreInvocationCounts.put(methodName, count + 1);
                                    return method.invoke(bridgingHiveMetastore, args);
                                });
                            }
                        };
                    }
                });
            }
        });
    }
});
ImmutableMap.Builder<String, String> deltaLakeProperties = ImmutableMap.builder();
deltaLakeProperties.put("hive.metastore.uri", dockerizedMinioDataLake.getTestingHadoop().getMetastoreAddress());
deltaLakeProperties.put("hive.s3.aws-access-key", MINIO_ACCESS_KEY);
deltaLakeProperties.put("hive.s3.aws-secret-key", MINIO_SECRET_KEY);
deltaLakeProperties.put("hive.s3.endpoint", dockerizedMinioDataLake.getMinioAddress());
deltaLakeProperties.put("hive.s3.path-style-access", "true");
// use a test value so we do not clash with the default bindings
deltaLakeProperties.put("hive.metastore", "test");
if (!enablePerTransactionHiveMetastoreCaching) {
// almost disable the cache; 0 is not an allowed value for this config property
deltaLakeProperties.put("hive.per-transaction-metastore-cache-maximum-size", "1");
}
queryRunner.createCatalog(DELTA_CATALOG, TEST_DELTA_CONNECTOR_NAME, deltaLakeProperties.buildOrThrow());
if (createdDeltaLake) {
List<TpchTable<? extends TpchEntity>> tpchTables = List.of(TpchTable.NATION, TpchTable.REGION);
tpchTables.forEach(table -> {
String tableName = table.getTableName();
dockerizedMinioDataLake.copyResources("io/trino/plugin/deltalake/testing/resources/databricks/" + tableName, tableName);
queryRunner.execute(format("CREATE TABLE %s.%s.%s (dummy int) WITH (location = 's3://%s/%3$s')", DELTA_CATALOG, "default", tableName, BUCKET_NAME));
});
}
return queryRunner;
}
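The proxy above records one count per metastore method name, so a test can compare how often the Thrift metastore was actually reached with and without per-transaction caching. A hypothetical assertion over those counts (the method name and expected relation are illustrative, not taken from the source):

// Hypothetical usage (not from the source): run the same query under both
// settings and compare how often getTable reached the Thrift metastore.
queryRunner.execute("SELECT * FROM nation");
long getTableCalls = hiveMetastoreInvocationCounts.getOrDefault("getTable", 0L);
// with per-transaction caching enabled this count is expected to be lower,
// because repeated lookups within one transaction are served from the cache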