Use of io.trino.plugin.hive.HdfsConfig in project trino by trinodb.
From class TestIcebergMetadataListing, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    Session session = testSessionBuilder()
            .setIdentity(Identity.forUser("hive")
                    .withConnectorRole("hive", new SelectedRole(ROLE, Optional.of("admin")))
                    .build())
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");
    queryRunner.installPlugin(new TestingHivePlugin(metastore));
    queryRunner.createCatalog("hive", "hive", ImmutableMap.of("hive.security", "sql-standard"));
    return queryRunner;
}
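Note that both plugins are handed the same FileHiveMetastore instance, so the hive and iceberg catalogs list metadata from one shared store. As a rough smoke test against a runner built this way, one could run (illustrative queries, not taken from TestIcebergMetadataListing itself):

queryRunner.execute("SHOW SCHEMAS FROM iceberg");
queryRunner.execute("SELECT table_name FROM hive.information_schema.tables");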
Use of io.trino.plugin.hive.HdfsConfig in project trino by trinodb.
From class TestIcebergMetastoreAccessOperations, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(TEST_SESSION).build();
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    HiveMetastore hiveMetastore = new FileHiveMetastore(
            new NodeVersion("testversion"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    metastore = new CountingAccessFileHiveMetastore(hiveMetastore);
    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");
    queryRunner.execute("CREATE SCHEMA test_schema");
    return queryRunner;
}
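Here the file metastore is wrapped in CountingAccessFileHiveMetastore, so every metastore call a statement triggers is routed through the counting wrapper and can be asserted on afterwards. For example, creating a table in the prepared schema exercises the wrapper (hypothetical table name, not from the test):

queryRunner.execute("CREATE TABLE test_schema.test_table (id integer)");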
Use of io.trino.plugin.hive.HdfsConfig in project trino by trinodb.
From class TestTrinoGlueCatalogTest, method testDefaultLocation:
@Test
public void testDefaultLocation()
        throws IOException
{
    Path tmpDirectory = Files.createTempDirectory("test_glue_catalog_default_location_");
    tmpDirectory.toFile().deleteOnExit();
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(
            new HiveHdfsConfiguration(new HdfsConfigurationInitializer(new HdfsConfig(), ImmutableSet.of()), ImmutableSet.of()),
            new HdfsConfig(),
            new NoHdfsAuthentication());
    TrinoCatalog catalogWithDefaultLocation = new TrinoGlueCatalog(
            hdfsEnvironment,
            new GlueIcebergTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment), new GlueHiveMetastoreConfig()),
            AWSGlueAsyncClientBuilder.defaultClient(),
            new GlueMetastoreStats(),
            Optional.of(tmpDirectory.toAbsolutePath().toString()),
            false);
    String namespace = "test_default_location_" + randomTableSuffix();
    String table = "tableName";
    SchemaTableName schemaTableName = new SchemaTableName(namespace, table);
    catalogWithDefaultLocation.createNamespace(SESSION, namespace, ImmutableMap.of(), new TrinoPrincipal(PrincipalType.USER, SESSION.getUser()));
    try {
        File expectedSchemaDirectory = new File(tmpDirectory.toFile(), namespace + ".db");
        File expectedTableDirectory = new File(expectedSchemaDirectory, schemaTableName.getTableName());
        assertEquals(catalogWithDefaultLocation.defaultTableLocation(SESSION, schemaTableName), expectedTableDirectory.toPath().toAbsolutePath().toString());
    }
    finally {
        try {
            catalogWithDefaultLocation.dropNamespace(SESSION, namespace);
        }
        catch (Exception e) {
            LOG.warn("Failed to clean up namespace: %s", namespace);
        }
    }
}
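The assertion encodes the Hive-style layout the Glue catalog uses for default locations: a namespace maps to <warehouse>/<namespace>.db, and a table to a directory beneath it. Expressed directly with the test's own variables, the expected path is (a sketch equivalent to the File-based construction above):

String expectedLocation = tmpDirectory.resolve(namespace + ".db").resolve(schemaTableName.getTableName()).toAbsolutePath().toString();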
Use of io.trino.plugin.hive.HdfsConfig in project trino by trinodb.
From class FileHiveMetastore, method createTestingFileHiveMetastore:
@VisibleForTesting
public static FileHiveMetastore createTestingFileHiveMetastore(File catalogDirectory)
{
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    return new FileHiveMetastore(
            new NodeVersion("testversion"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(catalogDirectory.toURI().toString())
                    .setMetastoreUser("test"));
}
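This helper packages exactly the boilerplate the query-runner examples above write out inline: an HdfsEnvironment with no HDFS authentication plus a file-backed metastore rooted at the given directory. A minimal sketch of calling it from a test (the temporary-directory handling is an assumption, not part of the Trino source):

File catalogDir = Files.createTempDirectory("file_metastore_").toFile();
catalogDir.deleteOnExit();
FileHiveMetastore metastore = createTestingFileHiveMetastore(catalogDir);
// metastore can now back a TestingHivePlugin or TestingIcebergPlugin, as in the runners above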
Use of io.trino.plugin.hive.HdfsConfig in project trino by trinodb.
From class TestTableSnapshot, method setUp:
@BeforeMethod
public void setUp()
        throws IOException, URISyntaxException
{
    checkpointSchemaManager = new CheckpointSchemaManager(typeManager);
    URI deltaLogPath = getClass().getClassLoader().getResource("databricks/person").toURI();
    tableLocation = new Path(deltaLogPath);
    Configuration conf = new Configuration(false);
    FileSystem filesystem = tableLocation.getFileSystem(conf);
    accessTrackingFileSystem = new AccessTrackingFileSystem(filesystem);
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
}
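Every file read performed through accessTrackingFileSystem is recorded, which is what lets the snapshot tests verify exactly which transaction-log files a table-snapshot load opens. As an illustration (not a call from the test itself), listing the standard Delta Lake log directory goes through the tracking wrapper:

FileStatus[] logEntries = accessTrackingFileSystem.listStatus(new Path(tableLocation, "_delta_log"));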