Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class AbstractTestDeltaLakeCreateTableStatistics, method getAddFileEntries.
protected List<AddFileEntry> getAddFileEntries(String tableName) throws IOException
{
    TestingConnectorContext context = new TestingConnectorContext();
    // Build an HdfsEnvironment backed by HiveHdfsConfiguration with default settings
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    // Load the latest transaction log snapshot for the table and return its active data files
    TransactionLogAccess transactionLogAccess = new TransactionLogAccess(
            context.getTypeManager(), new CheckpointSchemaManager(context.getTypeManager()), new DeltaLakeConfig(),
            new FileFormatDataSourceStats(), hdfsEnvironment, new ParquetReaderConfig(), new DeltaLakeConfig());
    return transactionLogAccess.getActiveFiles(
            transactionLogAccess.loadSnapshot(new SchemaTableName(SCHEMA, tableName), new Path(format("s3://%s/%s", bucketName, tableName)), SESSION),
            SESSION);
}
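A test in the same class might call this helper after creating a table and assert on the active files it returns. The sketch below is illustrative only: the table name, the expected file count, and the use of AssertJ are assumptions, not taken from the Trino source.

@Test
public void testActiveFiles() throws IOException
{
    // Hypothetical table name; a real test would create the table first
    List<AddFileEntry> entries = getAddFileEntries("test_table");
    // Each AddFileEntry describes one active data file recorded in the Delta Lake transaction log
    assertThat(entries).hasSize(1);
    assertThat(entries.get(0).getPath()).isNotEmpty();
}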
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestRubixCaching, method getCachingFileSystem.
private FileSystem getCachingFileSystem(HdfsContext context, Path path) throws IOException
{
    HdfsConfigurationInitializer configurationInitializer = new HdfsConfigurationInitializer(config, ImmutableSet.of());
    // Layer the Rubix cache initializer plus extra dynamic settings on top of the base Hive HDFS configuration
    HiveHdfsConfiguration configuration = new HiveHdfsConfiguration(configurationInitializer, ImmutableSet.of(rubixConfigInitializer, (dynamicConfig, ignoredContext, ignoredUri) -> {
        dynamicConfig.set("fs.file.impl", CachingLocalFileSystem.class.getName());
        dynamicConfig.setBoolean("fs.gs.lazy.init.enable", true);
        dynamicConfig.set("fs.azure.account.key", "Zm9vCg==");
        dynamicConfig.set("fs.adl.oauth2.client.id", "test");
        dynamicConfig.set("fs.adl.oauth2.refresh.url", "http://localhost");
        dynamicConfig.set("fs.adl.oauth2.credential", "password");
    }));
    HdfsEnvironment environment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    return environment.getFileSystem(context, path);
}
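The FileSystem returned here is the Rubix caching wrapper, so a test can exercise the cache simply by reading through it. A minimal sketch, assuming a local path and a test-provided HdfsContext (both hypothetical):

// Illustrative usage: reads go through CachingLocalFileSystem, so repeated access can be served from the Rubix cache
FileSystem cachingFileSystem = getCachingFileSystem(context, new Path("file:///tmp/rubix-test"));
try (FSDataInputStream input = cachingFileSystem.open(new Path("file:///tmp/rubix-test/data.bin"))) {
    byte[] buffer = new byte[8192];
    int bytesRead = input.read(buffer);
}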
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestIcebergTableWithExternalLocation, method createQueryRunner.
@Override
protected DistributedQueryRunner createQueryRunner() throws Exception
{
    metastoreDir = Files.createTempDirectory("test_iceberg").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    // Back the test with a file-based Hive metastore rooted in the temporary directory
    FileHiveMetastoreConfig config = new FileHiveMetastoreConfig()
            .setCatalogDirectory(metastoreDir.toURI().toString())
            .setMetastoreUser("test");
    hdfsContext = new HdfsContext(ConnectorIdentity.ofUser(config.getMetastoreUser()));
    metastore = new FileHiveMetastore(new NodeVersion("testversion"), hdfsEnvironment, new MetastoreConfig(), config);
    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(), Optional.of(metastoreDir));
}
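Because the metastore directory is created with Files.createTempDirectory, the test is responsible for removing it. A typical cleanup sketch; the Guava deleteRecursively call is an assumption about how such a test tidies up, not copied from the source:

@AfterClass(alwaysRun = true)
public void tearDown() throws IOException
{
    // Remove the temporary file-based metastore created in createQueryRunner
    deleteRecursively(metastoreDir.toPath(), ALLOW_INSECURE);
}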
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestIcebergV2, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner() throws Exception
{
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    hdfsEnvironment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());
    // Stage the Iceberg catalog in a temporary directory served by a file-based metastore
    tempDir = Files.createTempDirectory("test_iceberg_v2");
    metastoreDir = tempDir.resolve("iceberg_data").toFile();
    metastore = createTestingFileHiveMetastore(metastoreDir);
    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(NATION), Optional.of(metastoreDir));
}
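The hdfsEnvironment and metastore built here are kept as fields so that later test cases can reach the files behind the Iceberg tables. A rough sketch of that kind of access; the HdfsContext construction and the SESSION constant are assumptions for illustration, not part of the source:

// Hypothetical: list everything written under the temporary catalog directory
FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsContext(SESSION), new Path(metastoreDir.toURI()));
RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(new Path(metastoreDir.toURI()), true);
while (files.hasNext()) {
    System.out.println(files.next().getPath());
}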
Use of io.trino.plugin.hive.HiveHdfsConfiguration in project trino by trinodb.
From the class TestIcebergOrcMetricsCollection, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner() throws Exception
{
    Session session = testSessionBuilder()
            .setCatalog("iceberg")
            .setSchema("test_schema")
            .setSystemProperty(TASK_CONCURRENCY, "1")
            .setSystemProperty(TASK_WRITER_COUNT, "1")
            .setSystemProperty(MAX_DRIVERS_PER_TASK, "1")
            .setCatalogSessionProperty("iceberg", "orc_string_statistics_limit", Integer.MAX_VALUE + "B")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).setNodeCount(1).build();
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    // Wire a file-based Hive metastore over an HdfsEnvironment backed by HiveHdfsConfiguration
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(new NodeVersion("test_version"), hdfsEnvironment, new MetastoreConfig(),
            new FileHiveMetastoreConfig().setCatalogDirectory(baseDir.toURI().toString()).setMetastoreUser("test"));
    tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    trinoCatalog = new TrinoHiveCatalog(new CatalogName("catalog"), memoizeMetastore(metastore, 1000), hdfsEnvironment,
            new TestingTypeManager(), tableOperationsProvider, "trino-version", false, false, false);
    // Register the Iceberg and TPC-H catalogs and create the schema used by the tests
    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");
    queryRunner.execute("CREATE SCHEMA test_schema");
    return queryRunner;
}
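With the runner wired up this way, a test method can create an Iceberg table and read the write metrics back through the table's $files metadata table. A hedged example of that pattern; the table name is made up, while file_path and record_count are standard columns of the Iceberg $files table:

// Illustrative: materialize a table from TPC-H and inspect the per-file metrics Iceberg recorded
queryRunner.execute("CREATE TABLE test_schema.orders AS SELECT * FROM tpch.tiny.orders");
MaterializedResult files = queryRunner.execute("SELECT file_path, record_count FROM test_schema.\"orders$files\"");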