Use of io.trino.plugin.iceberg.catalog.file.FileMetastoreTableOperationsProvider in project trino by trinodb.
From the class TestIcebergSplitSource, the method createQueryRunner:
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());

    // Back the catalog with a file-based Hive metastore rooted in a fresh temporary directory
    File tempDir = Files.createTempDirectory("test_iceberg_split_source").toFile();
    this.metastoreDir = new File(tempDir, "iceberg_data");
    HiveMetastore metastore = createTestingFileHiveMetastore(metastoreDir);

    // FileMetastoreTableOperationsProvider resolves Iceberg table metadata through that file metastore
    IcebergTableOperationsProvider operationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    this.catalog = new TrinoHiveCatalog(new CatalogName("hive"), memoizeMetastore(metastore, 1000), hdfsEnvironment, new TestingTypeManager(), operationsProvider, "test", false, false, false);

    // Pre-load the TPC-H NATION table into the Iceberg catalog
    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(NATION), Optional.of(metastoreDir));
}
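The catalog built above lets a test load Iceberg tables directly, bypassing the query engine. A minimal sketch of such a test, assuming operationsProvider is also kept as a field (the original stores it in a local variable) and reusing the loadIcebergTable call shown in the TestIcebergV2 example below; the assertion is illustrative:

@Test
public void testNationTableLoads()
{
    // Hypothetical test; NATION is pre-loaded by createIcebergQueryRunner above,
    // and operationsProvider is assumed to be a field alongside catalog
    Table nation = loadIcebergTable(catalog, operationsProvider, SESSION, new SchemaTableName("tpch", "nation"));
    // The bootstrapped table should already have a snapshot with data
    assertNotNull(nation.currentSnapshot());
}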
From the class TestIcebergV2, the method updateTableToV2:
private Table updateTableToV2(String tableName)
{
    // Same file-metastore-backed provider and catalog that the query runner uses
    IcebergTableOperationsProvider tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    TrinoCatalog catalog = new TrinoHiveCatalog(new CatalogName("hive"), CachingHiveMetastore.memoizeMetastore(metastore, 1000), hdfsEnvironment, new TestingTypeManager(), tableOperationsProvider, "test", false, false, false);

    // Load the underlying Iceberg table and commit an upgrade of its metadata to format version 2
    BaseTable table = (BaseTable) loadIcebergTable(catalog, tableOperationsProvider, SESSION, new SchemaTableName("tpch", tableName));
    TableOperations operations = table.operations();
    TableMetadata currentMetadata = operations.current();
    operations.commit(currentMetadata, currentMetadata.upgradeToFormatVersion(2));
    return table;
}
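A sketch of how a test might call updateTableToV2 and verify the commit; the table name is hypothetical, and formatVersion() is the Iceberg TableMetadata accessor for the version field:

@Test
public void testUpgradeToV2()
{
    // Hypothetical table name; create a v1 table through the engine first
    assertUpdate("CREATE TABLE test_v2_upgrade AS SELECT 1 x", 1);
    BaseTable table = (BaseTable) updateTableToV2("test_v2_upgrade");
    // After the commit, the current metadata should report format version 2
    assertEquals(table.operations().current().formatVersion(), 2);
}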
From the class TestIcebergOrcMetricsCollection, the method createQueryRunner:
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    // Single-task, single-writer session so each write produces one ORC file with predictable metrics
    Session session = testSessionBuilder()
            .setCatalog("iceberg")
            .setSchema("test_schema")
            .setSystemProperty(TASK_CONCURRENCY, "1")
            .setSystemProperty(TASK_WRITER_COUNT, "1")
            .setSystemProperty(MAX_DRIVERS_PER_TASK, "1")
            .setCatalogSessionProperty("iceberg", "orc_string_statistics_limit", Integer.MAX_VALUE + "B")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).setNodeCount(1).build();

    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig().setCatalogDirectory(baseDir.toURI().toString()).setMetastoreUser("test"));

    // Stored as fields so tests can load Iceberg tables directly and inspect the collected metrics
    tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    trinoCatalog = new TrinoHiveCatalog(new CatalogName("catalog"), memoizeMetastore(metastore, 1000), hdfsEnvironment, new TestingTypeManager(), tableOperationsProvider, "trino-version", false, false, false);

    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");
    queryRunner.execute("CREATE SCHEMA test_schema");
    return queryRunner;
}
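With the runner in place, a test can write through the iceberg catalog and then read the collected metrics straight off the Iceberg data files. A minimal sketch, assuming TPC-H's tiny orders table (15000 rows) and the loadIcebergTable helper shown in the TestIcebergV2 example above; the table name and assertion are illustrative:

@Test
public void testOrcMetricsArePresent()
{
    // Hypothetical table; CTAS from TPC-H tiny orders (15000 rows)
    assertUpdate("CREATE TABLE test_schema.orders AS SELECT * FROM tpch.tiny.orders", 15000);
    Table table = loadIcebergTable(trinoCatalog, tableOperationsProvider, SESSION, new SchemaTableName("test_schema", "orders"));
    // Every data file written by the ORC writer should carry per-column value counts
    for (FileScanTask task : table.newScan().planFiles()) {
        assertFalse(task.file().valueCounts().isEmpty());
    }
}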
From the class TestIcebergMergeAppend, the method createQueryRunner:
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    DistributedQueryRunner queryRunner = IcebergQueryRunner.createIcebergQueryRunner();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());

    // File-based metastore rooted in the query runner's data directory
    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HiveMetastore metastore = new FileHiveMetastore(
            new NodeVersion("testversion"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig().setCatalogDirectory(baseDir.toURI().toString()).setMetastoreUser("test"));

    // Stored as fields so the test can load tables directly and inspect committed snapshots
    tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    trinoCatalog = new TrinoHiveCatalog(new CatalogName("catalog"), memoizeMetastore(metastore, 1000), hdfsEnvironment, new TestingTypeManager(), tableOperationsProvider, "trino-version", false, false, false);
    return queryRunner;
}
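The stored catalog and provider let the test examine the snapshot that Trino commits for a write. A sketch, assuming the runner's default tpch schema and a hypothetical table name; "append" is the operation string Iceberg records in a snapshot for appended data:

@Test
public void testInsertCommitsAppendSnapshot()
{
    // Hypothetical table name; write through the engine, then inspect the Iceberg snapshot
    assertUpdate("CREATE TABLE merge_append_test (x integer)");
    assertUpdate("INSERT INTO merge_append_test VALUES 1, 2, 3", 3);
    Table table = loadIcebergTable(trinoCatalog, tableOperationsProvider, SESSION, new SchemaTableName("tpch", "merge_append_test"));
    // The insert should be recorded as an Iceberg append operation
    assertEquals(table.currentSnapshot().operation(), "append");
}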