Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestIcebergSplitSource, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    HdfsConfig config = new HdfsConfig();
    HdfsConfiguration configuration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(configuration, config, new NoHdfsAuthentication());

    File tempDir = Files.createTempDirectory("test_iceberg_split_source").toFile();
    this.metastoreDir = new File(tempDir, "iceberg_data");
    HiveMetastore metastore = createTestingFileHiveMetastore(metastoreDir);

    IcebergTableOperationsProvider operationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
    this.catalog = new TrinoHiveCatalog(
            new CatalogName("hive"), memoizeMetastore(metastore, 1000), hdfsEnvironment,
            new TestingTypeManager(), operationsProvider, "test", false, false, false);

    return createIcebergQueryRunner(ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(NATION), Optional.of(metastoreDir));
}
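A setup like this leaves the temporary metastore directory on disk, so the test class normally pairs it with a teardown that deletes it. A minimal sketch, assuming TestNG and Guava's MoreFiles helper (the metastoreDir field comes from the setup above; everything else is illustrative):

@AfterClass(alwaysRun = true)
public void tearDown()
        throws IOException
{
    // illustrative cleanup: remove the temporary directory that holds the file metastore
    // (deleteRecursively and ALLOW_INSECURE come from com.google.common.io.MoreFiles)
    deleteRecursively(metastoreDir.getParentFile().toPath(), ALLOW_INSECURE);
}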
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestMetadataQueryOptimization, method createLocalQueryRunner:
@Override
protected LocalQueryRunner createLocalQueryRunner()
{
    Session session = testSessionBuilder()
            .setCatalog(ICEBERG_CATALOG)
            .setSchema(SCHEMA_NAME)
            .build();
    baseDir = Files.createTempDir();
    HiveMetastore metastore = createTestingFileHiveMetastore(baseDir);

    LocalQueryRunner queryRunner = LocalQueryRunner.create(session);
    queryRunner.createCatalog(ICEBERG_CATALOG, new TestingIcebergConnectorFactory(Optional.of(metastore), Optional.empty(), EMPTY_MODULE), ImmutableMap.of());

    Database database = Database.builder()
            .setDatabaseName(SCHEMA_NAME)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build();
    metastore.createDatabase(database);
    return queryRunner;
}
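The runner above is aimed at tests that check whether queries over partition columns can be answered from Iceberg metadata alone. A hypothetical usage sketch (the table name, columns, and values are illustrative, not taken from the test):

// create a partitioned Iceberg table, add a few rows, then run a query that touches
// only the partition column, which the metadata optimization can answer without reading data files
queryRunner.execute("CREATE TABLE test_table (value BIGINT, key VARCHAR) WITH (partitioning = ARRAY['key'])");
queryRunner.execute("INSERT INTO test_table VALUES (1, 'a'), (2, 'b')");
queryRunner.execute("SELECT DISTINCT key FROM test_table");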
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestIcebergMetadataFileOperations, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    Session session = testSessionBuilder()
            .setCatalog("iceberg")
            .setSchema("test_schema")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session)
            .setNodeCount(1)
            .build();

    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HiveMetastore metastore = createTestingFileHiveMetastore(baseDir);

    trackingFileIoProvider = new TrackingFileIoProvider(new HdfsFileIoProvider(HDFS_ENVIRONMENT));
    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.of(trackingFileIoProvider), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");

    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");

    queryRunner.execute("CREATE SCHEMA test_schema");
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, session, TpchTable.getTables());
    return queryRunner;
}
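The TrackingFileIoProvider wraps the HDFS file IO so a test can count how many metadata files each query touches. A hypothetical assertion pattern, shown only to illustrate the intent (the reset() and getOperationCounts() accessors are assumptions, not verified against the provider's actual API):

// illustrative only: accessor names below are assumed for the sketch
trackingFileIoProvider.reset();
queryRunner.execute("SELECT * FROM orders LIMIT 10");
assertThat(trackingFileIoProvider.getOperationCounts()).isNotEmpty();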
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestSpatialJoins, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    Session session = testSessionBuilder()
            .setSource(TestSpatialJoins.class.getSimpleName())
            .setCatalog("hive")
            .setSchema("default")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();
    queryRunner.installPlugin(new GeoPlugin());

    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
    HiveMetastore metastore = createTestingFileHiveMetastore(baseDir);
    metastore.createDatabase(Database.builder()
            .setDatabaseName("default")
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(PrincipalType.ROLE))
            .build());

    queryRunner.installPlugin(new TestingHivePlugin(metastore));
    queryRunner.createCatalog("hive", "hive");
    return queryRunner;
}
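With the GeoPlugin installed and the hive catalog backed by the file metastore, tests can exercise spatial joins directly in SQL. An illustrative sketch (table and column names are hypothetical; the predicate uses the GeoPlugin functions ST_Contains, ST_GeometryFromText, and ST_Point):

// hypothetical tables created in the hive catalog, then joined on a spatial predicate
queryRunner.execute("CREATE TABLE points (name VARCHAR, x DOUBLE, y DOUBLE)");
queryRunner.execute("CREATE TABLE polygons (name VARCHAR, wkt VARCHAR)");
queryRunner.execute("SELECT a.name, b.name FROM points a, polygons b " +
        "WHERE ST_Contains(ST_GeometryFromText(b.wkt), ST_Point(a.x, a.y))");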
Use of io.trino.plugin.hive.metastore.HiveMetastore in project trino by trinodb.
From the class TestHivePageSink, method writeTestFile:
private static long writeTestFile(HiveConfig config, HiveMetastore metastore, String outputPath)
{
    HiveTransactionHandle transaction = new HiveTransactionHandle(false);
    HiveWriterStats stats = new HiveWriterStats();
    ConnectorPageSink pageSink = createPageSink(transaction, config, metastore, new Path("file:///" + outputPath), stats);
    List<LineItemColumn> columns = getTestColumns();
    List<Type> columnTypes = columns.stream()
            .map(LineItemColumn::getType)
            .map(TestHivePageSink::getHiveType)
            .map(hiveType -> hiveType.getType(TESTING_TYPE_MANAGER))
            .collect(toList());

    // Build a single page of generated TPC-H lineitem data
    PageBuilder pageBuilder = new PageBuilder(columnTypes);
    int rows = 0;
    for (LineItem lineItem : new LineItemGenerator(0.01, 1, 1)) {
        rows++;
        if (rows >= NUM_ROWS) {
            break;
        }
        pageBuilder.declarePosition();
        for (int i = 0; i < columns.size(); i++) {
            LineItemColumn column = columns.get(i);
            BlockBuilder blockBuilder = pageBuilder.getBlockBuilder(i);
            switch (column.getType().getBase()) {
                case IDENTIFIER:
                    BIGINT.writeLong(blockBuilder, column.getIdentifier(lineItem));
                    break;
                case INTEGER:
                    INTEGER.writeLong(blockBuilder, column.getInteger(lineItem));
                    break;
                case DATE:
                    DATE.writeLong(blockBuilder, column.getDate(lineItem));
                    break;
                case DOUBLE:
                    DOUBLE.writeDouble(blockBuilder, column.getDouble(lineItem));
                    break;
                case VARCHAR:
                    createUnboundedVarcharType().writeSlice(blockBuilder, Slices.utf8Slice(column.getString(lineItem)));
                    break;
                default:
                    throw new IllegalArgumentException("Unsupported type " + column.getType());
            }
        }
    }

    // Write the page through the Hive page sink and wait for the write to complete
    Page page = pageBuilder.build();
    pageSink.appendPage(page);
    getFutureValue(pageSink.finish());

    // Locate the single data file that was produced (ignoring checksum files)
    File outputDir = new File(outputPath);
    List<File> files = ImmutableList.copyOf(outputDir.listFiles((dir, name) -> !name.endsWith(".crc")));
    File outputFile = getOnlyElement(files);
    long length = outputFile.length();

    // Read the file back and verify it round-trips to the original page
    ConnectorPageSource pageSource = createPageSource(transaction, config, outputFile);
    List<Page> pages = new ArrayList<>();
    while (!pageSource.isFinished()) {
        Page nextPage = pageSource.getNextPage();
        if (nextPage != null) {
            pages.add(nextPage.getLoadedPage());
        }
    }
    MaterializedResult expectedResults = toMaterializedResult(getHiveSession(config), columnTypes, ImmutableList.of(page));
    MaterializedResult results = toMaterializedResult(getHiveSession(config), columnTypes, pages);
    assertEquals(results, expectedResults);
    assertEquals(round(stats.getInputPageSizeInBytes().getAllTime().getMax()), page.getRetainedSizeInBytes());
    return length;
}
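A caller of writeTestFile only needs a HiveConfig, a file-backed metastore, and a scratch path; the returned file length is what tests such as storage-format comparisons inspect. A hypothetical invocation (paths and the storage format are illustrative):

// illustrative caller: write the generated lineitem page to a temporary location and capture the file size
File tempDir = java.nio.file.Files.createTempDirectory("test_hive_page_sink").toFile();
HiveMetastore metastore = createTestingFileHiveMetastore(new File(tempDir, "metastore"));
HiveConfig config = new HiveConfig().setHiveStorageFormat(HiveStorageFormat.ORC);
long fileLength = writeTestFile(config, metastore, new File(tempDir, "output").getAbsolutePath());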