Use of io.trino.plugin.hive.NodeVersion in project trino by trinodb.
From the class TestIcebergMetadataListing, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    Session session = testSessionBuilder()
            .setIdentity(Identity.forUser("hive")
                    .withConnectorRole("hive", new SelectedRole(ROLE, Optional.of("admin")))
                    .build())
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).build();

    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    metastore = new FileHiveMetastore(
            new NodeVersion("test_version"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));

    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");

    // Register a Hive catalog backed by the same file metastore as the Iceberg catalog
    queryRunner.installPlugin(new TestingHivePlugin(metastore));
    queryRunner.createCatalog("hive", "hive", ImmutableMap.of("hive.security", "sql-standard"));

    return queryRunner;
}
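Because the Iceberg and Hive catalogs are wired to the same FileHiveMetastore instance, a test built on this runner can create a table through one catalog and observe it through the other. A minimal sketch of such a check, using hypothetical schema and table names:

// Hedged sketch: test_schema and iceberg_table are hypothetical names
queryRunner.execute("CREATE SCHEMA iceberg.test_schema");
queryRunner.execute("CREATE TABLE iceberg.test_schema.iceberg_table (c1 INTEGER)");
// The shared metastore makes the table visible to the Hive catalog as well
queryRunner.execute("SHOW TABLES FROM hive.test_schema");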
Use of io.trino.plugin.hive.NodeVersion in project trino by trinodb.
From the class TestIcebergMetastoreAccessOperations, method createQueryRunner:
@Override
protected DistributedQueryRunner createQueryRunner()
        throws Exception
{
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(TEST_SESSION).build();

    File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    HiveMetastore hiveMetastore = new FileHiveMetastore(
            new NodeVersion("testversion"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(baseDir.toURI().toString())
                    .setMetastoreUser("test"));
    // Wrap the metastore so the test can count metastore invocations per query
    metastore = new CountingAccessFileHiveMetastore(hiveMetastore);

    queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
    queryRunner.createCatalog("iceberg", "iceberg");
    queryRunner.execute("CREATE SCHEMA test_schema");

    return queryRunner;
}
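The CountingAccessFileHiveMetastore wrapper records every metastore call made on behalf of a query, which is what lets this test class assert how many metastore round trips a SQL statement costs. A hedged sketch of consulting the wrapper (the accessor names resetCounters() and getMethodInvocations() are assumptions modeled on Trino's test utilities):

// Hedged sketch: count the metastore calls triggered by a single query
metastore.resetCounters();
queryRunner.execute("SELECT * FROM test_schema.some_table");  // some_table is hypothetical
// Inspect which metastore methods were hit, and how often
System.out.println(metastore.getMethodInvocations());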
Use of io.trino.plugin.hive.NodeVersion in project trino by trinodb.
From the class FileHiveMetastore, method createTestingFileHiveMetastore:
@VisibleForTesting
public static FileHiveMetastore createTestingFileHiveMetastore(File catalogDirectory)
{
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
    return new FileHiveMetastore(
            new NodeVersion("testversion"),
            hdfsEnvironment,
            new MetastoreConfig(),
            new FileHiveMetastoreConfig()
                    .setCatalogDirectory(catalogDirectory.toURI().toString())
                    .setMetastoreUser("test"));
}
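Since the factory is static and marked @VisibleForTesting, any test suite can spin up a self-contained, file-backed metastore over a scratch directory. A minimal usage sketch, assuming a JDK temp directory for the catalog root:

// Hedged sketch: build a throwaway metastore for a unit test
File catalogDir = java.nio.file.Files.createTempDirectory("hive-catalog").toFile();
FileHiveMetastore metastore = FileHiveMetastore.createTestingFileHiveMetastore(catalogDir);
metastore.getAllDatabases();  // schemas are persisted as files under catalogDir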
Use of io.trino.plugin.hive.NodeVersion in project trino by trinodb.
From the class TestDeltaLakePageSink, method createPageSink:
private static ConnectorPageSink createPageSink(Path outputPath, DeltaLakeWriterStats stats)
{
    HiveTransactionHandle transaction = new HiveTransactionHandle(false);
    DeltaLakeConfig deltaLakeConfig = new DeltaLakeConfig();
    DeltaLakeOutputTableHandle tableHandle = new DeltaLakeOutputTableHandle(
            SCHEMA_NAME,
            TABLE_NAME,
            getColumnHandles(),
            outputPath.toString(),
            Optional.of(deltaLakeConfig.getDefaultCheckpointWritingInterval()),
            true);
    DeltaLakePageSinkProvider provider = new DeltaLakePageSinkProvider(
            new GroupByHashPageIndexerFactory(new JoinCompiler(new TypeOperators()), new BlockTypeOperators()),
            HDFS_ENVIRONMENT,
            JsonCodec.jsonCodec(DataFileInfo.class),
            stats,
            deltaLakeConfig,
            new TestingTypeManager(),
            new NodeVersion("test-version"));
    return provider.createPageSink(transaction, SESSION, tableHandle);
}
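The returned sink follows the standard ConnectorPageSink life cycle: pages are appended, then finish() yields the serialized fragments (here, JSON-encoded DataFileInfo) that the connector later commits. A hedged sketch of driving the sink, with page construction omitted and getFutureValue taken from io.airlift.concurrent.MoreFutures:

// Hedged sketch: write one page and collect the committed fragments
ConnectorPageSink sink = createPageSink(outputPath, stats);
sink.appendPage(page);  // 'page' would be built elsewhere in the test
Collection<Slice> fragments = getFutureValue(sink.finish());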
Use of io.trino.plugin.hive.NodeVersion in project trino by trinodb.
From the class TestDeltaLakeGlueMetastore, method setUp:
@BeforeClass
public void setUp()
{
    tempDir = Files.createTempDir();
    String temporaryLocation = tempDir.toURI().toString();

    Map<String, String> config = ImmutableMap.<String, String>builder()
            .put("hive.metastore", "glue")
            .put("delta.hide-non-delta-lake-tables", "true")
            .buildOrThrow();

    Bootstrap app = new Bootstrap(
            // connector dependencies
            new JsonModule(),
            binder -> {
                ConnectorContext context = new TestingConnectorContext();
                binder.bind(CatalogName.class).toInstance(new CatalogName("test"));
                binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                binder.bind(NodeManager.class).toInstance(context.getNodeManager());
                binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
                binder.bind(NodeVersion.class).toInstance(new NodeVersion("test_version"));
            },
            // connector modules
            new DeltaLakeMetastoreModule(),
            new DeltaLakeModule(),
            // test setup
            binder -> binder.bind(HdfsEnvironment.class).toInstance(HDFS_ENVIRONMENT));

    Injector injector = app
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    lifeCycleManager = injector.getInstance(LifeCycleManager.class);
    metastoreClient = injector.getInstance(GlueHiveMetastore.class);
    metadataFactory = injector.getInstance(DeltaLakeMetadataFactory.class);

    session = TestingConnectorSession.builder()
            .setPropertyMetadata(injector.getInstance(DeltaLakeSessionProperties.class).getSessionProperties())
            .build();

    databaseName = "test_delta_glue" + randomName();
    metastoreClient.createDatabase(Database.builder()
            .setDatabaseName(databaseName)
            .setOwnerName(Optional.of("public"))
            .setOwnerType(Optional.of(ROLE))
            .setLocation(Optional.of(temporaryLocation))
            .build());
}
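A setup like this normally has a matching cleanup that drops the scratch Glue database and stops the injector-managed services. A hedged sketch of such a tearDown (the dropDatabase signature varies across Trino versions, and deleteRecursively is Guava's MoreFiles):

@AfterClass(alwaysRun = true)
public void tearDown()
        throws IOException
{
    // Hedged sketch: undo what setUp created
    metastoreClient.dropDatabase(databaseName, false);  // signature is version-dependent
    lifeCycleManager.stop();
    deleteRecursively(tempDir.toPath(), ALLOW_INSECURE);
}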