Use of io.trino.plugin.hive.containers.HiveMinioDataLake in project trino by trinodb.
The class BaseTestHiveOnDataLake, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    this.bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = closeAfterClass(
            new HiveMinioDataLake(bucketName, ImmutableMap.of(), hiveHadoopImage));
    this.dockerizedS3DataLake.start();
    this.metastoreClient = new BridgingHiveMetastore(
            new ThriftHiveMetastore(
                    new TestingMetastoreLocator(
                            Optional.empty(),
                            this.dockerizedS3DataLake.getHiveHadoop().getHiveMetastoreEndpoint()),
                    new HiveConfig(),
                    new MetastoreConfig(),
                    new ThriftMetastoreConfig(),
                    new HdfsEnvironment(
                            new HiveHdfsConfiguration(
                                    new HdfsConfigurationInitializer(new HdfsConfig(), ImmutableSet.of()),
                                    ImmutableSet.of()),
                            new HdfsConfig(),
                            new NoHdfsAuthentication()),
                    false),
            HiveIdentity.none());
    return S3HiveQueryRunner.create(
            dockerizedS3DataLake,
            ImmutableMap.<String, String>builder()
                    .put("hive.insert-existing-partitions-behavior", "OVERWRITE")
                    .put("hive.non-managed-table-writes-enabled", "true")
                    .put("hive.metastore-cache-ttl", "1d")
                    .put("hive.metastore-refresh-interval", "1d")
                    .buildOrThrow());
}
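The hive.insert-existing-partitions-behavior=OVERWRITE property set above makes a repeated insert into an existing partition replace its contents rather than append. A minimal sketch of how a subclass test might exercise this follows; it is not taken from the Trino sources, the table name and values are hypothetical, and it assumes the assertUpdate/assertQuery helpers available to tests built on Trino's AbstractTestQueryFramework.

@Test
public void testOverwriteExistingPartition()
{
    // Hypothetical table name; partitioned so that the OVERWRITE behavior applies
    assertUpdate("CREATE TABLE test_overwrite (value bigint, part bigint) WITH (partitioned_by = ARRAY['part'])");
    assertUpdate("INSERT INTO test_overwrite VALUES (1, 10)", 1);
    // With OVERWRITE configured, this second insert replaces partition part=10 instead of appending to it
    assertUpdate("INSERT INTO test_overwrite VALUES (2, 10)", 1);
    assertQuery("SELECT value FROM test_overwrite WHERE part = 10", "VALUES 2");
    assertUpdate("DROP TABLE test_overwrite");
}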
Use of io.trino.plugin.hive.containers.HiveMinioDataLake in project trino by trinodb.
The class TestHiveQueryFailureRecoveryTest, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner(
        List<TpchTable<?>> requiredTpchTables,
        Map<String, String> configProperties,
        Map<String, String> coordinatorProperties)
        throws Exception
{
    // Randomize the bucket name to ensure cached TrinoS3FileSystem objects are not reused
    String bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = new HiveMinioDataLake(bucketName, ImmutableMap.of(), HiveHadoop.DEFAULT_IMAGE);
    dockerizedS3DataLake.start();
    this.minioStorage = new MinioStorage("test-exchange-spooling-" + randomTableSuffix());
    minioStorage.start();
    return S3HiveQueryRunner.builder(dockerizedS3DataLake)
            .setInitialTables(requiredTpchTables)
            .setExtraProperties(configProperties)
            .setCoordinatorProperties(coordinatorProperties)
            .setAdditionalSetup(runner -> {
                runner.installPlugin(new FileSystemExchangePlugin());
                runner.loadExchangeManager("filesystem", getExchangeManagerProperties(minioStorage));
            })
            .setHiveProperties(ImmutableMap.<String, String>builder()
                    .put("hive.s3.streaming.enabled", "false")
                    .buildOrThrow())
            .build();
}
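Unlike the first example, which registers the data lake with closeAfterClass, this method starts both containers manually, so the test class needs a matching cleanup hook. A sketch of such a teardown follows; the method name and TestNG annotation are assumptions, and it assumes both containers are AutoCloseable (as the closeAfterClass usage in the first example suggests for HiveMinioDataLake).

@AfterClass(alwaysRun = true)
public void destroy()
        throws Exception
{
    // Assumption: both containers implement AutoCloseable
    if (dockerizedS3DataLake != null) {
        dockerizedS3DataLake.close();
        dockerizedS3DataLake = null;
    }
    if (minioStorage != null) {
        minioStorage.close();
        minioStorage = null;
    }
}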
Use of io.trino.plugin.hive.containers.HiveMinioDataLake in project trino by trinodb.
The class TestHiveTaskFailureRecoveryTest, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner(
        List<TpchTable<?>> requiredTpchTables,
        Map<String, String> configProperties,
        Map<String, String> coordinatorProperties)
        throws Exception
{
    // Randomize the bucket name to ensure cached TrinoS3FileSystem objects are not reused
    String bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = new HiveMinioDataLake(bucketName, ImmutableMap.of(), HiveHadoop.DEFAULT_IMAGE);
    dockerizedS3DataLake.start();
    this.minioStorage = new MinioStorage("test-exchange-spooling-" + randomTableSuffix());
    minioStorage.start();
    return S3HiveQueryRunner.builder(dockerizedS3DataLake)
            .setInitialTables(requiredTpchTables)
            .setExtraProperties(ImmutableMap.<String, String>builder()
                    .putAll(configProperties)
                    .put("enable-dynamic-filtering", "false")
                    .buildOrThrow())
            .setCoordinatorProperties(coordinatorProperties)
            .setAdditionalSetup(runner -> {
                runner.installPlugin(new FileSystemExchangePlugin());
                runner.loadExchangeManager("filesystem", getExchangeManagerProperties(minioStorage));
            })
            .setHiveProperties(ImmutableMap.<String, String>builder()
                    .put("hive.s3.streaming.enabled", "false")
                    .buildOrThrow())
            .build();
}
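The only difference from the query-failure variant above is that this task-recovery test additionally disables dynamic filtering via enable-dynamic-filtering=false. Both recovery tests spool exchange data to a separate MinIO bucket through the filesystem exchange manager; the getExchangeManagerProperties(minioStorage) helper they call is not shown on this page. A plausible sketch of the configuration it assembles follows, where the property keys, accessor names, and credentials are all assumptions rather than the actual Trino helper.

private static Map<String, String> getExchangeManagerProperties(MinioStorage minioStorage)
{
    // Hypothetical sketch: point the filesystem exchange manager at the MinIO bucket.
    // Keys, accessors, and credentials below are assumptions, not the actual Trino helper.
    return ImmutableMap.<String, String>builder()
            .put("exchange.base-directories", "s3://" + minioStorage.getBucketName())
            .put("exchange.s3.aws-access-key", "accesskey")
            .put("exchange.s3.aws-secret-key", "secretkey")
            .put("exchange.s3.region", "us-east-1")
            .put("exchange.s3.endpoint", minioStorage.getEndpoint())
            .buildOrThrow();
}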