Example usage of io.trino.plugin.exchange.FileSystemExchangePlugin in the trinodb/trino project, taken from the createHiveQueryRunner method of class BaseHiveConnectorTest.
/**
 * Builds a {@link DistributedQueryRunner} configured for Hive connector tests.
 *
 * @param extraProperties additional coordinator/worker configuration properties
 * @param exchangeManagerProperties when non-empty, the filesystem exchange manager is
 *        installed and loaded with these properties (enables spooled exchange tests)
 * @return the fully initialized query runner, including an extra catalog with
 *         nanosecond timestamp precision
 * @throws Exception if the runner fails to start
 */
protected static QueryRunner createHiveQueryRunner(Map<String, String> extraProperties, Map<String, String> exchangeManagerProperties)
        throws Exception
{
    Map<String, String> hiveProperties = ImmutableMap.of(
            "hive.allow-register-partition-procedure", "true",
            // Reduce writer sort buffer size to ensure SortingFileWriter gets used
            "hive.writer-sort-buffer-size", "1MB",
            // Make weighted split scheduling more conservative to avoid OOMs in test
            "hive.minimum-assigned-split-weight", "0.5");

    DistributedQueryRunner queryRunner = HiveQueryRunner.builder()
            .setExtraProperties(extraProperties)
            .setAdditionalSetup(installedRunner -> {
                // Only wire up the exchange manager when the caller supplied properties for it
                if (!exchangeManagerProperties.isEmpty()) {
                    installedRunner.installPlugin(new FileSystemExchangePlugin());
                    installedRunner.loadExchangeManager("filesystem", exchangeManagerProperties);
                }
            })
            .setHiveProperties(hiveProperties)
            .addExtraProperty("legacy.allow-set-view-authorization", "true")
            .setInitialTables(REQUIRED_TPCH_TABLES)
            .build();

    // extra catalog with NANOSECOND timestamp precision
    queryRunner.createCatalog("hive_timestamp_nanos", "hive", ImmutableMap.of("hive.timestamp-precision", "NANOSECONDS"));
    return queryRunner;
}
Example usage of io.trino.plugin.exchange.FileSystemExchangePlugin in the trinodb/trino project, taken from the createQueryRunner method of class TestHiveFaultTolerantExecutionWindowQueries.
@Override
/**
 * Creates a Hive query runner backed by a MinIO bucket used as spooling storage
 * for the filesystem exchange manager.
 *
 * @param extraProperties additional coordinator/worker configuration properties
 * @return the initialized query runner with the standard test tables loaded
 * @throws Exception if MinIO or the runner fails to start
 */
@Override
protected QueryRunner createQueryRunner(Map<String, String> extraProperties)
        throws Exception
{
    // Randomized bucket name avoids collisions between concurrent test runs
    this.minioStorage = new MinioStorage("test-exchange-spooling-" + randomTableSuffix());
    minioStorage.start();

    return HiveQueryRunner.builder()
            .setExtraProperties(extraProperties)
            .setAdditionalSetup(installedRunner -> {
                installedRunner.installPlugin(new FileSystemExchangePlugin());
                installedRunner.loadExchangeManager("filesystem", getExchangeManagerProperties(minioStorage));
            })
            .setInitialTables(getTables())
            .build();
}
Example usage of io.trino.plugin.exchange.FileSystemExchangePlugin in the trinodb/trino project, taken from the createQueryRunner method of class TestHiveQueryFailureRecoveryTest.
@Override
/**
 * Creates an S3-backed Hive query runner with the filesystem exchange manager
 * spooling to a dedicated MinIO bucket, for query-level failure recovery tests.
 *
 * @param requiredTpchTables TPC-H tables to load into the catalog
 * @param configProperties additional coordinator/worker configuration properties
 * @param coordinatorProperties coordinator-only configuration properties
 * @return the initialized query runner
 * @throws Exception if MinIO, the Hive data lake, or the runner fails to start
 */
@Override
protected QueryRunner createQueryRunner(List<TpchTable<?>> requiredTpchTables, Map<String, String> configProperties, Map<String, String> coordinatorProperties)
        throws Exception
{
    // randomizing bucket name to ensure cached TrinoS3FileSystem objects are not reused
    // NOTE(review): the "test-hive-insert-overwrite-" prefix looks copy-pasted from
    // another test — presumably harmless since only uniqueness matters; confirm.
    String bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = new HiveMinioDataLake(bucketName, ImmutableMap.of(), HiveHadoop.DEFAULT_IMAGE);
    dockerizedS3DataLake.start();

    // Separate MinIO bucket used purely for exchange spooling
    this.minioStorage = new MinioStorage("test-exchange-spooling-" + randomTableSuffix());
    minioStorage.start();

    return S3HiveQueryRunner.builder(dockerizedS3DataLake)
            .setInitialTables(requiredTpchTables)
            .setExtraProperties(configProperties)
            .setCoordinatorProperties(coordinatorProperties)
            .setAdditionalSetup(installedRunner -> {
                installedRunner.installPlugin(new FileSystemExchangePlugin());
                installedRunner.loadExchangeManager("filesystem", getExchangeManagerProperties(minioStorage));
            })
            .setHiveProperties(ImmutableMap.<String, String>builder()
                    .put("hive.s3.streaming.enabled", "false")
                    .buildOrThrow())
            .build();
}
Example usage of io.trino.plugin.exchange.FileSystemExchangePlugin in the trinodb/trino project, taken from the createQueryRunner method of class TestIcebergQueryFailureRecoveryTest.
@Override
/**
 * Creates an Iceberg query runner with the filesystem exchange manager spooling
 * to a MinIO bucket, for query-level failure recovery tests.
 *
 * @param requiredTpchTables TPC-H tables to load into the catalog
 * @param configProperties additional coordinator/worker configuration properties
 * @param coordinatorProperties coordinator-only configuration properties
 * @return the initialized query runner
 * @throws Exception if MinIO or the runner fails to start
 */
@Override
protected QueryRunner createQueryRunner(List<TpchTable<?>> requiredTpchTables, Map<String, String> configProperties, Map<String, String> coordinatorProperties)
        throws Exception
{
    // Randomized bucket name avoids collisions between concurrent test runs
    this.minioStorage = new MinioStorage("test-exchange-spooling-" + randomTableSuffix());
    minioStorage.start();

    return IcebergQueryRunner.builder()
            .setInitialTables(requiredTpchTables)
            .setCoordinatorProperties(coordinatorProperties)
            .setExtraProperties(configProperties)
            .setAdditionalSetup(installedRunner -> {
                installedRunner.installPlugin(new FileSystemExchangePlugin());
                installedRunner.loadExchangeManager("filesystem", getExchangeManagerProperties(minioStorage));
            })
            .build();
}
Example usage of io.trino.plugin.exchange.FileSystemExchangePlugin in the trinodb/trino project, taken from the createQueryRunner method of class TestHiveTaskFailureRecoveryTest.
@Override
/**
 * Creates an S3-backed Hive query runner with the filesystem exchange manager
 * spooling to a dedicated MinIO bucket, for task-level failure recovery tests.
 * Dynamic filtering is disabled via an extra property on top of the caller's config.
 *
 * @param requiredTpchTables TPC-H tables to load into the catalog
 * @param configProperties additional coordinator/worker configuration properties
 * @param coordinatorProperties coordinator-only configuration properties
 * @return the initialized query runner
 * @throws Exception if MinIO, the Hive data lake, or the runner fails to start
 */
@Override
protected QueryRunner createQueryRunner(List<TpchTable<?>> requiredTpchTables, Map<String, String> configProperties, Map<String, String> coordinatorProperties)
        throws Exception
{
    // randomizing bucket name to ensure cached TrinoS3FileSystem objects are not reused
    // NOTE(review): the "test-hive-insert-overwrite-" prefix looks copy-pasted from
    // another test — presumably harmless since only uniqueness matters; confirm.
    String bucketName = "test-hive-insert-overwrite-" + randomTableSuffix();
    this.dockerizedS3DataLake = new HiveMinioDataLake(bucketName, ImmutableMap.of(), HiveHadoop.DEFAULT_IMAGE);
    dockerizedS3DataLake.start();

    // Separate MinIO bucket used purely for exchange spooling
    this.minioStorage = new MinioStorage("test-exchange-spooling-" + randomTableSuffix());
    minioStorage.start();

    // Layer "enable-dynamic-filtering=false" on top of the caller-supplied config
    Map<String, String> extraProperties = ImmutableMap.<String, String>builder()
            .putAll(configProperties)
            .put("enable-dynamic-filtering", "false")
            .buildOrThrow();

    return S3HiveQueryRunner.builder(dockerizedS3DataLake)
            .setInitialTables(requiredTpchTables)
            .setExtraProperties(extraProperties)
            .setCoordinatorProperties(coordinatorProperties)
            .setAdditionalSetup(installedRunner -> {
                installedRunner.installPlugin(new FileSystemExchangePlugin());
                installedRunner.loadExchangeManager("filesystem", getExchangeManagerProperties(minioStorage));
            })
            .setHiveProperties(ImmutableMap.<String, String>builder()
                    .put("hive.s3.streaming.enabled", "false")
                    .buildOrThrow())
            .build();
}
Aggregations