Use of io.trino.testing.QueryRunner in project trino by trinodb.
The class KuduQueryRunnerFactory, method createKuduQueryRunner.
public static QueryRunner createKuduQueryRunner(TestingKuduServer kuduServer, String kuduSchema)
        throws Exception
{
    QueryRunner runner = null;
    try {
        runner = DistributedQueryRunner.builder(createSession(kuduSchema)).build();
        installKuduConnector(kuduServer.getMasterAddress(), runner, kuduSchema, Optional.of(""));
        return runner;
    }
    catch (Throwable e) {
        // close the partially built runner without masking the original failure
        closeAllSuppress(e, runner);
        throw e;
    }
}
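In a test class, this factory would typically be paired with setup and teardown; the sketch below is illustrative only (the no-argument TestingKuduServer constructor and the schema name "test_schema" are assumptions, not taken from the example above).

// Hypothetical TestNG setup/teardown around the factory above;
// the schema name "test_schema" is only an example.
private TestingKuduServer kuduServer;
private QueryRunner queryRunner;

@BeforeClass
public void setUp()
        throws Exception
{
    kuduServer = new TestingKuduServer();
    queryRunner = KuduQueryRunnerFactory.createKuduQueryRunner(kuduServer, "test_schema");
}

@AfterClass(alwaysRun = true)
public void tearDown()
{
    if (queryRunner != null) {
        queryRunner.close();
    }
    if (kuduServer != null) {
        kuduServer.close();
    }
}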
Use of io.trino.testing.QueryRunner in project trino by trinodb.
The class TestKuduIntegrationDynamicFilter, method testIncompleteDynamicFilterTimeout.
@Test(timeOut = 30_000)
public void testIncompleteDynamicFilterTimeout()
        throws Exception
{
    QueryRunner runner = getQueryRunner();
    TransactionManager transactionManager = runner.getTransactionManager();
    TransactionId transactionId = transactionManager.beginTransaction(false);
    Session session = Session.builder(getSession())
            .setCatalogSessionProperty("kudu", "dynamic_filtering_wait_timeout", "1s")
            .build()
            .beginTransactionId(transactionId, transactionManager, new AllowAllAccessControl());
    QualifiedObjectName tableName = new QualifiedObjectName("kudu", "tpch", "orders");
    Optional<TableHandle> tableHandle = runner.getMetadata().getTableHandle(session, tableName);
    assertTrue(tableHandle.isPresent());
    SplitSource splitSource = runner.getSplitManager()
            .getSplits(session, tableHandle.get(), UNGROUPED_SCHEDULING, new IncompleteDynamicFilter(), alwaysTrue());
    List<Split> splits = new ArrayList<>();
    while (!splitSource.isFinished()) {
        splits.addAll(splitSource.getNextBatch(NOT_PARTITIONED, Lifespan.taskWide(), 1000).get().getSplits());
    }
    splitSource.close();
    assertFalse(splits.isEmpty());
}
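The IncompleteDynamicFilter passed to getSplits is a helper defined in the same test class; a sketch of its likely shape follows (the DynamicFilter interface differs across Trino versions, so the method set here is an approximation). Its isBlocked() future deliberately does not complete within the 1s wait, forcing the timeout path.

// Sketch of the helper: a dynamic filter that never completes, so split
// generation must fall back to dynamic_filtering_wait_timeout.
private static class IncompleteDynamicFilter
        implements DynamicFilter
{
    @Override
    public CompletableFuture<?> isBlocked()
    {
        return CompletableFuture.runAsync(() -> {
            try {
                TimeUnit.HOURS.sleep(1); // effectively "never" for a 1s timeout
            }
            catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }
        });
    }

    @Override
    public boolean isComplete()
    {
        return false;
    }

    @Override
    public boolean isAwaitable()
    {
        return true;
    }

    @Override
    public TupleDomain<ColumnHandle> getCurrentPredicate()
    {
        return TupleDomain.all();
    }
}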
Use of io.trino.testing.QueryRunner in project trino by trinodb.
The class ThriftQueryRunner, method createThriftQueryRunnerInternal.
private static DistributedQueryRunner createThriftQueryRunnerInternal(List<DriftServer> servers, Map<String, String> properties)
        throws Exception
{
    String addresses = servers.stream()
            .map(server -> "localhost:" + driftServerPort(server))
            .collect(joining(","));
    Session defaultSession = testSessionBuilder().setCatalog("thrift").setSchema("tiny").build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(defaultSession)
            .setExtraProperties(properties)
            .build();
    queryRunner.installPlugin(new ThriftPlugin());
    Map<String, String> connectorProperties = ImmutableMap.<String, String>builder()
            .put("trino.thrift.client.addresses", addresses)
            .put("trino.thrift.client.connect-timeout", "30s")
            .put("trino-thrift.lookup-requests-concurrency", "2")
            .buildOrThrow();
    queryRunner.createCatalog("thrift", "trino-thrift", connectorProperties);
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");
    return queryRunner;
}
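The driftServerPort helper referenced above is not shown; a plausible sketch, assuming the servers run Drift's Netty server transport (DriftNettyServerTransport):

// Assumption: each DriftServer was started with the Netty server transport,
// so its listen port can be read back after startup.
private static int driftServerPort(DriftServer server)
{
    return ((DriftNettyServerTransport) server.getServerTransport()).getPort();
}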
Use of io.trino.testing.QueryRunner in project trino by trinodb.
The class TestIcebergQueryFailureRecoveryTest, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner(List<TpchTable<?>> requiredTpchTables, Map<String, String> configProperties, Map<String, String> coordinatorProperties)
        throws Exception
{
    this.minioStorage = new MinioStorage("test-exchange-spooling-" + randomTableSuffix());
    minioStorage.start();
    return IcebergQueryRunner.builder()
            .setInitialTables(requiredTpchTables)
            .setCoordinatorProperties(coordinatorProperties)
            .setExtraProperties(configProperties)
            .setAdditionalSetup(runner -> {
                runner.installPlugin(new FileSystemExchangePlugin());
                runner.loadExchangeManager("filesystem", getExchangeManagerProperties(minioStorage));
            })
            .build();
}
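The MinIO-backed spooling storage is created outside the runner's lifecycle, so the test class should close it itself; a minimal sketch, assuming MinioStorage is AutoCloseable:

// Hypothetical teardown paired with the setup above.
@AfterClass(alwaysRun = true)
public void destroy()
        throws Exception
{
    if (minioStorage != null) {
        minioStorage.close();
        minioStorage = null;
    }
}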
Use of io.trino.testing.QueryRunner in project trino by trinodb.
The class TestSharedGlueMetastore, method createQueryRunner.
@Override
protected QueryRunner createQueryRunner()
        throws Exception
{
    Session icebergSession = testSessionBuilder().setCatalog(ICEBERG_CATALOG).setSchema(schema).build();
    Session hiveSession = testSessionBuilder().setCatalog(HIVE_CATALOG).setSchema(schema).build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(icebergSession).build();
    queryRunner.installPlugin(new TpchPlugin());
    queryRunner.createCatalog("tpch", "tpch");

    this.dataDirectory = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data");
    this.dataDirectory.toFile().deleteOnExit();

    // Iceberg catalog backed by the Glue metastore
    queryRunner.installPlugin(new IcebergPlugin());
    queryRunner.createCatalog(ICEBERG_CATALOG, "iceberg", ImmutableMap.of(
            "iceberg.catalog.type", "glue",
            "hive.metastore.glue.default-warehouse-dir", dataDirectory.toString()));

    // Hive catalogs sharing the same Glue metastore
    HdfsConfig hdfsConfig = new HdfsConfig();
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(
            new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of()),
            hdfsConfig,
            new NoHdfsAuthentication());
    this.glueMetastore = new GlueHiveMetastore(
            hdfsEnvironment,
            new GlueHiveMetastoreConfig(),
            directExecutor(),
            new DefaultGlueColumnStatisticsProviderFactory(new GlueHiveMetastoreConfig(), directExecutor(), directExecutor()),
            Optional.empty(),
            table -> true);
    queryRunner.installPlugin(new TestingHivePlugin(glueMetastore));
    queryRunner.createCatalog(HIVE_CATALOG, "hive");
    queryRunner.createCatalog("hive_with_redirections", "hive", ImmutableMap.of("hive.iceberg-catalog-name", "iceberg"));

    queryRunner.execute("CREATE SCHEMA " + schema + " WITH (location = '" + dataDirectory.toString() + "')");
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, icebergSession, ImmutableList.of(TpchTable.NATION));
    copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, hiveSession, ImmutableList.of(TpchTable.REGION));
    return queryRunner;
}
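Because both connectors point at the same Glue metastore, tables created through one catalog are visible through the other; a hypothetical follow-up check (catalog names and counts follow the TPC-H tiny tables copied above):

// Hypothetical assertions: nation was loaded through the Iceberg catalog,
// region through the Hive catalog, but both live in the shared metastore.
assertQuery("SELECT count(*) FROM iceberg." + schema + ".nation", "VALUES 25");
assertQuery("SELECT count(*) FROM hive." + schema + ".region", "VALUES 5");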