Example use of com.facebook.presto.tests.DistributedQueryRunner in the prestodb/presto project:
the createQueryRunner method of the TestIcebergSystemTables class.
@Override
protected QueryRunner createQueryRunner() throws Exception {
    // Session targeting the Iceberg catalog under test.
    Session icebergSession = testSessionBuilder()
            .setCatalog(ICEBERG_CATALOG)
            .build();
    DistributedQueryRunner runner = DistributedQueryRunner.builder(icebergSession).build();

    // Root the file-based metastore catalog under the coordinator's data directory.
    Path catalogDirectory = runner.getCoordinator()
            .getBaseDataDir()
            .resolve("iceberg_data")
            .resolve("catalog");

    runner.installPlugin(new IcebergPlugin());
    runner.createCatalog(
            ICEBERG_CATALOG,
            "iceberg",
            ImmutableMap.<String, String>builder()
                    .put("hive.metastore", "file")
                    .put("hive.metastore.catalog.dir", catalogDirectory.toFile().toURI().toString())
                    .build());
    return runner;
}
Example use of com.facebook.presto.tests.DistributedQueryRunner in the prestodb/presto project:
the createCassandraQueryRunner method of the CassandraQueryRunner class.
/**
 * Creates a query runner backed by an embedded Cassandra server, with both the
 * tpch and cassandra catalogs registered. On first invocation the TPC-H tiny
 * tables are copied into Cassandra; subsequent calls reuse the loaded data
 * (guarded by the {@code tpchLoaded} flag — hence {@code synchronized}).
 *
 * @return a fully initialized {@link DistributedQueryRunner}; the caller owns it and must close it
 * @throws Exception if the embedded server or the runner fails to start
 */
public static synchronized DistributedQueryRunner createCassandraQueryRunner() throws Exception {
    EmbeddedCassandra.start();
    DistributedQueryRunner queryRunner = new DistributedQueryRunner(createCassandraSession("tpch"), 4);
    try {
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");
        queryRunner.installPlugin(new CassandraPlugin());
        queryRunner.createCatalog("cassandra", "cassandra", ImmutableMap.of(
                "cassandra.contact-points", EmbeddedCassandra.getHost(),
                "cassandra.native-protocol-port", Integer.toString(EmbeddedCassandra.getPort()),
                "cassandra.allow-drop-table", "true"));
        if (!tpchLoaded) {
            createKeyspace(EmbeddedCassandra.getSession(), "tpch");
            List<TpchTable<?>> tables = TpchTable.getTables();
            copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createCassandraSession("tpch"), tables);
            // Parameterized loop variable (was a raw TpchTable) so element use is type-checked.
            for (TpchTable<?> table : tables) {
                EmbeddedCassandra.refreshSizeEstimates("tpch", table.getTableName());
            }
            tpchLoaded = true;
        }
        return queryRunner;
    }
    catch (Throwable e) {
        // Was leaked on failure before: close the runner but keep the original
        // failure as the primary exception, suppressing any close-time error.
        try {
            queryRunner.close();
        }
        catch (Throwable closeFailure) {
            e.addSuppressed(closeFailure);
        }
        throw e;
    }
}
Example use of com.facebook.presto.tests.DistributedQueryRunner in the prestodb/presto project:
the testRecoverableGroupedExecution method of the TestHiveRecoverableExecution class.
/**
 * Drives one recoverable grouped-execution scenario end to end:
 * runs {@code queryWithoutFailure} against a healthy cluster, then runs
 * {@code queryWithFailure} while two workers are stopped mid-query, and
 * asserts both report {@code expectedUpdateCount} updated rows.
 *
 * @param queryRunner cluster under test; assumes at least two non-coordinator workers — TODO confirm
 * @param writerConcurrency writer concurrency passed into the recoverable session
 * @param optimizedPartitionUpdateSerializationEnabled session toggle under test
 * @param preQueries setup statements run before the measured queries
 * @param queryWithoutFailure query executed with all workers healthy
 * @param queryWithFailure equivalent query executed while workers are being stopped
 * @param expectedUpdateCount expected update count for both queries
 * @param postQueries cleanup statements; run BEFORE setup (to clear leftovers
 *        from earlier runs) and again in the finally block
 */
private void testRecoverableGroupedExecution(DistributedQueryRunner queryRunner, int writerConcurrency, boolean optimizedPartitionUpdateSerializationEnabled, List<String> preQueries, @Language("SQL") String queryWithoutFailure, @Language("SQL") String queryWithFailure, int expectedUpdateCount, List<String> postQueries) throws Exception {
waitUntilAllNodesAreHealthy(queryRunner, new Duration(10, SECONDS));
Session recoverableSession = createRecoverableSession(writerConcurrency, optimizedPartitionUpdateSerializationEnabled);
// Intentionally run the cleanup statements up front so leftovers from a
// previous (possibly aborted) run don't interfere with this one.
for (@Language("SQL") String postQuery : postQueries) {
queryRunner.execute(recoverableSession, postQuery);
}
try {
for (@Language("SQL") String preQuery : preQueries) {
queryRunner.execute(recoverableSession, preQuery);
}
// test no failure case
Stopwatch noRecoveryStopwatch = Stopwatch.createStarted();
assertEquals(queryRunner.execute(recoverableSession, queryWithoutFailure).getUpdateCount(), OptionalLong.of(expectedUpdateCount));
log.info("Query with no recovery took %sms", noRecoveryStopwatch.elapsed(MILLISECONDS));
// cancel all queries and tasks to make sure we are dealing only with a single running query
cancelAllQueries(queryRunner);
cancelAllTasks(queryRunner);
// test failure case
Stopwatch recoveryStopwatch = Stopwatch.createStarted();
// Run the failure-case query asynchronously so workers can be stopped while it executes.
ListenableFuture<MaterializedResult> result = executor.submit(() -> queryRunner.execute(recoverableSession, queryWithFailure));
List<TestingPrestoServer> workers = queryRunner.getServers().stream().filter(server -> !server.isCoordinator()).collect(toList());
// Shuffle so each invocation kills a different pair of workers.
shuffle(workers);
TestingPrestoServer worker1 = workers.get(0);
// kill worker1 right away, to make sure recoverable execution works in cases when the task hasn't been yet submitted
worker1.stopResponding();
// kill worker2 only after the task has been scheduled
TestingPrestoServer worker2 = workers.get(1);
// NOTE(review): fixed 1s sleep assumes scheduling happens within that window — timing-sensitive.
sleep(1000);
worker2.stopResponding();
// Generous 1000s timeout: recovery can legitimately take a while under load.
assertEquals(result.get(1000, SECONDS).getUpdateCount(), OptionalLong.of(expectedUpdateCount));
log.info("Query with recovery took %sms", recoveryStopwatch.elapsed(MILLISECONDS));
} finally {
// Restore every worker and clean up, regardless of how the test body exited.
queryRunner.getServers().forEach(TestingPrestoServer::startResponding);
cancelAllQueries(queryRunner);
cancelAllTasks(queryRunner);
for (@Language("SQL") String postQuery : postQueries) {
queryRunner.execute(recoverableSession, postQuery);
}
}
}
Example use of com.facebook.presto.tests.DistributedQueryRunner in the prestodb/presto project:
the main method of the OracleQueryRunner class.
/**
 * Entry point for standing up a local Oracle-backed query runner for manual testing.
 * Starts a testing Oracle server, loads the TPC-H tables, and logs the coordinator URL.
 */
public static void main(String[] args) throws Exception {
    Logging.initialize();
    Logger log = Logger.get(OracleQueryRunner.class);
    DistributedQueryRunner runner = createOracleQueryRunner(new TestingOracleServer(), TpchTable.getTables());
    log.info("======== SERVER STARTED ========");
    log.info("\n====\n%s\n====", runner.getCoordinator().getBaseUrl());
}
Example use of com.facebook.presto.tests.DistributedQueryRunner in the prestodb/presto project:
the createMySqlQueryRunner method of the MySqlQueryRunner class.
/**
 * Creates a query runner with the tpch and mysql catalogs registered and the
 * TPC-H tiny tables copied into MySQL. If setup fails partway, the runner is
 * closed with any close-time error suppressed onto the original failure.
 *
 * @param jdbcUrl JDBC URL used when the caller supplies no "connection-url" property
 * @param connectorProperties caller-supplied connector properties; not mutated
 * @param tables TPC-H tables to copy into the mysql catalog
 * @return a fully initialized runner; the caller owns it and must close it
 */
public static QueryRunner createMySqlQueryRunner(String jdbcUrl, Map<String, String> connectorProperties, Iterable<TpchTable<?>> tables) throws Exception {
    DistributedQueryRunner runner = null;
    try {
        runner = new DistributedQueryRunner(createSession(), 3);
        runner.installPlugin(new TpchPlugin());
        runner.createCatalog("tpch", "tpch");

        // Defensive copy (ImmutableMap.copyOf also rejects null keys/values),
        // then fill in defaults without clobbering caller-supplied values.
        Map<String, String> properties = new HashMap<>(ImmutableMap.copyOf(connectorProperties));
        properties.putIfAbsent("connection-url", jdbcUrl);
        properties.putIfAbsent("allow-drop-table", "true");

        runner.installPlugin(new MySqlPlugin());
        runner.createCatalog("mysql", "mysql", properties);
        copyTpchTables(runner, "tpch", TINY_SCHEMA_NAME, createSession(), tables);
        return runner;
    }
    catch (Throwable e) {
        closeAllSuppress(e, runner);
        throw e;
    }
}
Aggregations