Use of com.facebook.presto.tests.DistributedQueryRunner in project presto by prestodb.
The class JdbcQueryRunner, method createJdbcQueryRunner:
public static DistributedQueryRunner createJdbcQueryRunner(Iterable<TpchTable<?>> tables)
        throws Exception
{
    DistributedQueryRunner queryRunner = null;
    try {
        queryRunner = new DistributedQueryRunner(createSession(), 3);

        // Source catalog: the TPCH connector supplies the tables to copy
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        // Target catalog: an H2 database behind the base-jdbc connector
        Map<String, String> properties = TestingH2JdbcModule.createProperties();
        createSchema(properties, "tpch");
        queryRunner.installPlugin(new JdbcPlugin("base-jdbc", new TestingH2JdbcModule()));
        queryRunner.createCatalog("jdbc", "base-jdbc", properties);

        copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(), tables);
        return queryRunner;
    }
    catch (Throwable e) {
        closeAllSuppress(e, queryRunner);
        throw e;
    }
}
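A typical consumer is a TestNG suite that builds the runner once per class and closes it afterwards. The following sketch is illustrative, not taken from the project: the test class name is hypothetical, while TpchTable.getTables() is the usual way to request all TPCH tables.

import com.facebook.presto.tests.DistributedQueryRunner;
import io.airlift.tpch.TpchTable;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;

// Hypothetical test harness showing how createJdbcQueryRunner is typically consumed
public class TestJdbcDistributedQueries
{
    private DistributedQueryRunner queryRunner;

    @BeforeClass
    public void setUp()
            throws Exception
    {
        // Build the runner with every TPCH table copied into the H2-backed catalog
        queryRunner = JdbcQueryRunner.createJdbcQueryRunner(TpchTable.getTables());
    }

    @AfterClass(alwaysRun = true)
    public void tearDown()
    {
        // Shut down the embedded servers and the H2 database
        queryRunner.close();
        queryRunner = null;
    }
}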
Use of com.facebook.presto.tests.DistributedQueryRunner in project presto by prestodb.
The class ElasticsearchQueryRunner, method createElasticsearchQueryRunner:
public static DistributedQueryRunner createElasticsearchQueryRunner(EmbeddedElasticsearchNode embeddedElasticsearchNode, Iterable<TpchTable<?>> tables)
        throws Exception
{
    DistributedQueryRunner queryRunner = null;
    try {
        queryRunner = DistributedQueryRunner.builder(createSession())
                .setNodeCount(NODE_COUNT)
                .build();
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        embeddedElasticsearchNode.start();

        TestingElasticsearchConnectorFactory testFactory = new TestingElasticsearchConnectorFactory();
        installElasticsearchPlugin(queryRunner, testFactory);

        TestingPrestoClient prestoClient = queryRunner.getRandomClient();

        // Copy each TPCH table into its own Elasticsearch index
        LOG.info("Loading data...");
        long startTime = System.nanoTime();
        for (TpchTable<?> table : tables) {
            loadTpchTopic(embeddedElasticsearchNode, prestoClient, table);
        }
        LOG.info("Loading complete in %s", nanosSince(startTime).toString(SECONDS));

        return queryRunner;
    }
    catch (Exception e) {
        // Close both the runner and the embedded node without masking the original failure
        closeAllSuppress(e, queryRunner, embeddedElasticsearchNode);
        throw e;
    }
}
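Both of the snippets above lean on closeAllSuppress from Airlift's Closeables to clean up partially constructed resources while letting the original exception propagate. A minimal sketch of that contract, assuming the Airlift semantics rather than reproducing its implementation:

// Sketch of the closeAllSuppress contract: best-effort cleanup that never
// masks the exception that triggered it
static void closeAllSuppress(Throwable rootCause, AutoCloseable... closeables)
{
    for (AutoCloseable closeable : closeables) {
        try {
            if (closeable != null) {
                closeable.close();
            }
        }
        catch (Throwable t) {
            // Attach any secondary failure to the root cause instead of throwing it
            if (rootCause != t) {
                rootCause.addSuppressed(t);
            }
        }
    }
}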
Use of com.facebook.presto.tests.DistributedQueryRunner in project presto by prestodb.
The class DruidQueryRunner, method main:
public static void main(String[] args)
        throws Exception
{
    DistributedQueryRunner queryRunner = createDruidQueryRunner(ImmutableMap.of());
    log.info(format("Presto server started: %s", queryRunner.getCoordinator().getBaseUrl()));
}
Use of com.facebook.presto.tests.DistributedQueryRunner in project presto by prestodb.
The class DruidQueryRunner, method createDruidQueryRunner:
public static DistributedQueryRunner createDruidQueryRunner(Map<String, String> connectorProperties)
        throws Exception
{
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(createSession()).build();
    try {
        queryRunner.installPlugin(new DruidPlugin());

        // Copy the caller's properties into a mutable map, then fill in defaults
        connectorProperties = new HashMap<>(ImmutableMap.copyOf(connectorProperties));
        connectorProperties.putIfAbsent("druid.coordinator-url", coordinator);
        connectorProperties.putIfAbsent("druid.broker-url", broker);

        queryRunner.createCatalog(DEFAULT_CATALOG, "druid", connectorProperties);
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
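Because the method copies the incoming map and uses putIfAbsent, callers can override the coordinator and broker endpoints while still inheriting defaults for anything they leave out. A hypothetical call pointing the catalog at a locally running Druid cluster (the URLs are illustrative, not taken from the project):

// Any property not supplied here falls back to the defaults filled in via putIfAbsent
DistributedQueryRunner queryRunner = DruidQueryRunner.createDruidQueryRunner(
        ImmutableMap.of(
                "druid.coordinator-url", "localhost:8081",
                "druid.broker-url", "localhost:8082"));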
Use of com.facebook.presto.tests.DistributedQueryRunner in project presto by prestodb.
The class HiveQueryRunner, method createQueryRunner:
public static DistributedQueryRunner createQueryRunner(Iterable<TpchTable<?>> tables, Map<String, String> extraProperties, Map<String, String> extraCoordinatorProperties, String security, Map<String, String> extraHiveProperties, Optional<Integer> workerCount, Optional<Path> baseDataDir, Optional<BiFunction<Integer, URI, Process>> externalWorkerLauncher)
        throws Exception
{
    assertEquals(DateTimeZone.getDefault(), TIME_ZONE, "Timezone not configured correctly. Add -Duser.timezone=America/Bahia_Banderas to your JVM arguments");
    setupLogging();

    Map<String, String> systemProperties = ImmutableMap.<String, String>builder()
            .put("task.writer-count", "2")
            .put("task.partitioned-writer-count", "4")
            .put("tracing.tracer-type", "simple")
            .put("tracing.enable-distributed-tracing", "simple")
            .putAll(extraProperties)
            .build();

    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))))
            .setNodeCount(workerCount.orElse(4))
            .setExtraProperties(systemProperties)
            .setCoordinatorProperties(extraCoordinatorProperties)
            .setBaseDataDir(baseDataDir)
            .setExternalWorkerLauncher(externalWorkerLauncher)
            .build();
    try {
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.installPlugin(new TestingHiveEventListenerPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        // File-based Hive metastore rooted under the coordinator's data directory
        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
        HiveClientConfig hiveClientConfig = new HiveClientConfig();
        MetastoreClientConfig metastoreClientConfig = new MetastoreClientConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveClientConfig, metastoreClientConfig), ImmutableSet.of());
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, metastoreClientConfig, new NoHdfsAuthentication());
        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        queryRunner.installPlugin(new HivePlugin(HIVE_CATALOG, Optional.of(metastore)));

        Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
                .putAll(extraHiveProperties)
                .put("hive.time-zone", TIME_ZONE.getID())
                .put("hive.security", security)
                .put("hive.max-partitions-per-scan", "1000")
                .put("hive.assume-canonical-partition-keys", "true")
                .put("hive.collect-column-statistics-on-write", "true")
                .put("hive.temporary-table-schema", TEMPORARY_TABLE_SCHEMA)
                .build();
        Map<String, String> storageProperties = extraHiveProperties.containsKey("hive.storage-format")
                ? ImmutableMap.copyOf(hiveProperties)
                : ImmutableMap.<String, String>builder()
                        .putAll(hiveProperties)
                        .put("hive.storage-format", "TEXTFILE")
                        .put("hive.compression-codec", "NONE")
                        .build();
        Map<String, String> hiveBucketedProperties = ImmutableMap.<String, String>builder()
                .putAll(storageProperties)
                // so that each bucket has multiple splits
                .put("hive.max-initial-split-size", "10kB")
                // so that each bucket has multiple splits
                .put("hive.max-split-size", "10kB")
                .build();
        queryRunner.createCatalog(HIVE_CATALOG, HIVE_CATALOG, hiveProperties);
        queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, HIVE_CATALOG, hiveBucketedProperties);

        // Populate the TPCH schemas only once; later runs against the same data directory reuse them
        if (!metastore.getDatabase(METASTORE_CONTEXT, TPCH_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TPCH_SCHEMA));
            copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(METASTORE_CONTEXT, TPCH_BUCKETED_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA));
            copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, createBucketedSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(METASTORE_CONTEXT, TEMPORARY_TABLE_SCHEMA).isPresent()) {
            metastore.createDatabase(METASTORE_CONTEXT, createDatabaseMetastoreObject(TEMPORARY_TABLE_SCHEMA));
        }
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
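The long parameter list is usually hidden behind overloads in the real class; a hypothetical direct call using common defaults (empty property maps, sql-standard security, no external workers) might look like the sketch below. Note the assertion at the top of the method: the JVM must be started with -Duser.timezone=America/Bahia_Banderas or the runner fails fast.

// Hypothetical direct invocation with common defaults: four nodes,
// sql-standard authorization, and no external worker processes
DistributedQueryRunner queryRunner = HiveQueryRunner.createQueryRunner(
        TpchTable.getTables(),
        ImmutableMap.of(),   // extraProperties
        ImmutableMap.of(),   // extraCoordinatorProperties
        "sql-standard",      // security
        ImmutableMap.of(),   // extraHiveProperties
        Optional.empty(),    // workerCount (defaults to 4)
        Optional.empty(),    // baseDataDir
        Optional.empty());   // externalWorkerLauncher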