Use of io.prestosql.tests.DistributedQueryRunner in project hetu-core by openlookeng.
From the class GreenPlumQueryRunner, the method createGreenPlumQueryRunner:
public static QueryRunner createGreenPlumQueryRunner(TestingPostgreSqlServer server, Map<String, String> connectorProperties, Iterable<TpchTable<?>> tables)
        throws Exception
{
    DistributedQueryRunner queryRunner = null;
    try {
        queryRunner = new DistributedQueryRunner(createSession(), 3);

        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        Map<String, String> connectorPropertiesMap = new HashMap<>(ImmutableMap.copyOf(connectorProperties));
        connectorPropertiesMap.putIfAbsent("connection-url", server.getJdbcUrl());
        connectorPropertiesMap.putIfAbsent("allow-drop-table", "true");
        connectorPropertiesMap.putIfAbsent("jdbc.pushdown-enabled", "false");

        createSchema(server.getJdbcUrl(), "tpch");

        queryRunner.installPlugin(new GreenPlumSqlPlugin());
        queryRunner.createCatalog(GREENPLUM_CONNECTOR_NAME, GREENPLUM_CONNECTOR_NAME, connectorPropertiesMap);

        copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(), tables);

        return queryRunner;
    }
    catch (Throwable e) {
        closeAllSuppress(e, queryRunner, server);
        throw e;
    }
}
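For orientation, a minimal usage sketch: a test might obtain a runner from this factory roughly as follows. The TestingPostgreSqlServer constructor arguments and the overridden property are assumptions for illustration, not taken from hetu-core; note that any property the caller supplies wins over the putIfAbsent defaults above.

    // Hypothetical test setup; constructor arguments and the property override are illustrative.
    TestingPostgreSqlServer server = new TestingPostgreSqlServer("testuser", "tpch");
    QueryRunner queryRunner = createGreenPlumQueryRunner(
            server,
            ImmutableMap.of("allow-drop-table", "false"),   // caller-supplied value beats the default
            TpchTable.getTables());
    queryRunner.execute("SELECT count(*) FROM orders");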
Use of io.prestosql.tests.DistributedQueryRunner in project hetu-core by openlookeng.
From the class JdbcQueryRunner, the method createJdbcQueryRunner:
public static DistributedQueryRunner createJdbcQueryRunner(Iterable<TpchTable<?>> tables)
        throws Exception
{
    DistributedQueryRunner queryRunner = null;
    try {
        queryRunner = new DistributedQueryRunner(createSession(), 3);

        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        Map<String, String> properties = TestingH2JdbcModule.createProperties();
        createSchema(properties, "tpch");

        queryRunner.installPlugin(new JdbcPlugin("base-jdbc", new TestingH2JdbcModule()));
        queryRunner.createCatalog("jdbc", "base-jdbc", properties);

        copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(), tables);

        return queryRunner;
    }
    catch (Throwable e) {
        closeAllSuppress(e, queryRunner);
        throw e;
    }
}
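A quick smoke test built on this runner might look like the sketch below. The query and assertion are illustrative, not from the project; they rely only on the fact that the TPC-H tiny tables are copied into the H2-backed "jdbc" catalog, and that nation has 25 rows in the tiny schema.

    // Hypothetical smoke test against the H2-backed catalog created above.
    DistributedQueryRunner runner = createJdbcQueryRunner(TpchTable.getTables());
    MaterializedResult rows = runner.execute("SELECT count(*) FROM jdbc.tpch.nation");
    assertEquals(rows.getOnlyValue(), 25L);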
Use of io.prestosql.tests.DistributedQueryRunner in project hetu-core by openlookeng.
From the class ElasticsearchQueryRunner, the method main:
public static void main(String[] args)
        throws Exception
{
    Logging.initialize();
    DistributedQueryRunner queryRunner = createElasticsearchQueryRunner(
            EmbeddedElasticsearchNode.createEmbeddedElasticsearchNode(),
            TpchTable.getTables());
    Thread.sleep(10);
    Logger log = Logger.get(ElasticsearchQueryRunner.class);
    log.info("======== SERVER STARTED ========");
    log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl());
}
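Once main prints the coordinator URL, the embedded node already holds the TPC-H tables, so queries can be issued through the Elasticsearch connector. A hedged sketch; the "elasticsearch" catalog and "tpch" schema names are assumptions based on the conventions the other runners on this page follow.

    // Hypothetical follow-up query against the runner started in main;
    // catalog and schema names are assumed, not confirmed by the snippet above.
    MaterializedResult rows = queryRunner.execute("SELECT count(*) FROM elasticsearch.tpch.orders");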
Use of io.prestosql.tests.DistributedQueryRunner in project hetu-core by openlookeng.
From the class HiveQueryRunner, the method createQueryRunner:
public static DistributedQueryRunner createQueryRunner(Iterable<TpchTable<?>> tables, Map<String, String> extraProperties, String security, Map<String, String> extraHiveProperties, Optional<Path> baseDataDir, boolean hasStateStore, String jdbcUrl)
        throws Exception
{
    assertEquals(DateTimeZone.getDefault(), TIME_ZONE, "Timezone not configured correctly. Add -Duser.timezone=America/Bahia_Banderas to your JVM arguments");
    setupLogging();

    DistributedQueryRunner queryRunner = null;
    if (hasStateStore) {
        queryRunner = DistributedQueryRunner
                .builder(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))))
                .setNodeCount(4)
                .setExtraProperties(extraProperties)
                .setBaseDataDir(baseDataDir)
                .buildWithStateStore();
    }
    else {
        Map<String, String> configProperties = new HashMap<>();
        configProperties.put("auto-vacuum.enabled", "true");
        configProperties.put("optimizer.cte-reuse-enabled", "true");
        configProperties.put("auto-vacuum.scan.interval", "15s");
        configProperties.put("hetu.split-cache-map.enabled", "true");

        queryRunner = DistributedQueryRunner
                .builder(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))))
                .setNodeCount(4)
                .setCoordinatorProperties(configProperties)
                .setExtraProperties(extraProperties)
                .setBaseDataDir(baseDataDir)
                .build();
    }

    try {
        if (jdbcUrl != null && !jdbcUrl.isEmpty()) {
            // Write an etc/hetu-metastore.properties file so the JDBC-backed Hetu metastore
            // (and the star-tree cube plugin that depends on it) can be loaded.
            File directory = new File("");
            String courseFile = directory.getCanonicalPath();
            System.setProperty("config", courseFile + "/etc/");
            String configDir = System.getProperty("config");
            String hetumetastoreConfig = configDir + "hetu-metastore.properties";
            File file = new File(configDir);
            if (!file.exists()) {
                file.mkdirs();
            }
            File file2 = new File(configDir, "hetu-metastore.properties");
            if (!file2.exists()) {
                try {
                    file2.createNewFile();
                }
                catch (IOException e) {
                    log.info(e.getMessage());
                }
            }
            try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(hetumetastoreConfig))) {
                bufferedWriter.write("hetu.metastore.db.url = " + jdbcUrl);
                bufferedWriter.write("\n");
                bufferedWriter.write("hetu.metastore.type = jdbc\n");
                bufferedWriter.write("hetu.metastore.db.user = user\n");
                bufferedWriter.write("hetu.metastore.db.password = testpass\n");
                bufferedWriter.write("hetu.metastore.cache.ttl = 0s");
            }
            queryRunner.installPlugin(new HetuMetastorePlugin());
            queryRunner.getCoordinator().loadMetastore();
            queryRunner.installPlugin(new StarTreePlugin());
        }

        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        // Back the Hive connector with a file-based metastore under the runner's data directory.
        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
        HiveConfig hiveConfig = new HiveConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        queryRunner.installPlugin(new HivePlugin(HIVE_CATALOG, Optional.of(metastore)));

        Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
                .putAll(extraHiveProperties)
                .put("hive.rcfile.time-zone", TIME_ZONE.getID())
                .put("hive.parquet.time-zone", TIME_ZONE.getID())
                .put("hive.security", security)
                .put("hive.max-partitions-per-scan", "1000")
                .put("hive.assume-canonical-partition-keys", "true")
                .build();
        Map<String, String> hiveBucketedProperties = ImmutableMap.<String, String>builder()
                .putAll(hiveProperties)
                .put("hive.max-initial-split-size", "10kB") // so that each bucket has multiple splits
                .put("hive.max-split-size", "10kB") // so that each bucket has multiple splits
                .put("hive.storage-format", "TEXTFILE") // so that there's no minimum split size for the file
                .put("hive.compression-codec", "NONE") // so that the file is splittable
                .build();
        Map<String, String> hiveAutoVacuumProperties = ImmutableMap.<String, String>builder()
                .putAll(hiveProperties)
                .put("hive.auto-vacuum-enabled", "true")
                .put("hive.vacuum-collector-interval", "15s")
                .build();
        queryRunner.createCatalog(HIVE_AUTO_VACUUM_CATALOG, HIVE_CATALOG, hiveAutoVacuumProperties);
        queryRunner.createCatalog(HIVE_CATALOG, HIVE_CATALOG, hiveProperties);
        queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, HIVE_CATALOG, hiveBucketedProperties);

        HiveIdentity identity = new HiveIdentity(SESSION);
        if (!metastore.getDatabase(TPCH_SCHEMA).isPresent()) {
            metastore.createDatabase(identity, createDatabaseMetastoreObject(TPCH_SCHEMA));
            copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(TPCH_BUCKETED_SCHEMA).isPresent()) {
            metastore.createDatabase(identity, createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA));
            copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, createBucketedSession(Optional.empty()), tables);
        }
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
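For reference, a minimal invocation sketch with the state store and the JDBC metastore both disabled. The argument values are illustrative assumptions; "sql-standard" is one of the standard hive.security modes, and passing null for jdbcUrl skips the hetu-metastore.properties setup entirely.

    // Hypothetical call: plain Hive runner, default data dir, no state store, no JDBC metastore.
    DistributedQueryRunner runner = createQueryRunner(
            TpchTable.getTables(),
            ImmutableMap.of(),   // extraProperties
            "sql-standard",      // security
            ImmutableMap.of(),   // extraHiveProperties
            Optional.empty(),    // baseDataDir
            false,               // hasStateStore -> takes the build() branch with coordinator properties
            null);               // jdbcUrl -> skips the hetu-metastore.properties setup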
Use of io.prestosql.tests.DistributedQueryRunner in project hetu-core by openlookeng.
From the class TestHiveDistributedJoinQueriesWithDynamicFiltering, the method testJoinWithEmptyBuildSide:
@Test
public void testJoinWithEmptyBuildSide()
{
    Session session = Session.builder(getSession())
            .setSystemProperty(JOIN_DISTRIBUTION_TYPE, FeaturesConfig.JoinDistributionType.BROADCAST.name())
            .build();
    DistributedQueryRunner runner = (DistributedQueryRunner) getQueryRunner();
    ResultWithQueryId<MaterializedResult> result = runner.executeWithQueryId(session, "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey AND orders.totalprice = 123.4567");
    assertEquals(result.getResult().getRowCount(), 0);

    OperatorStats probeStats = searchScanFilterAndProjectOperatorStats(result.getQueryId(), "tpch:lineitem");
    // Probe-side is not scanned at all, due to dynamic filtering:
    assertEquals(probeStats.getInputPositions(), 0L);
}
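For contrast, a hedged counter-example using the same helpers: without the unsatisfiable totalprice predicate, the dynamic filter built from the orders side cannot eliminate the probe side, so the same stats lookup should report scanned positions. This snippet is illustrative only and is not part of the test class.

    // Hypothetical contrast case: a satisfiable join still scans the probe side.
    ResultWithQueryId<MaterializedResult> nonEmpty = runner.executeWithQueryId(session,
            "SELECT * FROM lineitem JOIN orders ON lineitem.orderkey = orders.orderkey");
    OperatorStats stats = searchScanFilterAndProjectOperatorStats(nonEmpty.getQueryId(), "tpch:lineitem");
    assertTrue(stats.getInputPositions() > 0);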