Use of com.facebook.presto.tpch.TpchConnectorFactory in project presto by prestodb.
The class TestMLQueries, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() {
    Session defaultSession = testSessionBuilder()
            .setCatalog("local")
            .setSchema(TINY_SCHEMA_NAME)
            .build();
    LocalQueryRunner localQueryRunner = new LocalQueryRunner(defaultSession);
    // add the tpch catalog; local queries run directly against the generator
    localQueryRunner.createCatalog(defaultSession.getCatalog().get(), new TpchConnectorFactory(1), ImmutableMap.of());
    // register the ML plugin's types, parametric types, and functions
    MLPlugin plugin = new MLPlugin();
    for (Type type : plugin.getTypes()) {
        localQueryRunner.getFunctionAndTypeManager().addType(type);
    }
    for (ParametricType parametricType : plugin.getParametricTypes()) {
        localQueryRunner.getFunctionAndTypeManager().addParametricType(parametricType);
    }
    localQueryRunner.getMetadata().registerBuiltInFunctions(extractFunctions(plugin.getFunctions()));
    return localQueryRunner;
}
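With the ML functions registered, a subclass test can exercise training and prediction directly against the TPC-H generator. A minimal hedged sketch, assuming the assertQuery helper from the test framework and the learn_classifier/classify functions shipped with presto-ml; the exact SQL and expected result are illustrative:

// A hedged sketch of a test against the runner above; not taken verbatim
// from TestMLQueries itself.
@Test
public void testTrainAndPredict() {
    assertQuery(
            "SELECT classify(features(1, 2), model) FROM (" +
            "  SELECT learn_classifier(labels, features) AS model" +
            "  FROM (SELECT nationkey AS labels, features(nationkey, regionkey) AS features FROM nation)" +
            ") t",
            "SELECT 1");
}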
Use of com.facebook.presto.tpch.TpchConnectorFactory in project presto by prestodb.
The class TestPrestoSparkLauncherIntegrationSmokeTest, method setUp:
@BeforeClass
public void setUp() throws Exception {
    assertEquals(DateTimeZone.getDefault(), TIME_ZONE, "Timezone not configured correctly. Add -Duser.timezone=America/Bahia_Banderas to your JVM arguments");
    // the default temporary directory location on macOS is not sharable to Docker
    tempDir = new File("/tmp", randomUUID().toString());
    createDirectories(tempDir.toPath());
    sparkWorkDirectory = new File(tempDir, "work");
    createDirectories(sparkWorkDirectory.toPath());
    File composeYaml = extractResource("docker-compose.yml", tempDir);
    dockerCompose = new DockerCompose(composeYaml);
    dockerCompose.verifyInstallation();
    dockerCompose.pull();
    composeProcess = dockerCompose.up(ImmutableMap.of("spark-master", 1, "spark-worker", 2, "hadoop-master", 1));
    Session session = testSessionBuilder()
            .setCatalog("hive")
            .setSchema("default")
            .build();
    localQueryRunner = new LocalQueryRunner(session);
    HiveHadoop2Plugin plugin = new HiveHadoop2Plugin();
    ConnectorFactory hiveConnectorFactory = getOnlyElement(plugin.getConnectorFactories());
    addStaticResolution("hadoop-master", "127.0.0.1");
    String hadoopMasterAddress = dockerCompose.getContainerAddress("hadoop-master");
    // the datanode is accessed via an internal Docker IP address that is not reachable from the host
    addStaticResolution(hadoopMasterAddress, "127.0.0.1");
    localQueryRunner.createCatalog("hive", hiveConnectorFactory, ImmutableMap.of(
            "hive.metastore.uri", "thrift://127.0.0.1:9083",
            "hive.time-zone", TIME_ZONE.getID(),
            "hive.experimental-optimized-partition-update-serialization-enabled", "true"));
    localQueryRunner.createCatalog("tpch", new TpchConnectorFactory(), ImmutableMap.of());
    // it may take some time for the docker containers to start
    ensureHiveIsRunning(localQueryRunner, new Duration(10, MINUTES));
    importTables(localQueryRunner, "lineitem", "orders");
    importTablesBucketed(localQueryRunner, ImmutableList.of("orderkey"), "lineitem", "orders");
    File projectRoot = resolveProjectRoot();
    prestoLauncher = resolveFile(new File(projectRoot, "presto-spark-launcher/target"), Pattern.compile("presto-spark-launcher-[\\d\\.]+(-SNAPSHOT)?\\.jar"));
    logPackageInfo(prestoLauncher);
    prestoPackage = resolveFile(new File(projectRoot, "presto-spark-package/target"), Pattern.compile("presto-spark-package-.+\\.tar\\.gz"));
    logPackageInfo(prestoPackage);
    configProperties = new File(tempDir, "config.properties");
    storeProperties(configProperties, ImmutableMap.of("query.hash-partition-count", "10"));
    catalogDirectory = new File(tempDir, "catalogs");
    createDirectories(catalogDirectory.toPath());
    storeProperties(new File(catalogDirectory, "hive.properties"), ImmutableMap.of(
            "connector.name", "hive-hadoop2",
            "hive.metastore.uri", "thrift://hadoop-master:9083",
            // hadoop native libraries are disabled because the getnetgrent dependency is missing
            "hive.dfs.require-hadoop-native", "false",
            "hive.time-zone", TIME_ZONE.getID()));
    storeProperties(new File(catalogDirectory, "tpch.properties"), ImmutableMap.of(
            "connector.name", "tpch",
            "tpch.splits-per-node", "4",
            "tpch.partitioning-enabled", "false"));
}
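The storeProperties helper is referenced above but not shown. A plausible implementation, assuming it simply serializes the map as a standard Java properties file; the name and signature come from the call sites, the body is a guess:

// A hedged sketch of storeProperties; requires java.io.*, java.util.Map,
// and java.util.Properties. The real helper in the test class may differ.
private static void storeProperties(File file, Map<String, String> properties)
        throws IOException {
    Properties javaProperties = new Properties();
    javaProperties.putAll(properties);
    try (OutputStream out = new FileOutputStream(file)) {
        javaProperties.store(out, null);
    }
}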
Use of com.facebook.presto.tpch.TpchConnectorFactory in project presto by prestodb.
The class HiveBenchmarkQueryRunner, method createLocalQueryRunner:
public static LocalQueryRunner createLocalQueryRunner(File tempDir) {
    Session session = testSessionBuilder()
            .setCatalog("hive")
            .setSchema("tpch")
            .build();
    LocalQueryRunner localQueryRunner = new LocalQueryRunner(session);
    // add the tpch catalog backed by the data generator
    localQueryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());
    // add the hive catalog backed by a file-based metastore under tempDir
    File hiveDir = new File(tempDir, "hive_data");
    ExtendedHiveMetastore metastore = createTestingFileHiveMetastore(hiveDir);
    metastore.createDatabase(METASTORE_CONTEXT, Database.builder()
            .setDatabaseName("tpch")
            .setOwnerName("public")
            .setOwnerType(PrincipalType.ROLE)
            .build());
    HiveConnectorFactory hiveConnectorFactory = new HiveConnectorFactory("hive", HiveBenchmarkQueryRunner.class.getClassLoader(), Optional.of(metastore));
    Map<String, String> hiveCatalogConfig = ImmutableMap.<String, String>builder()
            .put("hive.max-split-size", "10GB")
            .build();
    localQueryRunner.createCatalog("hive", hiveConnectorFactory, hiveCatalogConfig);
    // copy the benchmark tables from the tpch generator into hive
    localQueryRunner.execute("CREATE TABLE orders AS SELECT * FROM tpch.sf1.orders");
    localQueryRunner.execute("CREATE TABLE lineitem AS SELECT * FROM tpch.sf1.lineitem");
    return localQueryRunner;
}
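A caller might use this factory roughly as follows; the temporary-directory handling and the query are illustrative, and the try-with-resources relies on LocalQueryRunner being Closeable:

// A hedged usage sketch; the query and temp-directory handling are
// illustrative, not part of HiveBenchmarkQueryRunner itself.
public static void main(String[] args) throws Exception {
    File tempDir = java.nio.file.Files.createTempDirectory("hive-benchmark").toFile();
    try (LocalQueryRunner queryRunner = createLocalQueryRunner(tempDir)) {
        MaterializedResult result = queryRunner.execute("SELECT count(*) FROM lineitem");
        System.out.println(result.getOnlyValue());
    }
}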
Use of com.facebook.presto.tpch.TpchConnectorFactory in project presto by prestodb.
The class TestMemoryPools, method setUp:
private void setUp(Supplier<List<Driver>> driversSupplier) {
    checkState(localQueryRunner == null, "Already set up");
    Session session = testSessionBuilder()
            .setCatalog("tpch")
            .setSchema("tiny")
            .setSystemProperty("task_default_concurrency", "1")
            .build();
    localQueryRunner = queryRunnerWithInitialTransaction(session);
    // add the tpch catalog
    localQueryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());
    userPool = new MemoryPool(new MemoryPoolId("test"), TEN_MEGABYTES);
    fakeQueryId = new QueryId("fake");
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(new DataSize(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(
            new QueryId("query"),
            TEN_MEGABYTES,
            new DataSize(20, MEGABYTE),
            TEN_MEGABYTES,
            new DataSize(1, GIGABYTE),
            userPool,
            new TestingGcMonitor(),
            localQueryRunner.getExecutor(),
            localQueryRunner.getScheduler(),
            TEN_MEGABYTES,
            spillSpaceTracker,
            listJsonCodec(TaskMemoryReservationSummary.class));
    taskContext = createTaskContext(queryContext, localQueryRunner.getExecutor(), session);
    drivers = driversSupplier.get();
}
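A test built on this setUp can reserve from the pool and assert the accounting. A minimal hedged sketch; the allocation tag and byte counts are illustrative, and reserve/free/getReservedBytes are assumed from MemoryPool's public API:

// A hedged sketch; the tag and sizes are illustrative.
@Test
public void testReservationAccounting() {
    setUp(() -> ImmutableList.of());
    userPool.reserve(fakeQueryId, "test_tag", 1024);
    assertEquals(userPool.getReservedBytes(), 1024);
    userPool.free(fakeQueryId, "test_tag", 1024);
    assertEquals(userPool.getReservedBytes(), 0);
}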
Use of com.facebook.presto.tpch.TpchConnectorFactory in project presto by prestodb.
The class TestIterativeOptimizer, method setUp:
@BeforeClass
public void setUp() {
    Session.SessionBuilder sessionBuilder = testSessionBuilder()
            .setCatalog("local")
            .setSchema("tiny")
            .setSystemProperty("task_concurrency", "1")
            .setSystemProperty("iterative_optimizer_enabled", "true")
            .setSystemProperty("iterative_optimizer_timeout", "1ms");
    queryRunner = new LocalQueryRunner(sessionBuilder.build());
    queryRunner.createCatalog(queryRunner.getDefaultSession().getCatalog().get(), new TpchConnectorFactory(1), ImmutableMap.of());
}
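With iterative_optimizer_timeout set to 1ms, even a simple aggregation should exhaust the optimizer's time budget. A hedged sketch of such a test; the exception type and message check are assumptions about the optimizer's error reporting:

// A hedged sketch; Presto fails the query when the iterative optimizer
// exhausts its time limit, but the exact message text is an assumption here.
@Test
public void testOptimizerTimesOut() {
    try {
        queryRunner.execute("SELECT nationkey FROM nation GROUP BY nationkey");
        fail("expected the optimizer to exhaust its 1ms time limit");
    }
    catch (RuntimeException e) {
        assertTrue(e.getMessage().contains("time limit"));
    }
}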