Use of io.trino.plugin.tpch.TpchPlugin in project trino by trinodb.
From the class TestIcebergOrcMetricsCollection, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception {
// single-threaded tasks and a single writer keep the produced files deterministic
Session session = testSessionBuilder()
        .setCatalog("iceberg")
        .setSchema("test_schema")
        .setSystemProperty(TASK_CONCURRENCY, "1")
        .setSystemProperty(TASK_WRITER_COUNT, "1")
        .setSystemProperty(MAX_DRIVERS_PER_TASK, "1")
        .setCatalogSessionProperty("iceberg", "orc_string_statistics_limit", Integer.MAX_VALUE + "B")
        .build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).setNodeCount(1).build();
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
HdfsConfig hdfsConfig = new HdfsConfig();
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
HiveMetastore metastore = new FileHiveMetastore(
        new NodeVersion("test_version"),
        hdfsEnvironment,
        new MetastoreConfig(),
        new FileHiveMetastoreConfig()
                .setCatalogDirectory(baseDir.toURI().toString())
                .setMetastoreUser("test"));
tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
trinoCatalog = new TrinoHiveCatalog(
        new CatalogName("catalog"),
        memoizeMetastore(metastore, 1000),
        hdfsEnvironment,
        new TestingTypeManager(),
        tableOperationsProvider,
        "trino-version",
        false,
        false,
        false);
queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
queryRunner.createCatalog("iceberg", "iceberg");
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");
queryRunner.execute("CREATE SCHEMA test_schema");
return queryRunner;
}
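A minimal usage sketch for the runner above, inspecting Iceberg's $files metadata table; the table name and printed columns are illustrative, not taken from the Trino test itself:

// Hypothetical follow-up: write a table through the runner and inspect the
// ORC-derived metrics that Iceberg records per data file.
queryRunner.execute("CREATE TABLE test_schema.orders AS SELECT * FROM tpch.tiny.orders");
MaterializedResult files = queryRunner.execute(
        "SELECT file_path, record_count FROM test_schema.\"orders$files\"");
files.getMaterializedRows().forEach(row -> System.out.println(row));
queryRunner.execute("DROP TABLE test_schema.orders");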
Use of io.trino.plugin.tpch.TpchPlugin in project trino by trinodb.
From the class DruidQueryRunner, method createDruidQueryRunnerTpch:
public static DistributedQueryRunner createDruidQueryRunnerTpch(TestingDruidServer testingDruidServer, Map<String, String> extraProperties, Iterable<TpchTable<?>> tables) throws Exception {
DistributedQueryRunner queryRunner = null;
try {
queryRunner = DistributedQueryRunner.builder(createSession()).setExtraProperties(extraProperties).build();
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");
Map<String, String> connectorProperties = new HashMap<>();
connectorProperties.putIfAbsent("connection-url", testingDruidServer.getJdbcUrl());
queryRunner.installPlugin(new DruidJdbcPlugin());
queryRunner.createCatalog("druid", "druid", connectorProperties);
log.info("Loading data from druid.%s...", SCHEMA);
long startTime = System.nanoTime();
for (TpchTable<?> table : tables) {
long start = System.nanoTime();
log.info("Running import for %s", table.getTableName());
MaterializedResult rows = queryRunner.execute(DruidTpchTables.getSelectQuery(table.getTableName()));
copyAndIngestTpchData(rows, testingDruidServer, table.getTableName());
log.info("Imported %s rows for %s in %s", rows.getRowCount(), table.getTableName(), nanosSince(start).convertToMostSuccinctTimeUnit());
}
log.info("Loading from druid.%s complete in %s", SCHEMA, nanosSince(startTime).toString(SECONDS));
return queryRunner;
} catch (Throwable e) {
closeAllSuppress(e, queryRunner);
throw e;
}
}
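Trino's query-runner classes conventionally also expose a main() for ad-hoc local testing. A sketch in that spirit, assuming TestingDruidServer starts a local Druid on construction; the port property is illustrative:

public static void main(String[] args) throws Exception
{
    // assumption: constructing TestingDruidServer boots a local Druid instance
    DistributedQueryRunner queryRunner = createDruidQueryRunnerTpch(
            new TestingDruidServer(),
            ImmutableMap.of("http-server.http.port", "8080"),
            TpchTable.getTables());
    Logger log = Logger.get(DruidQueryRunner.class);
    log.info("======== SERVER STARTED ========");
    log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl());
}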
Use of io.trino.plugin.tpch.TpchPlugin in project trino by trinodb.
From the class ElasticsearchQueryRunner, method createElasticsearchQueryRunner:
public static DistributedQueryRunner createElasticsearchQueryRunner(HostAndPort address, Iterable<TpchTable<?>> tables, Map<String, String> extraProperties, Map<String, String> extraConnectorProperties, int nodeCount) throws Exception {
RestHighLevelClient client = null;
DistributedQueryRunner queryRunner = null;
try {
queryRunner = DistributedQueryRunner.builder(createSession()).setExtraProperties(extraProperties).setNodeCount(nodeCount).build();
queryRunner.installPlugin(new JmxPlugin());
queryRunner.createCatalog("jmx", "jmx");
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");
ElasticsearchConnectorFactory testFactory = new ElasticsearchConnectorFactory();
installElasticsearchPlugin(address, queryRunner, testFactory, extraConnectorProperties);
TestingTrinoClient trinoClient = queryRunner.getClient();
LOG.info("Loading data...");
client = new RestHighLevelClient(RestClient.builder(HttpHost.create(address.toString())));
long startTime = System.nanoTime();
for (TpchTable<?> table : tables) {
loadTpchTopic(client, trinoClient, table);
}
LOG.info("Loading complete in %s", nanosSince(startTime).toString(SECONDS));
return queryRunner;
} catch (Exception e) {
closeAllSuppress(e, queryRunner, client);
throw e;
}
}
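A hedged sketch of invoking this factory; ElasticsearchServer stands in for whatever container wrapper the test suite uses and is an assumption here:

// assumption: a testcontainers-style wrapper exposing the cluster's HTTP address
ElasticsearchServer elasticsearch = new ElasticsearchServer();
DistributedQueryRunner queryRunner = createElasticsearchQueryRunner(
        elasticsearch.getAddress(),
        TpchTable.getTables(),
        ImmutableMap.of(),   // no extra server properties
        ImmutableMap.of(),   // no extra connector properties
        3);                  // a three-node cluster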
Use of io.trino.plugin.tpch.TpchPlugin in project trino by trinodb.
From the class TestTrinoDatabaseMetaData, method setupServer:
@BeforeClass
public void setupServer() throws Exception {
Logging.initialize();
server = TestingTrinoServer.create();
server.installPlugin(new TpchPlugin());
server.createCatalog(TEST_CATALOG, "tpch");
server.installPlugin(new BlackHolePlugin());
server.createCatalog("blackhole", "blackhole");
server.installPlugin(new HivePlugin());
server.createCatalog("hive", "hive", ImmutableMap.<String, String>builder().put("hive.metastore", "file").put("hive.metastore.catalog.dir", server.getBaseDataDir().resolve("hive").toAbsolutePath().toString()).put("hive.security", "sql-standard").buildOrThrow());
countingMockConnector = new CountingMockConnector();
server.installPlugin(countingMockConnector.getPlugin());
server.createCatalog(COUNTING_CATALOG, "mock", ImmutableMap.of());
server.waitForNodeRefresh(Duration.ofSeconds(10));
try (Connection connection = createConnection();
Statement statement = connection.createStatement()) {
statement.executeUpdate("CREATE SCHEMA blackhole.blackhole");
}
try (Connection connection = createConnection()) {
connection.setCatalog("hive");
try (Statement statement = connection.createStatement()) {
statement.execute("SET ROLE admin IN hive");
statement.execute("CREATE SCHEMA default");
statement.execute("CREATE TABLE default.test_table (a varchar)");
statement.execute("CREATE VIEW default.test_view AS SELECT * FROM hive.default.test_table");
}
}
}
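A minimal sketch of the kind of assertion this setup enables, using plain JDBC DatabaseMetaData against the server; the real test's assertions differ:

try (Connection connection = createConnection()) {
    DatabaseMetaData metaData = connection.getMetaData();
    // enumerate the catalogs registered in setupServer()
    try (ResultSet rs = metaData.getCatalogs()) {
        while (rs.next()) {
            System.out.println(rs.getString("TABLE_CAT"));
        }
    }
}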
Use of io.trino.plugin.tpch.TpchPlugin in project trino by trinodb.
From the class AccumuloQueryRunner, method createAccumuloQueryRunner:
public static synchronized DistributedQueryRunner createAccumuloQueryRunner(Map<String, String> extraProperties) throws Exception {
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(createSession()).setExtraProperties(extraProperties).build();
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");
TestingAccumuloServer server = TestingAccumuloServer.getInstance();
queryRunner.installPlugin(new AccumuloPlugin());
Map<String, String> accumuloProperties = ImmutableMap.<String, String>builder()
        .put(AccumuloConfig.INSTANCE, server.getInstanceName())
        .put(AccumuloConfig.ZOOKEEPERS, server.getZooKeepers())
        .put(AccumuloConfig.USERNAME, server.getUser())
        .put(AccumuloConfig.PASSWORD, server.getPassword())
        .put(AccumuloConfig.ZOOKEEPER_METADATA_ROOT, "/presto-accumulo-test")
        .buildOrThrow();
queryRunner.createCatalog("accumulo", "accumulo", accumuloProperties);
// copy TPC-H data only once per JVM; the flag guards repeated runner creation
if (!tpchLoaded) {
copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(), TpchTable.getTables());
server.getConnector().tableOperations().addSplits("tpch.orders", ImmutableSortedSet.of(new Text(new LexicoderRowSerializer().encode(BIGINT, 7500L))));
tpchLoaded = true;
}
return queryRunner;
}
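Because the loader pre-splits tpch.orders at row 7500, a quick smoke check of the loaded data might look like the following sketch; the query and expected count assume the TPC-H tiny scale factor:

DistributedQueryRunner queryRunner = createAccumuloQueryRunner(ImmutableMap.of());
// the tiny scale factor's orders table holds 15,000 rows
MaterializedResult count = queryRunner.execute("SELECT count(*) FROM accumulo.tpch.orders");
System.out.println(count.getOnlyValue());
queryRunner.close();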