Use of io.hetu.core.metastore.HetuMetastorePlugin in project hetu-core by openlookeng.
From the class HiveQueryRunner, method createQueryRunner:
public static DistributedQueryRunner createQueryRunner(
        Iterable<TpchTable<?>> tables,
        Map<String, String> extraProperties,
        String security,
        Map<String, String> extraHiveProperties,
        Optional<Path> baseDataDir,
        boolean hasStateStore,
        String jdbcUrl)
        throws Exception
{
    assertEquals(DateTimeZone.getDefault(), TIME_ZONE,
            "Timezone not configured correctly. Add -Duser.timezone=America/Bahia_Banderas to your JVM arguments");
    setupLogging();

    DistributedQueryRunner queryRunner;
    if (hasStateStore) {
        queryRunner = DistributedQueryRunner.builder(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))))
                .setNodeCount(4)
                .setExtraProperties(extraProperties)
                .setBaseDataDir(baseDataDir)
                .buildWithStateStore();
    }
    else {
        // Coordinator-only properties used when no state store is requested
        Map<String, String> configProperties = new HashMap<>();
        configProperties.put("auto-vacuum.enabled", "true");
        configProperties.put("optimizer.cte-reuse-enabled", "true");
        configProperties.put("auto-vacuum.scan.interval", "15s");
        configProperties.put("hetu.split-cache-map.enabled", "true");
        queryRunner = DistributedQueryRunner.builder(createSession(Optional.of(new SelectedRole(ROLE, Optional.of("admin")))))
                .setNodeCount(4)
                .setCoordinatorProperties(configProperties)
                .setExtraProperties(extraProperties)
                .setBaseDataDir(baseDataDir)
                .build();
    }

    try {
        if (jdbcUrl != null && !jdbcUrl.isEmpty()) {
            // Write hetu-metastore.properties under ./etc in the working
            // directory, then load the JDBC-backed Hetu metastore from it
            String workingDir = new File("").getCanonicalPath();
            System.setProperty("config", workingDir + "/etc/");
            String configDir = System.getProperty("config");
            String hetuMetastoreConfig = configDir + "hetu-metastore.properties";
            File configDirFile = new File(configDir);
            if (!configDirFile.exists()) {
                configDirFile.mkdirs();
            }
            File configFile = new File(configDir, "hetu-metastore.properties");
            if (!configFile.exists()) {
                try {
                    configFile.createNewFile();
                }
                catch (IOException e) {
                    log.info(e.getMessage());
                }
            }
            try (BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(hetuMetastoreConfig))) {
                bufferedWriter.write("hetu.metastore.db.url = " + jdbcUrl);
                bufferedWriter.write("\n");
                bufferedWriter.write("hetu.metastore.type = jdbc\n");
                bufferedWriter.write("hetu.metastore.db.user = user\n");
                bufferedWriter.write("hetu.metastore.db.password = testpass\n");
                bufferedWriter.write("hetu.metastore.cache.ttl = 0s");
            }
            queryRunner.installPlugin(new HetuMetastorePlugin());
            queryRunner.getCoordinator().loadMetastore();
            queryRunner.installPlugin(new StarTreePlugin());
        }

        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");

        // File-based Hive metastore rooted in the runner's base data dir
        File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").toFile();
        HiveConfig hiveConfig = new HiveConfig();
        HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
        HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
        FileHiveMetastore metastore = new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
        queryRunner.installPlugin(new HivePlugin(HIVE_CATALOG, Optional.of(metastore)));

        Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
                .putAll(extraHiveProperties)
                .put("hive.rcfile.time-zone", TIME_ZONE.getID())
                .put("hive.parquet.time-zone", TIME_ZONE.getID())
                .put("hive.security", security)
                .put("hive.max-partitions-per-scan", "1000")
                .put("hive.assume-canonical-partition-keys", "true")
                .build();
        Map<String, String> hiveBucketedProperties = ImmutableMap.<String, String>builder()
                .putAll(hiveProperties)
                .put("hive.max-initial-split-size", "10kB") // so that each bucket has multiple splits
                .put("hive.max-split-size", "10kB") // so that each bucket has multiple splits
                .put("hive.storage-format", "TEXTFILE") // so that there's no minimum split size for the file
                .put("hive.compression-codec", "NONE") // so that the file is splittable
                .build();
        Map<String, String> hiveAutoVacuumProperties = ImmutableMap.<String, String>builder()
                .putAll(hiveProperties)
                .put("hive.auto-vacuum-enabled", "true")
                .put("hive.vacuum-collector-interval", "15s")
                .build();
        queryRunner.createCatalog(HIVE_AUTO_VACUUM_CATALOG, HIVE_CATALOG, hiveAutoVacuumProperties);
        queryRunner.createCatalog(HIVE_CATALOG, HIVE_CATALOG, hiveProperties);
        queryRunner.createCatalog(HIVE_BUCKETED_CATALOG, HIVE_CATALOG, hiveBucketedProperties);

        // Populate the TPC-H schemas on first run only
        HiveIdentity identity = new HiveIdentity(SESSION);
        if (!metastore.getDatabase(TPCH_SCHEMA).isPresent()) {
            metastore.createDatabase(identity, createDatabaseMetastoreObject(TPCH_SCHEMA));
            copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(Optional.empty()), tables);
        }
        if (!metastore.getDatabase(TPCH_BUCKETED_SCHEMA).isPresent()) {
            metastore.createDatabase(identity, createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA));
            copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, createBucketedSession(Optional.empty()), tables);
        }
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
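For context, a call site might look like the following minimal sketch. The port, security mode, and H2 metastore URL are illustrative assumptions, not values taken from the project; TpchTable.getTables() is the usual way such tests enumerate the tiny TPC-H tables.

    // Minimal sketch of a call site (illustrative values only)
    DistributedQueryRunner queryRunner = HiveQueryRunner.createQueryRunner(
            TpchTable.getTables(),                              // copy all TPC-H tiny tables
            ImmutableMap.of("http-server.http.port", "8080"),   // extraProperties (hypothetical)
            "sql-standard",                                     // hive.security mode
            ImmutableMap.of(),                                  // no extra Hive properties
            Optional.empty(),                                   // default base data dir
            false,                                              // build without a state store
            "jdbc:h2:mem:metastore");                           // hypothetical JDBC metastore URL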
Use of io.hetu.core.metastore.HetuMetastorePlugin in project hetu-core by openlookeng.
From the class HindexQueryRunner, method createQueryRunner:
public static DistributedQueryRunner createQueryRunner(
        Map<String, String> extraProperties,
        Map<String, String> metastoreProperties,
        Map<String, String> coordinatorProperties)
        throws Exception
{
    Session session = testSessionBuilder()
            .setSource("test")
            .setCatalog("hive")
            .setSchema("test")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session)
            .setNodeCount(1)
            .setExtraProperties(extraProperties)
            .setCoordinatorProperties(coordinatorProperties)
            .build();
    try {
        File tempDir = Files.createTempDirectory("test-hive").toFile();
        File hiveDir = new File(tempDir, "hive_data");
        HiveMetastore metastore = createTestingFileHiveMetastore(hiveDir);
        HiveIdentity identity = new HiveIdentity(SESSION);
        metastore.createDatabase(identity, Database.builder()
                .setDatabaseName("test")
                .setOwnerName("public")
                .setOwnerType(PrincipalType.ROLE)
                .build());

        queryRunner.installPlugin(new HetuFileSystemClientPlugin());
        queryRunner.installPlugin(new HetuMetastorePlugin());
        queryRunner.installPlugin(new HiveHadoop2Plugin());
        queryRunner.installPlugin(new HeuristicIndexPlugin());
        queryRunner.installPlugin(new HivePlugin("Hive", Optional.of(metastore)));

        // Load the metastore and initialize the heuristic index client on every server
        queryRunner.getServers().forEach(server -> {
            try {
                server.loadMetastore(metastoreProperties);
                server.getHeuristicIndexerManager().buildIndexClient();
                server.getHeuristicIndexerManager().initCache();
            }
            catch (Exception e) {
                throw new RuntimeException(e);
            }
        });

        Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
                .put("hive.allow-drop-table", "true")
                .build();
        queryRunner.createCatalog("hive", "Hive", hiveProperties);
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
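A call site supplies the Hetu metastore configuration itself. Below is a minimal sketch assuming a hetufilesystem-backed metastore, mirroring the configuration used by the other tests in this listing; the table and index DDL are illustrative, with the CREATE INDEX syntax per openLooKeng's heuristic index feature.

    TempFolder folder = new TempFolder().create();
    Map<String, String> metastoreConfig = new HashMap<>();
    metastoreConfig.put("hetu.metastore.type", "hetufilesystem");
    metastoreConfig.put("hetu.metastore.hetufilesystem.profile-name", "default");
    metastoreConfig.put("hetu.metastore.hetufilesystem.path", folder.newFolder("metastore").getAbsolutePath());

    DistributedQueryRunner queryRunner = HindexQueryRunner.createQueryRunner(
            ImmutableMap.of(),   // extraProperties
            metastoreConfig,
            ImmutableMap.of());  // coordinatorProperties

    // The session built above defaults to catalog "hive", schema "test",
    // so unqualified names resolve there. Illustrative DDL:
    queryRunner.execute("CREATE TABLE t1 (id) AS VALUES 1, 2, 3");
    queryRunner.execute("CREATE INDEX idx1 USING bloom ON t1 (id)");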
Use of io.hetu.core.metastore.HetuMetastorePlugin in project hetu-core by openlookeng.
From the class TestSpatialJoinPlanning, method createQueryRunner:
private static LocalQueryRunner createQueryRunner() throws IOException
{
    LocalQueryRunner queryRunner = new LocalQueryRunner(testSessionBuilder()
            .setCatalog("memory")
            .setSchema("default")
            .build());
    queryRunner.installPlugin(new HetuFileSystemClientPlugin());
    queryRunner.installPlugin(new HetuMetastorePlugin());
    queryRunner.installPlugin(new GeoPlugin());
    queryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());

    TempFolder folder = new TempFolder().create();
    Runtime.getRuntime().addShutdownHook(new Thread(folder::close));
    HashMap<String, String> metastoreConfig = new HashMap<>();
    metastoreConfig.put("hetu.metastore.type", "hetufilesystem");
    metastoreConfig.put("hetu.metastore.hetufilesystem.profile-name", "default");
    metastoreConfig.put("hetu.metastore.hetufilesystem.path", folder.newFolder("metastore").getAbsolutePath());
    metastoreConfig.put("hetu.metastore.cache.type", "local");
    queryRunner.loadMetastore(metastoreConfig);

    queryRunner.createCatalog("memory", new MemoryConnectorFactory(),
            ImmutableMap.of("memory.spill-path", folder.newFolder("memory-connector").getAbsolutePath()));

    queryRunner.execute(format("CREATE TABLE kdb_tree AS SELECT '%s' AS v", KDB_TREE_JSON));
    queryRunner.execute("CREATE TABLE points (lng, lat, name) AS (VALUES (2.1e0, 2.1e0, 'x'))");
    queryRunner.execute("CREATE TABLE polygons (wkt, name) AS (VALUES ('POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))', 'a'))");
    return queryRunner;
}
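With GeoPlugin installed and the kdb_tree, points, and polygons tables in place, the class can assert on plans for spatial joins. The test methods check plan shape rather than results; the following is a representative query of the kind being planned, an illustrative sketch rather than a line from the class:

    queryRunner.execute(
            "SELECT a.name, b.name " +
            "FROM points a, polygons b " +
            "WHERE ST_Contains(ST_GeometryFromText(b.wkt), ST_Point(a.lng, a.lat))");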
Use of io.hetu.core.metastore.HetuMetastorePlugin in project boostkit-bigdata by kunpengcompute.
From the class HiveQueryRunner, method createQueryRunner:
This method is line-for-line identical to the hetu-core HiveQueryRunner.createQueryRunner shown above, with one functional difference: the base Hive catalog properties additionally set hive.omnidata-enabled to false, so OmniData pushdown stays disabled in these tests:

    Map<String, String> hiveProperties = ImmutableMap.<String, String>builder()
            .putAll(extraHiveProperties)
            .put("hive.rcfile.time-zone", TIME_ZONE.getID())
            .put("hive.parquet.time-zone", TIME_ZONE.getID())
            .put("hive.security", security)
            .put("hive.max-partitions-per-scan", "1000")
            .put("hive.assume-canonical-partition-keys", "true")
            .put("hive.omnidata-enabled", "false")
            .build();
Use of io.hetu.core.metastore.HetuMetastorePlugin in project hetu-core by openlookeng.
From the class TestHetuConnection, method setupServer:
@BeforeClass
public void setupServer() throws Exception
{
    Logging.initialize();
    TempFolder folder = new TempFolder().create();
    Runtime.getRuntime().addShutdownHook(new Thread(folder::close));

    HashMap<String, String> metastoreConfig = new HashMap<>();
    metastoreConfig.put("hetu.metastore.type", "hetufilesystem");
    metastoreConfig.put("hetu.metastore.hetufilesystem.profile-name", "default");
    metastoreConfig.put("hetu.metastore.hetufilesystem.path", folder.newFolder("metastore").getAbsolutePath());

    server = new TestingPrestoServer();
    server.installPlugin(new HetuFileSystemClientPlugin());
    server.installPlugin(new HetuMetastorePlugin());
    server.installPlugin(new MemoryPlugin());
    server.loadMetastore(metastoreConfig);
    server.createCatalog("memory", "memory",
            ImmutableMap.of("memory.spill-path", folder.newFolder("memory-connector").getAbsolutePath()));

    try (Connection connection = createConnection();
            Statement statement = connection.createStatement()) {
        statement.execute("CREATE SCHEMA testschema");
    }
}
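The createConnection helper is defined elsewhere in the test class and is not part of this listing. A plausible sketch, assuming the openLooKeng JDBC driver (jdbc:lk: URL scheme) and the testing server's getAddress() accessor; both are assumptions about code not shown here:

    private Connection createConnection() throws SQLException
    {
        // Hypothetical helper: connects to the memory catalog created in
        // setupServer() using the openLooKeng JDBC driver's jdbc:lk scheme.
        String url = format("jdbc:lk://%s/memory/default", server.getAddress());
        return DriverManager.getConnection(url, "admin", null);
    }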