Use of io.hetu.core.common.filesystem.TempFolder in project hetu-core by openlookeng.
The class TestHeuristicIndexClient, method testDeleteSelectedColumnsHelper.
@Test
public void testDeleteSelectedColumnsHelper() throws IOException {
    String tableName = "catalog.schema.UT_test";
    try (TempFolder folder = new TempFolder()) {
        folder.create();
        File tableFolder = new File(folder.getRoot().getPath(), tableName);
        assertTrue(tableFolder.mkdir());
        File columnFolder = new File(tableFolder, "test_column");
        assertTrue(columnFolder.mkdirs());
        File indexTypeFolder = new File(columnFolder, "BLOOM");
        assertTrue(indexTypeFolder.mkdirs());
        assertTrue(new File(indexTypeFolder, "testIndex.index").createNewFile());
        HetuFileSystemClient fs = new HetuLocalFileSystemClient(new LocalConfig(new Properties()), folder.getRoot().toPath());
        HetuMetastore testMetaStore = new HetuFsMetastore(new HetuFsMetastoreConfig().setHetuFileSystemMetastorePath(folder.getRoot().getPath()), fs);
        HeuristicIndexClient client = new HeuristicIndexClient(fs, testMetaStore, folder.getRoot().toPath());
        client.addIndexRecord(new CreateIndexMetadata("idx1", tableName, "BLOOM", 0L, Collections.singletonList(new Pair<>("test_column", VARCHAR)), Collections.emptyList(), new Properties(), "user", CreateIndexMetadata.Level.UNDEFINED));
        client.deleteIndex("idx1", Collections.emptyList());
        assertFalse(indexTypeFolder.exists());
    }
}
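For orientation, the following is a minimal sketch (not taken from the project) of the try-with-resources lifecycle the test above relies on. Based on the usage shown, it assumes that TempFolder implements AutoCloseable, that create() materializes the directory, that getRoot() returns it as a java.io.File, and that close() removes it again.

import io.hetu.core.common.filesystem.TempFolder;
import java.io.File;
import java.io.IOException;

public class TempFolderLifecycleSketch {
    public static void main(String[] args) throws IOException {
        try (TempFolder folder = new TempFolder()) {
            folder.create();                                  // materialize the temporary directory
            File child = new File(folder.getRoot(), "child"); // build paths under the root, as the test does
            System.out.println(child.getAbsolutePath());
        }                                                     // close() is assumed to clean the directory up again
    }
}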
Use of io.hetu.core.common.filesystem.TempFolder in project hetu-core by openlookeng.
The class TestSpatialJoinPlanning, method createQueryRunner.
private static LocalQueryRunner createQueryRunner() throws IOException {
    LocalQueryRunner queryRunner = new LocalQueryRunner(testSessionBuilder().setCatalog("memory").setSchema("default").build());
    queryRunner.installPlugin(new HetuFileSystemClientPlugin());
    queryRunner.installPlugin(new HetuMetastorePlugin());
    queryRunner.installPlugin(new GeoPlugin());
    queryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());
    TempFolder folder = new TempFolder().create();
    Runtime.getRuntime().addShutdownHook(new Thread(folder::close));
    HashMap<String, String> metastoreConfig = new HashMap<>();
    metastoreConfig.put("hetu.metastore.type", "hetufilesystem");
    metastoreConfig.put("hetu.metastore.hetufilesystem.profile-name", "default");
    metastoreConfig.put("hetu.metastore.hetufilesystem.path", folder.newFolder("metastore").getAbsolutePath());
    metastoreConfig.put("hetu.metastore.cache.type", "local");
    queryRunner.loadMetastore(metastoreConfig);
    queryRunner.createCatalog("memory", new MemoryConnectorFactory(), ImmutableMap.of("memory.spill-path", folder.newFolder("memory-connector").getAbsolutePath()));
    queryRunner.execute(format("CREATE TABLE kdb_tree AS SELECT '%s' AS v", KDB_TREE_JSON));
    queryRunner.execute("CREATE TABLE points (lng, lat, name) AS (VALUES (2.1e0, 2.1e0, 'x'))");
    queryRunner.execute("CREATE TABLE polygons (wkt, name) AS (VALUES ('POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))', 'a'))");
    return queryRunner;
}
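The same two-step pattern recurs in the remaining examples: a TempFolder whose cleanup is deferred to a JVM shutdown hook, plus a hetufilesystem metastore configuration rooted inside that folder. A hypothetical helper capturing it could look like the sketch below; the class and method names are illustrative and not part of the project.

import io.hetu.core.common.filesystem.TempFolder;
import java.io.IOException;
import java.util.HashMap;

public final class TempMetastoreConfigSketch {
    private TempMetastoreConfigSketch() {}

    // Creates a TempFolder that is cleaned up at JVM exit and returns a
    // hetufilesystem metastore configuration pointing into it.
    public static HashMap<String, String> newFileSystemMetastoreConfig() throws IOException {
        TempFolder folder = new TempFolder().create();
        Runtime.getRuntime().addShutdownHook(new Thread(folder::close));
        HashMap<String, String> metastoreConfig = new HashMap<>();
        metastoreConfig.put("hetu.metastore.type", "hetufilesystem");
        metastoreConfig.put("hetu.metastore.hetufilesystem.profile-name", "default");
        metastoreConfig.put("hetu.metastore.hetufilesystem.path", folder.newFolder("metastore").getAbsolutePath());
        return metastoreConfig;
    }
}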
Use of io.hetu.core.common.filesystem.TempFolder in project hetu-core by openlookeng.
The class DockerizedHive, method correctPortMapping.
/**
 * Adjust the Hadoop Configuration object and the hive properties object by creating two temporary mock core-site.xml
 * and hdfs-site.xml files that contain the correct port to the container.
 */
private void correctPortMapping() {
    // We'll create two temporary files, core-site.xml and hdfs-site.xml, that contain the correct port to the
    // running docker container. Then we'll update hive.properties to point to these two temp files.
    File coreSite;
    File hdfsSite;
    try {
        configFolder = new TempFolder().create();
        coreSite = configFolder.newFile();
        hdfsSite = configFolder.newFile();
    } catch (IOException e) {
        throw new IllegalStateException("Failed to create a temporary folder and file", e);
    }
    try (InputStream coreIs = new FileInputStream(coreSitePath)) {
        DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance();
        documentBuilderFactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
        DocumentBuilder documentBuilder = documentBuilderFactory.newDocumentBuilder();
        Document coreXml = documentBuilder.parse(coreIs);
        coreXml.getDocumentElement().normalize();
        NodeList localProperties = coreXml.getElementsByTagName("property");
        for (int i = 0; i < localProperties.getLength(); i++) {
            Node node = localProperties.item(i);
            node.normalize();
            if (node.getNodeType() == Node.ELEMENT_NODE) {
                Element element = (Element) node;
                if (element.getElementsByTagName("name").item(0).getTextContent().equals("fs.defaultFS")) {
                    element.getElementsByTagName("value").item(0).setTextContent(String.format("hdfs://hadoop-master:%s", hostPortProvider.getHostPort(9000)));
                    break;
                }
            }
        }
        Transformer transformer = TransformerFactory.newInstance().newTransformer();
        Result output = new StreamResult(coreSite);
        Source input = new DOMSource(coreXml);
        transformer.transform(input, output);
        // hdfs-site.xml doesn't contain any port, so we can just copy it over
        Files.copy(new File(hdfsSitePath), hdfsSite);
    } catch (FileNotFoundException e) {
        throw new IllegalStateException(coreSitePath + " not found", e);
    } catch (IOException e) {
        throw new IllegalStateException("Failed to create mock core-site.xml and hdfs-site.xml", e);
    } catch (ParserConfigurationException | SAXException | TransformerException e) {
        throw new IllegalStateException("Failed to load " + coreSitePath + " and " + hdfsSitePath, e);
    }
    // Update the hive.properties file with the correct port
    try {
        hdfsSitePath = hdfsSite.getCanonicalPath();
        coreSitePath = coreSite.getCanonicalPath();
        properties.setProperty(TestConstantsHelper.HIVE_CONFIG_RESOURCES, coreSitePath + "," + hdfsSitePath);
    } catch (IOException e) {
        throw new IllegalStateException(coreSite + " and " + hdfsSite + " weren't created successfully", e);
    }
    properties.setProperty(TestConstantsHelper.HIVE_METASTORE_URI, String.format("thrift://hadoop-master:%s", hostPortProvider.getHostPort(9083)));
}
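To make the XML manipulation easier to follow in isolation, here is a condensed restatement of the rewrite step above as a standalone helper. The class and method names are hypothetical and the sketch is not part of the project; it only mirrors the DocumentBuilder/Transformer logic shown.

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

public final class HadoopSiteRewriterSketch {
    private HadoopSiteRewriterSketch() {}

    // Copies a Hadoop *-site.xml file to target, replacing the value of the named property.
    public static void rewriteProperty(File source, File target, String name, String value) throws Exception {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); // guard against XXE, as above
        Document doc;
        try (InputStream in = new FileInputStream(source)) {
            doc = factory.newDocumentBuilder().parse(in);
        }
        NodeList properties = doc.getElementsByTagName("property");
        for (int i = 0; i < properties.getLength(); i++) {
            Element property = (Element) properties.item(i);
            if (name.equals(property.getElementsByTagName("name").item(0).getTextContent())) {
                property.getElementsByTagName("value").item(0).setTextContent(value);
                break;
            }
        }
        TransformerFactory.newInstance().newTransformer().transform(new DOMSource(doc), new StreamResult(target));
    }
}

With such a helper, the core-site.xml step above would reduce to a single call such as rewriteProperty(new File(coreSitePath), coreSite, "fs.defaultFS", String.format("hdfs://hadoop-master:%s", hostPortProvider.getHostPort(9000))).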
Use of io.hetu.core.common.filesystem.TempFolder in project hetu-core by openlookeng.
The class TestHetuConnection, method setupServer.
@BeforeClass
public void setupServer() throws Exception {
    Logging.initialize();
    TempFolder folder = new TempFolder().create();
    Runtime.getRuntime().addShutdownHook(new Thread(folder::close));
    HashMap<String, String> metastoreConfig = new HashMap<>();
    metastoreConfig.put("hetu.metastore.type", "hetufilesystem");
    metastoreConfig.put("hetu.metastore.hetufilesystem.profile-name", "default");
    metastoreConfig.put("hetu.metastore.hetufilesystem.path", folder.newFolder("metastore").getAbsolutePath());
    server = new TestingPrestoServer();
    server.installPlugin(new HetuFileSystemClientPlugin());
    server.installPlugin(new HetuMetastorePlugin());
    server.installPlugin(new MemoryPlugin());
    server.loadMetastore(metastoreConfig);
    server.createCatalog("memory", "memory", ImmutableMap.of("memory.spill-path", folder.newFolder("memory-connector").getAbsolutePath()));
    try (Connection connection = createConnection();
            Statement statement = connection.createStatement()) {
        statement.execute("CREATE SCHEMA testschema");
    }
}
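The createConnection() helper is not shown in this excerpt. A plausible sketch, assuming the openLooKeng JDBC driver is on the classpath with the jdbc:lk:// URL prefix, that server.getAddress() exposes the coordinator's host and port, and that the test server accepts any user name (all assumptions, not project facts, and requiring java.sql.Connection, java.sql.DriverManager, and java.sql.SQLException imports):

// Hypothetical sketch of the createConnection() helper used above; the URL prefix,
// address accessor, and user name are assumptions, not taken from the project.
private Connection createConnection() throws SQLException {
    return DriverManager.getConnection("jdbc:lk://" + server.getAddress(), "test_user", null);
}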
Use of io.hetu.core.common.filesystem.TempFolder in project hetu-core by openlookeng.
The class TestDynamicHiveScalarFunction, method setUpClass.
@BeforeClass
public void setUpClass() throws Exception {
    RecognizedFunctions.addRecognizedFunction(
            "io.hetu.core.hive.dynamicfunctions.examples.udf.ListStringUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.TimeOutUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.BooleanUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.MapDoubleUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.DoubleUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.EvaluateOverloadUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.ShortUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.ArrayListDoubleUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.FloatWrapperUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.LongUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.IntThreeArgsUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.IntUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.IntTwoArgsUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.IntFiveArgsUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.MapIntUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.MapStringUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.EmptyParameterUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.IntWrapperUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.IntSixArgsUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.IntFourArgsUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.BooleanWrappperUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.ListIntUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.LongWrapperUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.ByteWrapperUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.ListDoubleUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.ShortWrapperUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.ByteUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.DoubleWrapperUDF",
            "io.hetu.core.hive.dynamicfunctions.examples.udf.FloatUDF", "io.hetu.core.hive.dynamicfunctions.examples.udf.NullInputUDF");
    try {
        queryRunner = DistributedQueryRunner.builder(testSessionBuilder().setCatalog("memory").setSchema("default").build()).setNodeCount(1).build();
        TempFolder folder = new TempFolder();
        folder.create();
        Runtime.getRuntime().addShutdownHook(new Thread(folder::close));
        // needs a dependency on hetu-filesystem
        queryRunner.installPlugin(new HetuFileSystemClientPlugin());
        // needs a dependency on hetu-metastore
        queryRunner.installPlugin(new HetuMetastorePlugin());
        queryRunner.installPlugin(new MemoryPlugin());
        HashMap<String, String> metastoreConfig = new HashMap<>();
        metastoreConfig.put("hetu.metastore.type", "hetufilesystem");
        metastoreConfig.put("hetu.metastore.hetufilesystem.profile-name", "default");
        metastoreConfig.put("hetu.metastore.hetufilesystem.path", folder.newFolder("metastore").getAbsolutePath());
        queryRunner.getCoordinator().loadMetastore(metastoreConfig);
        queryRunner.createCatalog("memory", "memory", ImmutableMap.of("memory.spill-path", folder.newFolder("memory-connector").getAbsolutePath()));
        createBooleanTable();
        createByteTable();
        createShortTable();
        createIntTable();
        createLongTable();
        createFloatTable();
        createDoubleTable();
        createListTable();
        createMapTable();
        createMultipleParameterTable();
        createEvaluateOverloadTable();
    } catch (Exception e) {
        closeAllSuppress(e, queryRunner);
        throw e;
    }
}
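The createBooleanTable() through createEvaluateOverloadTable() helpers are also not shown in this excerpt. A hypothetical sketch of what one of them might do, using the memory catalog and default schema configured above (the table name, column name, and values are illustrative only):

// Hypothetical sketch only: the real createIntTable() is not part of this excerpt, so the
// table name, column name, and values below are illustrative.
private void createIntTable() {
    queryRunner.execute("CREATE TABLE int_table (col_int) AS (VALUES (1), (2), (3))");
}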